From 3bab307904355a422b60d7c0f3afa83fae03ed8b Mon Sep 17 00:00:00 2001 From: Ettore Di Giacinto Date: Sun, 27 Aug 2023 10:11:32 +0200 Subject: [PATCH] fix(llama): resolve lora adapters correctly from the model file (#964) **Description** we were otherwise expecting absolute paths. this makes it relative to the model file (as someone would expect) **Notes for Reviewers** **[Signed commits](../CONTRIBUTING.md#signing-off-on-commits-developer-certificate-of-origin)** - [ ] Yes, I signed my commits. --- pkg/backend/llm/llama/llama.go | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/pkg/backend/llm/llama/llama.go b/pkg/backend/llm/llama/llama.go index e4c595e1..b73e624b 100644 --- a/pkg/backend/llm/llama/llama.go +++ b/pkg/backend/llm/llama/llama.go @@ -4,6 +4,7 @@ package llama // It is meant to be used by the main executable that is the server for the specific backend type (falcon, gpt3, etc) import ( "fmt" + "path/filepath" "github.com/go-skynet/LocalAI/pkg/grpc/base" pb "github.com/go-skynet/LocalAI/pkg/grpc/proto" @@ -36,12 +37,15 @@ func (llm *LLM) Load(opts *pb.ModelOptions) error { llamaOpts = append(llamaOpts, llama.SetMulMatQ(false)) } + // Get base path of opts.ModelFile and use the same for lora (assume the same path) + basePath := filepath.Dir(opts.ModelFile) + if opts.LoraAdapter != "" { - llamaOpts = append(llamaOpts, llama.SetLoraAdapter(opts.LoraAdapter)) + llamaOpts = append(llamaOpts, llama.SetLoraAdapter(filepath.Join(basePath, opts.LoraAdapter))) } if opts.LoraBase != "" { - llamaOpts = append(llamaOpts, llama.SetLoraBase(opts.LoraBase)) + llamaOpts = append(llamaOpts, llama.SetLoraBase(filepath.Join(basePath, opts.LoraBase))) } if opts.ContextSize != 0 {