feat: move other backends to grpc

This finally makes everything more consistent: the remaining backends now implement the same gRPC-served interface as the rest.

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
Author: Ettore Di Giacinto
Date:   2023-07-15 01:19:43 +02:00
Parent: 5dcfdbe51d
Commit: 1d0ed95a54

54 changed files with 3171 additions and 1712 deletions
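
Taken together, the hunks below converge each backend on one contract: Load, Predict, a PredictStream that now reports errors, and Embeddings with a shared default. The following is a rough sketch only, inferred from the signatures visible in this diff; the real interface lives under pkg/grpc and may differ.

// Sketch of the backend contract implied by this diff; inferred,
// not copied from the repository.
package sketch

import pb "github.com/go-skynet/LocalAI/pkg/grpc/proto"

type Backend interface {
	Load(opts *pb.ModelOptions) error
	Predict(opts *pb.PredictOptions) (string, error)
	// Returning error lets the gRPC layer surface failures
	// instead of silently closing the stream.
	PredictStream(opts *pb.PredictOptions, results chan string) error
	Embeddings(opts *pb.PredictOptions) ([]float32, error)
}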


@@ -5,12 +5,15 @@ package falcon
 import (
 	"fmt"
 
+	"github.com/go-skynet/LocalAI/pkg/grpc/base"
 	pb "github.com/go-skynet/LocalAI/pkg/grpc/proto"
 
 	ggllm "github.com/mudler/go-ggllm.cpp"
 )
 
 type LLM struct {
+	base.Base
+
 	falcon *ggllm.Falcon
 }
 
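
Embedding base.Base relies on Go struct promotion: every method defined on base.Base becomes available on LLM unless the backend overrides it, which is what lets the next hunk delete the hand-written Embeddings stub. The contents of base.Base are not shown in this diff; as a labelled assumption, it plausibly carries defaults along these lines.

// Hypothetical sketch of base.Base (not shown in this commit):
// a struct whose methods are promoted into every backend that
// embeds it, providing shared default behaviour.
package base

import (
	"fmt"

	pb "github.com/go-skynet/LocalAI/pkg/grpc/proto"
)

type Base struct{}

// Default that each backend previously had to stub out by hand.
func (b *Base) Embeddings(opts *pb.PredictOptions) ([]float32, error) {
	return nil, fmt.Errorf("unimplemented")
}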
@@ -42,10 +45,6 @@ func (llm *LLM) Load(opts *pb.ModelOptions) error {
 	return err
 }
 
-func (llm *LLM) Embeddings(opts *pb.PredictOptions) ([]float32, error) {
-	return nil, fmt.Errorf("not implemented")
-}
-
 func buildPredictOptions(opts *pb.PredictOptions) []ggllm.PredictOption {
 	predictOptions := []ggllm.PredictOption{
 		ggllm.SetTemperature(float64(opts.Temperature)),
@@ -122,7 +121,7 @@ func (llm *LLM) Predict(opts *pb.PredictOptions) (string, error) {
 	return llm.falcon.Predict(opts.Prompt, buildPredictOptions(opts)...)
 }
 
-func (llm *LLM) PredictStream(opts *pb.PredictOptions, results chan string) {
+func (llm *LLM) PredictStream(opts *pb.PredictOptions, results chan string) error {
 	predictOptions := buildPredictOptions(opts)
 
 	predictOptions = append(predictOptions, ggllm.SetTokenCallback(func(token string) bool {
@@ -140,4 +139,6 @@ func (llm *LLM) PredictStream(opts *pb.PredictOptions, results chan string) {
 		}
 		close(results)
 	}()
+
+	return nil
 }
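
The goroutine launched inside PredictStream still streams tokens through the results channel; the new error return covers failures that happen before streaming starts. A minimal caller sketch follows, assuming the falcon package context above; serveStream and its wiring are illustrative, not part of this commit.

// Hypothetical caller; shows why the new signature matters. The
// surrounding gRPC service can now return an error to the client
// instead of silently dropping the stream.
func serveStream(llm *LLM, opts *pb.PredictOptions) error {
	results := make(chan string)
	if err := llm.PredictStream(opts, results); err != nil {
		return err // e.g. prediction could not be started
	}
	for token := range results {
		_ = token // forward each token over the gRPC stream
	}
	return nil
}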