feat: use gRPC for transformers

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
Ettore Di Giacinto 2023-07-15 01:19:43 +02:00
parent ae533cadef
commit f2f1d7fe72
19 changed files with 518 additions and 258 deletions


@@ -0,0 +1,42 @@
package transformers

// This is a wrapper to satisfy the gRPC service interface
// It is meant to be used by the main executable that is the server for the specific backend type (falcon, gpt3, etc.)
import (
	"fmt"

	pb "github.com/go-skynet/LocalAI/pkg/grpc/proto"
	transformers "github.com/go-skynet/go-ggml-transformers.cpp"
)

type Dolly struct {
	dolly *transformers.Dolly
}

func (llm *Dolly) Load(opts *pb.ModelOptions) error {
	model, err := transformers.NewDolly(opts.Model)
	llm.dolly = model
	return err
}

func (llm *Dolly) Embeddings(opts *pb.PredictOptions) ([]float32, error) {
	return nil, fmt.Errorf("not implemented")
}

func (llm *Dolly) Predict(opts *pb.PredictOptions) (string, error) {
	return llm.dolly.Predict(opts.Prompt, buildPredictOptions(opts)...)
}

// fallback to Predict: streaming is not implemented yet, so the full
// result is sent as a single message before the channel is closed
func (llm *Dolly) PredictStream(opts *pb.PredictOptions, results chan string) {
	go func() {
		res, err := llm.dolly.Predict(opts.Prompt, buildPredictOptions(opts)...)
		if err != nil {
			fmt.Println("err: ", err)
		}
		results <- res
		close(results)
	}()
}
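Each of these wrapper files is meant to be compiled into its own small backend executable that serves the interface over gRPC. Below is a minimal sketch of what such an entrypoint could look like; the grpc.StartServer helper, its signature, the import paths, and the -addr flag are assumptions about the server plumbing, not lines taken from this diff.

package main

// Hypothetical per-backend entrypoint (sketch): serves the Dolly wrapper
// over gRPC. StartServer is assumed to register the model implementation
// against the generated pb service and block serving requests.
import (
	"flag"

	grpc "github.com/go-skynet/LocalAI/pkg/grpc"
	transformers "github.com/go-skynet/LocalAI/pkg/grpc/llm/transformers"
)

var addr = flag.String("addr", "localhost:50051", "the address to listen on")

func main() {
	flag.Parse()

	if err := grpc.StartServer(*addr, &transformers.Dolly{}); err != nil {
		panic(err)
	}
}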


@@ -0,0 +1,42 @@
package transformers

// This is a wrapper to satisfy the gRPC service interface
// It is meant to be used by the main executable that is the server for the specific backend type (falcon, gpt3, etc.)
import (
	"fmt"

	pb "github.com/go-skynet/LocalAI/pkg/grpc/proto"
	transformers "github.com/go-skynet/go-ggml-transformers.cpp"
)

type GPT2 struct {
	gpt2 *transformers.GPT2
}

func (llm *GPT2) Load(opts *pb.ModelOptions) error {
	model, err := transformers.New(opts.Model)
	llm.gpt2 = model
	return err
}

func (llm *GPT2) Embeddings(opts *pb.PredictOptions) ([]float32, error) {
	return nil, fmt.Errorf("not implemented")
}

func (llm *GPT2) Predict(opts *pb.PredictOptions) (string, error) {
	return llm.gpt2.Predict(opts.Prompt, buildPredictOptions(opts)...)
}

// fallback to Predict: streaming is not implemented yet, so the full
// result is sent as a single message before the channel is closed
func (llm *GPT2) PredictStream(opts *pb.PredictOptions, results chan string) {
	go func() {
		res, err := llm.gpt2.Predict(opts.Prompt, buildPredictOptions(opts)...)
		if err != nil {
			fmt.Println("err: ", err)
		}
		results <- res
		close(results)
	}()
}
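Note that PredictStream returns immediately: it spawns its own goroutine and closes the channel itself, so a caller only needs to range over the channel. A hypothetical helper illustrating the caller side (assumed to live in this same package so the imports above apply):

// consumeStream drains a PredictStream channel and prints each chunk.
// With the fallback implementation above it receives the whole
// prediction as a single chunk rather than a token-by-token stream.
func consumeStream(llm *GPT2, opts *pb.PredictOptions) {
	results := make(chan string)
	llm.PredictStream(opts, results) // non-blocking
	for chunk := range results {
		fmt.Print(chunk)
	}
}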


@@ -0,0 +1,42 @@
package transformers

// This is a wrapper to satisfy the gRPC service interface
// It is meant to be used by the main executable that is the server for the specific backend type (falcon, gpt3, etc.)
import (
	"fmt"

	pb "github.com/go-skynet/LocalAI/pkg/grpc/proto"
	transformers "github.com/go-skynet/go-ggml-transformers.cpp"
)

type GPTJ struct {
	gptj *transformers.GPTJ
}

func (llm *GPTJ) Load(opts *pb.ModelOptions) error {
	model, err := transformers.NewGPTJ(opts.Model)
	llm.gptj = model
	return err
}

func (llm *GPTJ) Embeddings(opts *pb.PredictOptions) ([]float32, error) {
	return nil, fmt.Errorf("not implemented")
}

func (llm *GPTJ) Predict(opts *pb.PredictOptions) (string, error) {
	return llm.gptj.Predict(opts.Prompt, buildPredictOptions(opts)...)
}

// fallback to Predict: streaming is not implemented yet, so the full
// result is sent as a single message before the channel is closed
func (llm *GPTJ) PredictStream(opts *pb.PredictOptions, results chan string) {
	go func() {
		res, err := llm.gptj.Predict(opts.Prompt, buildPredictOptions(opts)...)
		if err != nil {
			fmt.Println("err: ", err)
		}
		results <- res
		close(results)
	}()
}


@@ -0,0 +1,42 @@
package transformers

// This is a wrapper to satisfy the gRPC service interface
// It is meant to be used by the main executable that is the server for the specific backend type (falcon, gpt3, etc.)
import (
	"fmt"

	pb "github.com/go-skynet/LocalAI/pkg/grpc/proto"
	transformers "github.com/go-skynet/go-ggml-transformers.cpp"
)

type GPTNeoX struct {
	gptneox *transformers.GPTNeoX
}

func (llm *GPTNeoX) Load(opts *pb.ModelOptions) error {
	model, err := transformers.NewGPTNeoX(opts.Model)
	llm.gptneox = model
	return err
}

func (llm *GPTNeoX) Embeddings(opts *pb.PredictOptions) ([]float32, error) {
	return nil, fmt.Errorf("not implemented")
}

func (llm *GPTNeoX) Predict(opts *pb.PredictOptions) (string, error) {
	return llm.gptneox.Predict(opts.Prompt, buildPredictOptions(opts)...)
}

// fallback to Predict: streaming is not implemented yet, so the full
// result is sent as a single message before the channel is closed
func (llm *GPTNeoX) PredictStream(opts *pb.PredictOptions, results chan string) {
	go func() {
		res, err := llm.gptneox.Predict(opts.Prompt, buildPredictOptions(opts)...)
		if err != nil {
			fmt.Println("err: ", err)
		}
		results <- res
		close(results)
	}()
}


@@ -0,0 +1,42 @@
package transformers

// This is a wrapper to satisfy the gRPC service interface
// It is meant to be used by the main executable that is the server for the specific backend type (falcon, gpt3, etc.)
import (
	"fmt"

	pb "github.com/go-skynet/LocalAI/pkg/grpc/proto"
	transformers "github.com/go-skynet/go-ggml-transformers.cpp"
)

type MPT struct {
	mpt *transformers.MPT
}

func (llm *MPT) Load(opts *pb.ModelOptions) error {
	model, err := transformers.NewMPT(opts.Model)
	llm.mpt = model
	return err
}

func (llm *MPT) Embeddings(opts *pb.PredictOptions) ([]float32, error) {
	return nil, fmt.Errorf("not implemented")
}

func (llm *MPT) Predict(opts *pb.PredictOptions) (string, error) {
	return llm.mpt.Predict(opts.Prompt, buildPredictOptions(opts)...)
}

// fallback to Predict: streaming is not implemented yet, so the full
// result is sent as a single message before the channel is closed
func (llm *MPT) PredictStream(opts *pb.PredictOptions, results chan string) {
	go func() {
		res, err := llm.mpt.Predict(opts.Prompt, buildPredictOptions(opts)...)
		if err != nil {
			fmt.Println("err: ", err)
		}
		results <- res
		close(results)
	}()
}


@@ -0,0 +1,26 @@
package transformers

import (
	pb "github.com/go-skynet/LocalAI/pkg/grpc/proto"
	transformers "github.com/go-skynet/go-ggml-transformers.cpp"
)

func buildPredictOptions(opts *pb.PredictOptions) []transformers.PredictOption {
	predictOptions := []transformers.PredictOption{
		transformers.SetTemperature(float64(opts.Temperature)),
		transformers.SetTopP(float64(opts.TopP)),
		transformers.SetTopK(int(opts.TopK)),
		transformers.SetTokens(int(opts.Tokens)),
		transformers.SetThreads(int(opts.Threads)),
	}

	if opts.Batch != 0 {
		predictOptions = append(predictOptions, transformers.SetBatch(int(opts.Batch)))
	}

	if opts.Seed != 0 {
		predictOptions = append(predictOptions, transformers.SetSeed(int(opts.Seed)))
	}

	return predictOptions
}
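As a usage sketch, the hypothetical helper below (illustrative values, assumed to live in this same package) shows how a pb.PredictOptions message maps onto the library options:

// exampleOptions builds the transformers options for an illustrative
// request. Zero-valued Batch and Seed count as unset and are not
// forwarded to the library.
func exampleOptions() []transformers.PredictOption {
	opts := &pb.PredictOptions{
		Prompt:      "Once upon a time",
		Temperature: 0.7,
		TopP:        0.9,
		TopK:        40,
		Tokens:      128,
		Threads:     4,
	}
	return buildPredictOptions(opts) // five options; Batch and Seed omitted
}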


@@ -0,0 +1,42 @@
package transformers

// This is a wrapper to satisfy the gRPC service interface
// It is meant to be used by the main executable that is the server for the specific backend type (falcon, gpt3, etc.)
import (
	"fmt"

	pb "github.com/go-skynet/LocalAI/pkg/grpc/proto"
	transformers "github.com/go-skynet/go-ggml-transformers.cpp"
)

type Replit struct {
	replit *transformers.Replit
}

func (llm *Replit) Load(opts *pb.ModelOptions) error {
	model, err := transformers.NewReplit(opts.Model)
	llm.replit = model
	return err
}

func (llm *Replit) Embeddings(opts *pb.PredictOptions) ([]float32, error) {
	return nil, fmt.Errorf("not implemented")
}

func (llm *Replit) Predict(opts *pb.PredictOptions) (string, error) {
	return llm.replit.Predict(opts.Prompt, buildPredictOptions(opts)...)
}

// fallback to Predict: streaming is not implemented yet, so the full
// result is sent as a single message before the channel is closed
func (llm *Replit) PredictStream(opts *pb.PredictOptions, results chan string) {
	go func() {
		res, err := llm.replit.Predict(opts.Prompt, buildPredictOptions(opts)...)
		if err != nil {
			fmt.Println("err: ", err)
		}
		results <- res
		close(results)
	}()
}


@@ -0,0 +1,42 @@
package transformers

// This is a wrapper to satisfy the gRPC service interface
// It is meant to be used by the main executable that is the server for the specific backend type (falcon, gpt3, etc.)
import (
	"fmt"

	pb "github.com/go-skynet/LocalAI/pkg/grpc/proto"
	transformers "github.com/go-skynet/go-ggml-transformers.cpp"
)

type Starcoder struct {
	starcoder *transformers.Starcoder
}

func (llm *Starcoder) Load(opts *pb.ModelOptions) error {
	model, err := transformers.NewStarcoder(opts.Model)
	llm.starcoder = model
	return err
}

func (llm *Starcoder) Embeddings(opts *pb.PredictOptions) ([]float32, error) {
	return nil, fmt.Errorf("not implemented")
}

func (llm *Starcoder) Predict(opts *pb.PredictOptions) (string, error) {
	return llm.starcoder.Predict(opts.Prompt, buildPredictOptions(opts)...)
}

// fallback to Predict: streaming is not implemented yet, so the full
// result is sent as a single message before the channel is closed
func (llm *Starcoder) PredictStream(opts *pb.PredictOptions, results chan string) {
	go func() {
		res, err := llm.starcoder.Predict(opts.Prompt, buildPredictOptions(opts)...)
		if err != nil {
			fmt.Println("err: ", err)
		}
		results <- res
		close(results)
	}()
}


@@ -16,7 +16,6 @@ import (
 	"github.com/go-skynet/LocalAI/pkg/tts"
 	bloomz "github.com/go-skynet/bloomz.cpp"
 	bert "github.com/go-skynet/go-bert.cpp"
-	transformers "github.com/go-skynet/go-ggml-transformers.cpp"
 	"github.com/hashicorp/go-multierror"
 	"github.com/hpcloud/tail"
 	"github.com/phayes/freeport"
@@ -55,7 +54,6 @@ var autoLoadBackends []string = []string{
 	LlamaBackend,
 	Gpt4All,
 	RwkvBackend,
-	//GGLLMFalconBackend,
 	WhisperBackend,
 	BertEmbeddingsBackend,
 	GPTNeoXBackend,
@@ -69,40 +67,6 @@ var autoLoadBackends []string = []string{
 	BloomzBackend,
 }
-var starCoder = func(modelFile string) (interface{}, error) {
-	return transformers.NewStarcoder(modelFile)
-}
-var mpt = func(modelFile string) (interface{}, error) {
-	return transformers.NewMPT(modelFile)
-}
-var dolly = func(modelFile string) (interface{}, error) {
-	return transformers.NewDolly(modelFile)
-}
-// func ggllmFalcon(opts ...ggllm.ModelOption) func(string) (interface{}, error) {
-// 	return func(s string) (interface{}, error) {
-// 		return ggllm.New(s, opts...)
-// 	}
-// }
-var gptNeoX = func(modelFile string) (interface{}, error) {
-	return transformers.NewGPTNeoX(modelFile)
-}
-var replit = func(modelFile string) (interface{}, error) {
-	return transformers.NewReplit(modelFile)
-}
-var gptJ = func(modelFile string) (interface{}, error) {
-	return transformers.NewGPTJ(modelFile)
-}
-var falcon = func(modelFile string) (interface{}, error) {
-	return transformers.NewFalcon(modelFile)
-}
 var bertEmbeddings = func(modelFile string) (interface{}, error) {
 	return bert.New(modelFile)
 }
@@ -111,10 +75,6 @@ var bloomzLM = func(modelFile string) (interface{}, error) {
 	return bloomz.New(modelFile)
 }
-var transformersLM = func(modelFile string) (interface{}, error) {
-	return transformers.New(modelFile)
-}
 var stableDiffusion = func(assetDir string) (interface{}, error) {
 	return stablediffusion.New(assetDir)
 }
@@ -261,34 +221,32 @@ func (ml *ModelLoader) BackendLoader(opts ...Option) (model interface{}, err error) {
 	log.Debug().Msgf("Loading model %s from %s", o.backendString, o.modelFile)
 	switch strings.ToLower(o.backendString) {
 	case LlamaBackend:
-		// return ml.LoadModel(o.modelFile, llamaLM(o.llamaOpts...))
 		return ml.LoadModel(o.modelFile, ml.grpcModel(LlamaBackend, o))
 	case BloomzBackend:
 		return ml.LoadModel(o.modelFile, bloomzLM)
 	case GPTJBackend:
-		return ml.LoadModel(o.modelFile, gptJ)
+		return ml.LoadModel(o.modelFile, ml.grpcModel(GPTJBackend, o))
 	case DollyBackend:
-		return ml.LoadModel(o.modelFile, dolly)
+		return ml.LoadModel(o.modelFile, ml.grpcModel(DollyBackend, o))
 	case MPTBackend:
-		return ml.LoadModel(o.modelFile, mpt)
+		return ml.LoadModel(o.modelFile, ml.grpcModel(MPTBackend, o))
 	case Gpt2Backend:
-		return ml.LoadModel(o.modelFile, transformersLM)
+		return ml.LoadModel(o.modelFile, ml.grpcModel(Gpt2Backend, o))
 	case FalconBackend:
 		return ml.LoadModel(o.modelFile, ml.grpcModel(FalconBackend, o))
 	case GPTNeoXBackend:
-		return ml.LoadModel(o.modelFile, gptNeoX)
+		return ml.LoadModel(o.modelFile, ml.grpcModel(GPTNeoXBackend, o))
 	case ReplitBackend:
-		return ml.LoadModel(o.modelFile, replit)
+		return ml.LoadModel(o.modelFile, ml.grpcModel(ReplitBackend, o))
 	case StableDiffusionBackend:
 		return ml.LoadModel(o.modelFile, stableDiffusion)
 	case PiperBackend:
 		return ml.LoadModel(o.modelFile, piperTTS(filepath.Join(o.assetDir, "backend-assets", "espeak-ng-data")))
 	case StarcoderBackend:
-		return ml.LoadModel(o.modelFile, starCoder)
+		return ml.LoadModel(o.modelFile, ml.grpcModel(StarcoderBackend, o))
 	case Gpt4AllLlamaBackend, Gpt4AllMptBackend, Gpt4AllJBackend, Gpt4All:
 		o.gRPCOptions.LibrarySearchPath = filepath.Join(o.assetDir, "backend-assets", "gpt4all")
 		return ml.LoadModel(o.modelFile, ml.grpcModel(Gpt4All, o))
-		// return ml.LoadModel(o.modelFile, gpt4allLM(gpt4all.SetThreads(int(o.threads)), gpt4all.SetLibrarySearchPath(filepath.Join(o.assetDir, "backend-assets", "gpt4all"))))
 	case BertEmbeddingsBackend:
 		return ml.LoadModel(o.modelFile, bertEmbeddings)
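For reference, a hypothetical caller of BackendLoader after this change; the WithBackendString and WithModelFile option constructors are assumptions about the loader's option API and do not appear in this diff:

// loadGPTJ sketches the call site: for transformers-family backends the
// loader now hands back a gRPC-backed client rather than an in-process
// model handle.
func loadGPTJ(ml *ModelLoader) (interface{}, error) {
	return ml.BackendLoader(
		WithBackendString(GPTJBackend),  // assumed option constructor
		WithModelFile("ggml-model.bin"), // illustrative file name
	)
}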