mirror of
https://github.com/mudler/LocalAI.git
synced 2025-05-20 02:24:59 +00:00
refactor: backend/service split, channel-based llm flow (#1963)
Refactor: channel based llm flow and services split --------- Signed-off-by: Dave Lee <dave@gray101.com>
This commit is contained in:
parent
1981154f49
commit
eed5706994
52 changed files with 3064 additions and 2279 deletions
|
@@ -81,7 +81,7 @@ func (ml *ModelLoader) grpcModel(backend string, o *Options) func(string, string
 		if _, err := os.Stat(uri); err == nil {
 			serverAddress, err := getFreeAddress()
 			if err != nil {
-				return "", fmt.Errorf("failed allocating free ports: %s", err.Error())
+				return "", fmt.Errorf("%s failed allocating free ports: %s", backend, err.Error())
 			}
 			// Make sure the process is executable
 			if err := ml.startProcess(uri, o.model, serverAddress); err != nil {
@@ -134,7 +134,7 @@ func (ml *ModelLoader) grpcModel(backend string, o *Options) func(string, string
 	if !ready {
 		log.Debug().Msgf("GRPC Service NOT ready")
-		return "", fmt.Errorf("grpc service not ready")
+		return "", fmt.Errorf("%s grpc service not ready", backend)
 	}

 	options := *o.gRPCOptions
@@ -145,10 +145,10 @@ func (ml *ModelLoader) grpcModel(backend string, o *Options) func(string, string
 	res, err := client.GRPC(o.parallelRequests, ml.wd).LoadModel(o.context, &options)
 	if err != nil {
-		return "", fmt.Errorf("could not load model: %w", err)
+		return "", fmt.Errorf("\"%s\" could not load model: %w", backend, err)
 	}
 	if !res.Success {
-		return "", fmt.Errorf("could not load model (no success): %s", res.Message)
+		return "", fmt.Errorf("\"%s\" could not load model (no success): %s", backend, res.Message)
 	}

 	return client, nil
Loading…
Add table
Add a link
Reference in a new issue