mirror of https://github.com/mudler/LocalAI.git, synced 2025-06-29 22:20:43 +00:00
Fix 'hang' on empty message from the start
Seems like that empty message marker trick was unnecessary
parent e459118de6
commit f98775ef5c
2 changed files with 2 additions and 9 deletions
@@ -118,7 +118,7 @@ func ModelInference(ctx context.Context, s string, messages []schema.Message, im
 	var partialRune []byte
 	err := inferenceModel.PredictStream(ctx, opts, func(reply *proto.Reply) {
-		msg := reply.GetMessage()
+		msg := reply.Message
 		partialRune = append(partialRune, msg...)

 		tokenUsage.Prompt = int(reply.PromptTokens)
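The callback appends the raw reply bytes to partialRune instead of forwarding msg directly, presumably so that multi-byte UTF-8 runes split across streamed replies are only surfaced once they are complete. A minimal, self-contained sketch of that buffering pattern follows; the function and callback names (flushCompleteRunes, emit) are illustrative and not part of LocalAI's API.

// Sketch of the rune-buffering pattern the streaming callback appears to rely on.
// Illustrative names only; this is not LocalAI's implementation.
package main

import (
	"fmt"
	"unicode/utf8"
)

// flushCompleteRunes emits every complete UTF-8 rune at the front of buf and
// returns whatever trailing bytes still form an incomplete sequence.
func flushCompleteRunes(buf []byte, emit func(string)) []byte {
	for len(buf) > 0 {
		r, size := utf8.DecodeRune(buf)
		if r == utf8.RuneError && size <= 1 {
			// Incomplete (or invalid) sequence: keep it and wait for more bytes.
			break
		}
		emit(string(r))
		buf = buf[size:]
	}
	return buf
}

func main() {
	var pending []byte
	// "é" (0xC3 0xA9) arrives split across two streamed replies.
	for _, msg := range [][]byte{{0xC3}, {0xA9, 'o', 'k'}} {
		pending = append(pending, msg...)
		pending = flushCompleteRunes(pending, func(tok string) { fmt.Print(tok) })
	}
	fmt.Println() // prints "éok"
}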
@@ -39,15 +39,11 @@ func ChatEndpoint(cl *config.BackendConfigLoader, ml *model.ModelLoader, startup
 		responses <- initialMessage

 		ComputeChoices(req, s, config, startupOptions, loader, func(s string, c *[]schema.Choice) {}, func(s string, usage backend.TokenUsage) bool {
-			choices := []schema.Choice{}
-			if s != "" {
-				choices = append(choices, schema.Choice{Delta: &schema.Message{Content: &s}, Index: 0})
-			}
 			resp := schema.OpenAIResponse{
 				ID:      id,
 				Created: created,
 				Model:   req.Model, // we have to return what the user sent here, due to OpenAI spec.
-				Choices: choices,
+				Choices: []schema.Choice{{Delta: &schema.Message{Content: &s}, Index: 0}},
 				Object:  "chat.completion.chunk",
 				Usage: schema.OpenAIUsage{
 					PromptTokens: usage.Prompt,
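The removed s != "" guard is the "empty message marker trick" from the commit message: empty tokens used to produce a chunk with no choices at all. A chunk whose delta content is simply the empty string still serializes to a well-formed chat.completion.chunk, which seems to be why the guard can go. A small stand-alone sketch of that payload; the field names mirror the OpenAI wire format rather than LocalAI's schema package, and the ID and model values are made up for the example.

// Sketch only: an empty-content delta still yields a valid chat.completion.chunk body.
package main

import (
	"encoding/json"
	"fmt"
)

type delta struct {
	Content *string `json:"content,omitempty"`
}

type choice struct {
	Delta *delta `json:"delta,omitempty"`
	Index int    `json:"index"`
}

type chunk struct {
	ID      string   `json:"id"`
	Object  string   `json:"object"`
	Model   string   `json:"model"`
	Choices []choice `json:"choices"`
}

func main() {
	s := "" // empty token from the very start of the stream
	c := chunk{
		ID:      "chatcmpl-123",
		Object:  "chat.completion.chunk",
		Model:   "example-model",
		Choices: []choice{{Delta: &delta{Content: &s}, Index: 0}},
	}
	b, _ := json.Marshal(c)
	fmt.Println(string(b)) // {"id":"chatcmpl-123",...,"choices":[{"delta":{"content":""},"index":0}]}
}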
@@ -469,9 +465,6 @@ func ChatEndpoint(cl *config.BackendConfigLoader, ml *model.ModelLoader, startup
 		toolsCalled := false
 		for ev := range responses {
 			usage = &ev.Usage // Copy a pointer to the latest usage chunk so that the stop message can reference it
-			if len(ev.Choices) == 0 {
-				break
-			}
 			if len(ev.Choices[0].Delta.ToolCalls) > 0 {
 				toolsCalled = true
 			}
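With the len(ev.Choices) == 0 break removed, the reader keeps draining the responses channel even when a chunk carries no visible content. One plausible way the old break could surface as the "hang" in the commit title is the sending goroutine staying blocked on an unbuffered channel once the reader stops ranging over it. The sketch below illustrates that general pattern with made-up types; it is not LocalAI's actual wiring.

// Illustrative sketch: breaking out of `range responses` on the first empty event
// would leave the sender blocked on the unbuffered channel; draining every event,
// as the patched loop does, lets the stream run to completion.
package main

import "fmt"

type event struct{ content string }

func main() {
	responses := make(chan event) // unbuffered, like a streaming pipeline
	go func() {
		defer close(responses)
		for _, c := range []string{"", "Hel", "lo"} { // first chunk is empty
			responses <- event{content: c}
		}
	}()

	var out string
	for ev := range responses {
		// Keep reading even when content is empty instead of breaking early.
		out += ev.content
	}
	fmt.Println(out) // Hello
}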