Fix 'hang' on empty message from the start

Seems like that empty-message marker trick was unnecessary.
commit f98775ef5c (parent e459118de6)
Author: mintyleaf
Date: 2024-12-16 23:23:45 +04:00

2 changed files with 2 additions and 9 deletions

@@ -118,7 +118,7 @@ func ModelInference(ctx context.Context, s string, messages []schema.Message, im
 	var partialRune []byte
 	err := inferenceModel.PredictStream(ctx, opts, func(reply *proto.Reply) {
-		msg := reply.GetMessage()
+		msg := reply.Message
 		partialRune = append(partialRune, msg...)
 		tokenUsage.Prompt = int(reply.PromptTokens)
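The only behavioural difference between the removed and added lines is nil-safety: assuming proto.Reply is an ordinary protoc-gen-go message, the generated GetMessage() getter tolerates a nil receiver, while reply.Message would panic on one. A minimal sketch of that assumed shape:

package proto

// Sketch of the assumed generated type; the real proto.Reply comes from
// protoc-gen-go and carries more fields than shown here.
type Reply struct {
	Message      []byte
	PromptTokens int32
}

// Generated getters are nil-safe: a nil *Reply returns the zero value
// instead of panicking on field access.
func (r *Reply) GetMessage() []byte {
	if r != nil {
		return r.Message
	}
	return nil
}

Assuming PredictStream never invokes the callback with a nil reply, the two spellings behave identically here and the change is cosmetic.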

@@ -39,15 +39,11 @@ func ChatEndpoint(cl *config.BackendConfigLoader, ml *model.ModelLoader, startup
 	responses <- initialMessage
 	ComputeChoices(req, s, config, startupOptions, loader, func(s string, c *[]schema.Choice) {}, func(s string, usage backend.TokenUsage) bool {
-		choices := []schema.Choice{}
-		if s != "" {
-			choices = append(choices, schema.Choice{Delta: &schema.Message{Content: &s}, Index: 0})
-		}
 		resp := schema.OpenAIResponse{
 			ID:      id,
 			Created: created,
 			Model:   req.Model, // we have to return what the user sent here, due to OpenAI spec.
-			Choices: choices,
+			Choices: []schema.Choice{{Delta: &schema.Message{Content: &s}, Index: 0}},
 			Object:  "chat.completion.chunk",
 			Usage: schema.OpenAIUsage{
 				PromptTokens: usage.Prompt,
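The lines removed above were the "marker trick" from the parent commit: an empty token produced a chunk with zero choices, so the shape of the chunk doubled as a stream-state signal. The restored line emits exactly one choice per chunk regardless of content. A hedged before/after sketch, using hypothetical stripped-down types rather than the real schema package:

// Hypothetical minimal stand-ins for schema.Message and schema.Choice.
type Message struct{ Content *string }
type Choice struct {
	Delta *Message
	Index int
}

// Reverted behaviour: an empty token yields a chunk with no choices,
// overloading the chunk's shape as an end-of-stream marker.
func choicesWithMarker(s string) []Choice {
	choices := []Choice{}
	if s != "" {
		choices = append(choices, Choice{Delta: &Message{Content: &s}, Index: 0})
	}
	return choices
}

// Restored behaviour: every token, empty or not, yields exactly one
// choice, so consumers can index Choices[0] unconditionally.
func choicesAlways(s string) []Choice {
	return []Choice{{Delta: &Message{Content: &s}, Index: 0}}
}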
@@ -469,9 +465,6 @@ func ChatEndpoint(cl *config.BackendConfigLoader, ml *model.ModelLoader, startup
 	toolsCalled := false
 	for ev := range responses {
 		usage = &ev.Usage // Copy a pointer to the latest usage chunk so that the stop message can reference it
-		if len(ev.Choices) == 0 {
-			break
-		}
 		if len(ev.Choices[0].Delta.ToolCalls) > 0 {
 			toolsCalled = true
 		}
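Together, the two ChatEndpoint hunks explain the "hang": when the very first chunk on the channel carried no choices (an empty message from the start), this loop broke out immediately and everything streamed afterwards was dropped. Removing the guard is safe because the producer now always attaches one choice per chunk, so the ev.Choices[0] access below the removed lines cannot panic. A self-contained sketch of the old failure mode, with hypothetical minimal types:

package main

import "fmt"

// Hypothetical minimal stand-ins for the real response types.
type Choice struct{ Content string }
type Chunk struct{ Choices []Choice }

func main() {
	responses := make(chan Chunk, 2)
	responses <- Chunk{}                                      // first token was empty: old producer sent no choices
	responses <- Chunk{Choices: []Choice{{Content: "Hello"}}} // real content follows
	close(responses)

	for ev := range responses {
		if len(ev.Choices) == 0 {
			break // old guard: reads the empty chunk as end-of-stream
		}
		fmt.Print(ev.Choices[0].Content)
	}
	// Prints nothing: the loop exits before "Hello" is ever consumed,
	// so the client sees an empty, seemingly hung response.
}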