feat: include tokens usage for streamed output (#4282)

Use pb.Reply instead of []byte (retrieving the text via Reply.GetMessage()) in the llama gRPC client, so that in reply streaming mode the proper usage data is still available at the last [DONE] frame.

Co-authored-by: Ettore Di Giacinto <mudler@users.noreply.github.com>
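
For context, a minimal runnable sketch of the callback shape this change implies. The `Reply` type below is a hand-written stand-in for the generated `pb.Reply` message, and the `Tokens`/`PromptTokens` field names are assumptions for illustration, not the actual proto:

```go
// Sketch only: illustrates the shape of the callback change, not the exact
// LocalAI code. Types and field names are stand-ins.
package main

import "fmt"

// Assumed subset of the generated pb.Reply message.
type Reply struct {
	Message      []byte
	Tokens       int32 // completion tokens, populated on the final frame
	PromptTokens int32
}

func (r *Reply) GetMessage() []byte { return r.Message }

// Before: the stream callback only ever saw the message bytes, so the
// token counts carried by the final frame were dropped.
func streamBefore(frames []*Reply, onToken func(b []byte)) {
	for _, f := range frames {
		onToken(f.GetMessage())
	}
}

// After: the callback receives the whole Reply, so the last frame's
// usage counters remain available to the caller.
func streamAfter(frames []*Reply, onReply func(r *Reply)) {
	for _, f := range frames {
		onReply(f)
	}
}

func main() {
	frames := []*Reply{
		{Message: []byte("Hel")},
		{Message: []byte("lo")},
		{Tokens: 2, PromptTokens: 5}, // final frame: empty message, usage only
	}
	streamAfter(frames, func(r *Reply) {
		fmt.Printf("msg=%q tokens=%d prompt=%d\n", r.GetMessage(), r.Tokens, r.PromptTokens)
	})
}
```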
commit 0d6c3a7d57 (parent e001fada6c)
Author: mintyleaf, 2024-11-28 17:47:56 +04:00 (committed by GitHub)
5 changed files with 25 additions and 10 deletions

@@ -39,11 +39,15 @@ func ChatEndpoint(cl *config.BackendConfigLoader, ml *model.ModelLoader, startup
 			responses <- initialMessage
 
 			ComputeChoices(req, s, config, startupOptions, loader, func(s string, c *[]schema.Choice) {}, func(s string, usage backend.TokenUsage) bool {
+				choices := []schema.Choice{}
+				if s != "" {
+					choices = append(choices, schema.Choice{Delta: &schema.Message{Content: &s}, Index: 0})
+				}
 				resp := schema.OpenAIResponse{
 					ID:      id,
 					Created: created,
 					Model:   req.Model, // we have to return what the user sent here, due to OpenAI spec.
-					Choices: []schema.Choice{{Delta: &schema.Message{Content: &s}, Index: 0}},
+					Choices: choices,
 					Object:  "chat.completion.chunk",
 					Usage: schema.OpenAIUsage{
 						PromptTokens: usage.Prompt,
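
Taken together, the two hunks mean the usage-carrying frame emitted just before [DONE] now serializes with an empty choices array, which matches the shape OpenAI uses for its stream_options include_usage chunks. A hedged sketch of what that final frame looks like on the wire, using stand-in structs rather than the real schema package (the ID value is illustrative):

```go
// Hedged sketch: the final usage-only chunk, with stand-in struct
// definitions (not the real LocalAI schema package).
package main

import (
	"encoding/json"
	"fmt"
)

type usage struct {
	PromptTokens     int `json:"prompt_tokens"`
	CompletionTokens int `json:"completion_tokens"`
	TotalTokens      int `json:"total_tokens"`
}

type chunk struct {
	ID      string `json:"id"`
	Object  string `json:"object"`
	Choices []any  `json:"choices"`
	Usage   usage  `json:"usage"`
}

func main() {
	final := chunk{
		ID:      "chatcmpl-123", // illustrative ID
		Object:  "chat.completion.chunk",
		Choices: []any{}, // empty: the last frame carries no delta text
		Usage:   usage{PromptTokens: 5, CompletionTokens: 2, TotalTokens: 7},
	}
	b, _ := json.Marshal(final)
	fmt.Println(string(b)) // the frame emitted just before [DONE]
}
```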
@@ -465,6 +469,9 @@ func ChatEndpoint(cl *config.BackendConfigLoader, ml *model.ModelLoader, startup
 		toolsCalled := false
 		for ev := range responses {
 			usage = &ev.Usage // Copy a pointer to the latest usage chunk so that the stop message can reference it
+			if len(ev.Choices) == 0 {
+				break
+			}
 			if len(ev.Choices[0].Delta.ToolCalls) > 0 {
 				toolsCalled = true
 			}
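
The new guard in the consumer loop is needed because the final usage-only chunk arrives with no choices, so the pre-existing `ev.Choices[0]` index would panic on it. A minimal self-contained sketch of the pattern, with stand-in types rather than the real schema:

```go
// Sketch of why the empty-choices guard matters: the terminating chunk has
// an empty Choices slice, so indexing Choices[0] unconditionally would panic.
package main

import "fmt"

type choice struct{ Delta string }
type response struct {
	Choices []choice
	Usage   int // stand-in for the usage struct
}

func consume(responses <-chan response) (lastUsage int) {
	for ev := range responses {
		lastUsage = ev.Usage // keep the most recent usage for the stop message
		if len(ev.Choices) == 0 {
			break // usage-only terminator: nothing left to read
		}
		fmt.Print(ev.Choices[0].Delta)
	}
	return lastUsage
}

func main() {
	ch := make(chan response, 3)
	ch <- response{Choices: []choice{{Delta: "Hel"}}}
	ch <- response{Choices: []choice{{Delta: "lo"}}, Usage: 6}
	ch <- response{Usage: 7} // final frame: no choices, usage only
	close(ch)
	fmt.Printf("\nusage=%d\n", consume(ch))
}
```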