feat: stream tokens usage (#4415)

* Use pb.Reply instead of []byte (previously only the Reply.GetMessage() bytes were forwarded) in the llama gRPC streaming path, so the proper usage data reaches the caller in streaming mode on the last [DONE] frame (see the sketch before the diff below)

* Fix the 'hang' on an empty message at the start of the stream

That empty-message marker trick seems to have been unnecessary

---------

Co-authored-by: Ettore Di Giacinto <mudler@users.noreply.github.com>
mintyleaf authored on 2024-12-18 12:48:50 +04:00, committed by GitHub
parent fc920cc58a
commit 2bc4b56a79
4 changed files with 17 additions and 9 deletions
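For readers skimming the diff below, here is an illustrative, self-contained Go sketch of the callback-contract change the first bullet describes. Only the Reply field names (Message, Tokens, PromptTokens) mirror what appears in the diff; the Reply struct itself, the replyStream interface, and the predictStream functions are simplified stand-ins invented for this sketch, not the project's actual generated pb types or gRPC client code.

package sketch

import (
	"errors"
	"io"
)

// Stand-in for the generated pb.Reply message; the real definition lives in
// the project's proto package.
type Reply struct {
	Message      []byte
	Tokens       int32
	PromptTokens int32
}

// Stand-in for the server-streaming receive side of the gRPC call.
type replyStream interface {
	Recv() (*Reply, error)
}

// Before (roughly): the callback only received the message bytes, so the
// usage counters set on the final frame never reached the caller.
func predictStreamOld(s replyStream, f func(chunk []byte)) error {
	for {
		reply, err := s.Recv()
		if errors.Is(err, io.EOF) {
			return nil
		}
		if err != nil {
			return err
		}
		f(reply.Message)
	}
}

// After (roughly): the callback receives the whole reply, so the caller can
// read reply.PromptTokens / reply.Tokens on every frame, including the last
// one whose Message is empty.
func predictStream(s replyStream, f func(reply *Reply)) error {
	for {
		reply, err := s.Recv()
		if errors.Is(err, io.EOF) {
			return nil
		}
		if err != nil {
			return err
		}
		f(reply)
	}
}

Passing the whole reply also means future per-frame metadata can be surfaced later without changing the callback signature again.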


@@ -117,8 +117,12 @@ func ModelInference(ctx context.Context, s string, messages []schema.Message, im
 			ss := ""
 
 			var partialRune []byte
-			err := inferenceModel.PredictStream(ctx, opts, func(chars []byte) {
-				partialRune = append(partialRune, chars...)
+			err := inferenceModel.PredictStream(ctx, opts, func(reply *proto.Reply) {
+				msg := reply.Message
+				partialRune = append(partialRune, msg...)
+
+				tokenUsage.Prompt = int(reply.PromptTokens)
+				tokenUsage.Completion = int(reply.Tokens)
 
 				for len(partialRune) > 0 {
 					r, size := utf8.DecodeRune(partialRune)
@@ -132,6 +136,10 @@ func ModelInference(ctx context.Context, s string, messages []schema.Message, im
 
 					partialRune = partialRune[size:]
 				}
+
+				if len(msg) == 0 {
+					tokenCallback("", tokenUsage)
+				}
 			})
 			return LLMResponse{
 				Response: ss,
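And a hypothetical, self-contained usage example of the contract the change above establishes: the final streamed frame carries an empty Message plus the usage counters, and the added len(msg) == 0 branch keeps tokenCallback firing for that frame, so a consumer still receives the totals before the stream closes. The frame, tokenUsage, and run names below are invented for the example, not taken from the codebase.

package main

import "fmt"

// frame stands in for one streamed reply: text plus usage counters.
type frame struct {
	message      string
	promptTokens int
	tokens       int
}

type tokenUsage struct{ Prompt, Completion int }

// run replays frames through a callback the way the streaming handler does:
// text frames forward their content, and the empty final frame forwards ""
// so the usage attached to it is not dropped.
func run(frames []frame, tokenCallback func(token string, u tokenUsage)) {
	for _, f := range frames {
		u := tokenUsage{Prompt: f.promptTokens, Completion: f.tokens}
		if len(f.message) == 0 {
			tokenCallback("", u) // final "[DONE]"-style frame: usage only
			continue
		}
		tokenCallback(f.message, u)
	}
}

func main() {
	frames := []frame{
		{message: "Hel"},
		{message: "lo"},
		{message: "", promptTokens: 11, tokens: 2}, // last frame: counters only
	}
	var text string
	var last tokenUsage
	run(frames, func(tok string, u tokenUsage) {
		text += tok
		last = u // the last invocation carries the final counters
	})
	fmt.Println(text, last.Prompt, last.Completion) // Hello 11 2
}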