feat: include token usage for streamed output (#4282)

Pass the full pb.Reply to the stream callback instead of the []byte from Reply.GetMessage() in the llama gRPC client, so that the proper usage data reaches the caller in streaming mode on the last [DONE] frame (a usage sketch follows the diff below).

Co-authored-by: Ettore Di Giacinto <mudler@users.noreply.github.com>
mintyleaf 2024-11-28 17:47:56 +04:00 committed by GitHub
parent e001fada6c
commit 0d6c3a7d57
5 changed files with 25 additions and 10 deletions

@@ -136,7 +136,7 @@ func (c *Client) LoadModel(ctx context.Context, in *pb.ModelOptions, opts ...grp
return client.LoadModel(ctx, in, opts...)
}
-func (c *Client) PredictStream(ctx context.Context, in *pb.PredictOptions, f func(s []byte), opts ...grpc.CallOption) error {
+func (c *Client) PredictStream(ctx context.Context, in *pb.PredictOptions, f func(reply *pb.Reply), opts ...grpc.CallOption) error {
if !c.parallel {
c.opMutex.Lock()
defer c.opMutex.Unlock()
@@ -158,7 +158,7 @@ func (c *Client) PredictStream(ctx context.Context, in *pb.PredictOptions, f fun
}
for {
-feature, err := stream.Recv()
+reply, err := stream.Recv()
if err == io.EOF {
break
}
@@ -167,7 +167,7 @@ func (c *Client) PredictStream(ctx context.Context, in *pb.PredictOptions, f fun
return err
}
-f(feature.GetMessage())
+f(reply)
}
return nil
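
For context, a minimal sketch of how a caller might consume the new callback signature. This is not part of the commit: the streamWithUsage helper, the exact import path, and the usage getters on pb.Reply (GetTokens, GetPromptTokens) are assumptions for illustration; only GetMessage() and the PredictStream signature appear in the diff above.

// Sketch only: assumes this sits alongside Client in pkg/grpc, with the
// generated proto package imported as pb.
package grpc

import (
	"context"
	"fmt"

	pb "github.com/mudler/LocalAI/pkg/grpc/proto" // assumed import path
)

func streamWithUsage(ctx context.Context, c *Client, in *pb.PredictOptions) error {
	return c.PredictStream(ctx, in, func(reply *pb.Reply) {
		// Each frame still carries the generated text chunk, exactly as the
		// old f(feature.GetMessage()) callback delivered it.
		fmt.Print(string(reply.GetMessage()))

		// With the full Reply in hand, the caller can also read token
		// counters on the final frame before [DONE]; these were dropped
		// when only the message bytes were forwarded. Getter names here
		// are assumed, not taken from this diff.
		if reply.GetTokens() > 0 {
			fmt.Printf("\nusage: prompt=%d completion=%d\n",
				reply.GetPromptTokens(), reply.GetTokens())
		}
	})
}

The design point is simply to pass the richer protobuf message through the callback boundary instead of flattening it to bytes, so any fields the backend sets on Reply reach callers without further signature changes.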