feat: stream tokens usage (#4415)

* Use pb.Reply instead of []byte (previously extracted via Reply.GetMessage()) in the llama gRPC backend, so the proper usage data is available in streaming mode at the last [DONE] frame (see the consumer sketch after this list)

* Fix the 'hang' on an empty message at the start of the stream

It seems that the empty-message marker trick was unnecessary
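
For context, a minimal sketch of a consumer of the new callback shape. This is illustrative, not the project's actual code: the import path and the usage getters (GetTokens, GetPromptTokens, and the corresponding Tokens/PromptTokens fields) are assumptions based on this PR's intent, not verified against the generated proto code.

package main

import (
	"fmt"

	pb "github.com/mudler/LocalAI/pkg/grpc/proto" // assumed import path
)

// onReply is the new-style streaming callback: it receives the whole
// *pb.Reply rather than only the reply.GetMessage() bytes, so the final
// [DONE] frame can carry usage counters in addition to (or instead of) text.
func onReply(reply *pb.Reply) {
	if msg := reply.GetMessage(); len(msg) > 0 {
		fmt.Print(string(msg))
	}
	// GetTokens/GetPromptTokens are assumed getter names for the usage
	// fields this PR streams; with the old func(s []byte) callback this
	// data was dropped before it ever reached the caller.
	if reply.GetTokens() > 0 {
		fmt.Printf("\nusage: prompt_tokens=%d completion_tokens=%d\n",
			reply.GetPromptTokens(), reply.GetTokens())
	}
}

func main() {
	// Simulate two content frames followed by a usage-only final frame.
	onReply(&pb.Reply{Message: []byte("Hello, ")})
	onReply(&pb.Reply{Message: []byte("world!")})
	onReply(&pb.Reply{Tokens: 2, PromptTokens: 5})
}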

---------

Co-authored-by: Ettore Di Giacinto <mudler@users.noreply.github.com>
Author: mintyleaf (committed by GitHub)
Date: 2024-12-18 12:48:50 +04:00
parent fc920cc58a
commit 2bc4b56a79
4 changed files with 17 additions and 9 deletions


@@ -35,7 +35,7 @@ func (e *embedBackend) LoadModel(ctx context.Context, in *pb.ModelOptions, opts
 	return e.s.LoadModel(ctx, in)
 }
 
-func (e *embedBackend) PredictStream(ctx context.Context, in *pb.PredictOptions, f func(s []byte), opts ...grpc.CallOption) error {
+func (e *embedBackend) PredictStream(ctx context.Context, in *pb.PredictOptions, f func(reply *pb.Reply), opts ...grpc.CallOption) error {
 	bs := &embedBackendServerStream{
 		ctx: ctx,
 		fn:  f,
@@ -97,11 +97,11 @@ func (e *embedBackend) GetTokenMetrics(ctx context.Context, in *pb.MetricsReques
 type embedBackendServerStream struct {
 	ctx context.Context
-	fn  func(s []byte)
+	fn  func(reply *pb.Reply)
 }
 
 func (e *embedBackendServerStream) Send(reply *pb.Reply) error {
-	e.fn(reply.GetMessage())
+	e.fn(reply)
 	return nil
 }
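
For illustration only, a hypothetical test-style sketch of the Send change above, assuming it lives in the same test package as the diff (with context, testing, and the pb proto package imported): the stream now hands the callback the whole *pb.Reply, so no field is lost in forwarding, whereas previously only reply.GetMessage() survived.

// Hypothetical test sketch, not part of this commit: verifies that
// Send forwards the reply object intact to the new-style callback.
func TestSendForwardsWholeReply(t *testing.T) {
	var got *pb.Reply
	bs := &embedBackendServerStream{
		ctx: context.Background(),
		fn:  func(reply *pb.Reply) { got = reply },
	}
	if err := bs.Send(&pb.Reply{Message: []byte("token")}); err != nil {
		t.Fatal(err)
	}
	if got == nil || string(got.GetMessage()) != "token" {
		t.Fatalf("reply not forwarded intact: %+v", got)
	}
}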