mirror of https://github.com/mudler/LocalAI.git
synced 2025-05-20 18:45:00 +00:00
feat: add stream events (#152)

This commit is contained in:
parent 0a00a4b58e
commit 220d6fd59b

3 changed files with 67 additions and 46 deletions
.github/workflows/image.yml (vendored): 4 changes
@@ -54,8 +54,8 @@ jobs:
         uses: docker/login-action@v2
         with:
           registry: quay.io
-          username: ${{ secrets.QUAY_USERNAME }}
-          password: ${{ secrets.QUAY_PASSWORD }}
+          username: ${{ secrets.LOCALAI_REGISTRY_USERNAME }}
+          password: ${{ secrets.LOCALAI_REGISTRY_PASSWORD }}
       - name: Build
         if: github.event_name != 'pull_request'
         uses: docker/build-push-action@v4
api/api.go

@@ -2,6 +2,7 @@ package api

 import (
 	"bufio"
+	"bytes"
 	"encoding/json"
 	"fmt"
 	"os"
@@ -245,7 +246,7 @@ func completionEndpoint(cm ConfigMerger, debug bool, loader *model.ModelLoader,

 	result, err := ComputeChoices(predInput, input, config, loader, func(s string, c *[]Choice) {
 		*c = append(*c, Choice{Text: s})
-	})
+	}, nil)
 	if err != nil {
 		return err
 	}
@@ -290,8 +291,9 @@ func chatEndpoint(cm ConfigMerger, debug bool, loader *model.ModelLoader, thread

 	if input.Stream {
 		log.Debug().Msgf("Stream request received")
+		c.Context().SetContentType("text/event-stream")
 		//c.Response().Header.SetContentType(fiber.MIMETextHTMLCharsetUTF8)
-		c.Set("Content-Type", "text/event-stream; charset=utf-8")
+		// c.Set("Content-Type", "text/event-stream")
 		c.Set("Cache-Control", "no-cache")
 		c.Set("Connection", "keep-alive")
 		c.Set("Transfer-Encoding", "chunked")
@@ -312,13 +314,52 @@ func chatEndpoint(cm ConfigMerger, debug bool, loader *model.ModelLoader, thread
 		log.Debug().Msgf("Template found, input modified to: %s", predInput)
 	}

+	if input.Stream {
+		responses := make(chan OpenAIResponse)
+
+		go func() {
+			ComputeChoices(predInput, input, config, loader, func(s string, c *[]Choice) {}, func(s string) bool {
+				resp := OpenAIResponse{
+					Model:   input.Model, // we have to return what the user sent here, due to OpenAI spec.
+					Choices: []Choice{{Delta: &Message{Role: "assistant", Content: s}}},
+					Object:  "chat.completion.chunk",
+				}
+
+				responses <- resp
+				return true
+			})
+			close(responses)
+		}()
+
+		c.Context().SetBodyStreamWriter(fasthttp.StreamWriter(func(w *bufio.Writer) {
+
+			for ev := range responses {
+				var buf bytes.Buffer
+				enc := json.NewEncoder(&buf)
+				enc.Encode(ev)
+
+				fmt.Fprintf(w, "event: data\n\n")
+				fmt.Fprintf(w, "data: %v\n\n", buf.String())
+				log.Debug().Msgf("Sending chunk: %s", buf.String())
+				w.Flush()
+			}
+
+			w.WriteString("event: data\n\n")
+			resp := &OpenAIResponse{
+				Model:   input.Model, // we have to return what the user sent here, due to OpenAI spec.
+				Choices: []Choice{{FinishReason: "stop"}},
+			}
+			respData, _ := json.Marshal(resp)
+
+			w.WriteString(fmt.Sprintf("data: %s\n\n", respData))
+			w.Flush()
+		}))
+		return nil
+	}
+
 	result, err := ComputeChoices(predInput, input, config, loader, func(s string, c *[]Choice) {
-		if input.Stream {
-			*c = append(*c, Choice{Delta: &Message{Role: "assistant", Content: s}})
-		} else {
-			*c = append(*c, Choice{Message: &Message{Role: "assistant", Content: s}})
-		}
-	})
+		*c = append(*c, Choice{Message: &Message{Role: "assistant", Content: s}})
+	}, nil)
 	if err != nil {
 		return err
 	}
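Aside: the hunk above is the heart of the change, so here is a minimal, self-contained sketch of the same producer/consumer shape: a goroutine pushes chunks into a channel while a buffered writer drains them as server-sent events. The chunk type, the hardcoded token list, and stdout standing in for the HTTP stream are illustrative, not code from the commit.

// Minimal sketch (not from the commit) of the channel-plus-stream-writer pattern.
package main

import (
	"bufio"
	"bytes"
	"encoding/json"
	"fmt"
	"os"
)

type chunk struct {
	Object       string `json:"object"`
	Content      string `json:"content,omitempty"`
	FinishReason string `json:"finish_reason,omitempty"`
}

func main() {
	responses := make(chan chunk)

	// Producer: one chunk per token, then close so the consumer's range ends.
	go func() {
		for _, tok := range []string{"Hello", ", ", "world"} {
			responses <- chunk{Object: "chat.completion.chunk", Content: tok}
		}
		close(responses)
	}()

	// Consumer: frame each chunk as an SSE "data:" event and flush
	// immediately so the client sees tokens as they are generated.
	w := bufio.NewWriter(os.Stdout) // stands in for the fasthttp stream writer
	for ev := range responses {
		var buf bytes.Buffer
		json.NewEncoder(&buf).Encode(ev) // Encode appends a trailing newline
		fmt.Fprintf(w, "data: %s\n", buf.String())
		w.Flush()
	}

	// Final event signalling completion, mirroring the FinishReason chunk above.
	last, _ := json.Marshal(chunk{Object: "chat.completion.chunk", FinishReason: "stop"})
	fmt.Fprintf(w, "data: %s\n\n", last)
	w.Flush()
}

Flushing after every write is what makes the stream incremental; without it the bufio.Writer would batch output until its buffer fills.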
@@ -329,36 +370,6 @@ func chatEndpoint(cm ConfigMerger, debug bool, loader *model.ModelLoader, thread
 		Object: "chat.completion",
 	}

-	if input.Stream {
-		resp.Object = "chat.completion.chunk"
-		jsonResult, _ := json.Marshal(resp)
-		log.Debug().Msgf("Response: %s", jsonResult)
-		log.Debug().Msgf("Handling stream request")
-		c.Context().SetBodyStreamWriter(fasthttp.StreamWriter(func(w *bufio.Writer) {
-			fmt.Fprintf(w, "event: data\n")
-			w.Flush()
-
-			fmt.Fprintf(w, "data: %s\n\n", jsonResult)
-			w.Flush()
-
-			fmt.Fprintf(w, "event: data\n")
-			w.Flush()
-
-			resp := &OpenAIResponse{
-				Model:   input.Model, // we have to return what the user sent here, due to OpenAI spec.
-				Choices: []Choice{{FinishReason: "stop"}},
-			}
-			respData, _ := json.Marshal(resp)
-
-			fmt.Fprintf(w, "data: %s\n\n", respData)
-			w.Flush()
-
-			// fmt.Fprintf(w, "data: [DONE]\n\n")
-			// w.Flush()
-		}))
-		return nil
-	}
-
 	// Return the prediction in the response body
 	return c.JSON(resp)
 }
@@ -392,7 +403,7 @@ func editEndpoint(cm ConfigMerger, debug bool, loader *model.ModelLoader, thread

 	result, err := ComputeChoices(predInput, input, config, loader, func(s string, c *[]Choice) {
 		*c = append(*c, Choice{Text: s})
-	})
+	}, nil)
 	if err != nil {
 		return err
 	}
api/prediction.go

@@ -16,12 +16,12 @@ import (

 var mutexMap sync.Mutex
 var mutexes map[string]*sync.Mutex = make(map[string]*sync.Mutex)

-func ModelInference(s string, loader *model.ModelLoader, c Config) (func() (string, error), error) {
+func ModelInference(s string, loader *model.ModelLoader, c Config, tokenCallback func(string) bool) (func() (string, error), error) {
 	var model *llama.LLama
 	var gptModel *gptj.GPTJ
 	var gpt2Model *gpt2.GPT2
 	var stableLMModel *gpt2.StableLM
-
+	supportStreams := false
 	modelFile := c.Model

 	// Try to load the model
@@ -125,7 +125,13 @@ func ModelInference(s string, loader *model.ModelLoader, c Config) (func() (string, error), error) {
 			)
 		}
 	case model != nil:
+		supportStreams = true
 		fn = func() (string, error) {
+
+			if tokenCallback != nil {
+				model.SetTokenCallback(tokenCallback)
+			}
+
 			// Generate the prediction using the language model
 			predictOptions := []llama.PredictOption{
 				llama.SetTemperature(c.Temperature),
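The hunk above registers tokenCallback with model.SetTokenCallback before predicting. The diff itself doesn't show the backend's loop, but the func(string) bool shape conventionally lets the consumer stop generation early by returning false. A mock of that contract (illustration only, not go-llama.cpp internals):

// Hypothetical per-token generation loop honoring a func(string) bool callback.
package main

import "fmt"

func generate(tokens []string, cb func(string) bool) string {
	out := ""
	for _, t := range tokens {
		out += t
		if cb != nil && !cb(t) {
			break // consumer asked to stop
		}
	}
	return out
}

func main() {
	count := 0
	res := generate([]string{"a", "b", "c", "d"}, func(s string) bool {
		count++
		fmt.Println("token:", s)
		return count < 3 // stop after three tokens
	})
	fmt.Println("result:", res)
}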
@@ -185,11 +191,15 @@ func ModelInference(s string, loader *model.ModelLoader, c Config) (func() (string, error), error) {
 		l.Lock()
 		defer l.Unlock()

-		return fn()
+		res, err := fn()
+		if tokenCallback != nil && !supportStreams {
+			tokenCallback(res)
+		}
+		return res, err
 	}, nil
 }

-func ComputeChoices(predInput string, input *OpenAIRequest, config *Config, loader *model.ModelLoader, cb func(string, *[]Choice)) ([]Choice, error) {
+func ComputeChoices(predInput string, input *OpenAIRequest, config *Config, loader *model.ModelLoader, cb func(string, *[]Choice), tokenCallback func(string) bool) ([]Choice, error) {
 	result := []Choice{}

 	n := input.N
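This hunk establishes the callback contract the streaming endpoint relies on: streaming-capable backends fire tokenCallback once per token inside fn, while for all other backends the wrapper fires it once with the complete result, so callers can rely on the callback in both cases. A small sketch of that fallback, with stand-in names rather than code from the commit:

// runInference mimics the wrapper returned by ModelInference above.
package main

import "fmt"

func runInference(fn func() (string, error), tokenCallback func(string) bool, supportStreams bool) (string, error) {
	res, err := fn()
	if tokenCallback != nil && !supportStreams {
		// Non-streaming backend: deliver the whole output as one "token".
		tokenCallback(res)
	}
	return res, err
}

func main() {
	predict := func() (string, error) { return "full output", nil }
	cb := func(s string) bool { fmt.Println("callback got:", s); return true }

	runInference(predict, cb, false) // prints "callback got: full output"
	runInference(predict, cb, true)  // callback already fired per-token inside fn
}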
@@ -199,7 +209,7 @@ func ComputeChoices(predInput string, input *OpenAIRequest, config *Config, loader *model.ModelLoader, cb func(string, *[]Choice), tokenCallback func(string) bool) ([]Choice, error) {
 	}

 	// get the model function to call for the result
-	predFunc, err := ModelInference(predInput, loader, *config)
+	predFunc, err := ModelInference(predInput, loader, *config, tokenCallback)
 	if err != nil {
 		return result, err
 	}
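To exercise the new stream events end to end, a client only needs to read SSE frames off the response body. A hedged sketch: it assumes the OpenAI-compatible /v1/chat/completions route on a local instance at :8080, and the model name is a placeholder; adjust all three for your deployment.

// Minimal SSE client sketch: POST a streaming chat request and print each
// raw "data:" payload as it arrives.
package main

import (
	"bufio"
	"fmt"
	"net/http"
	"strings"
)

func main() {
	body := strings.NewReader(`{"model":"gpt-3.5-turbo","stream":true,` +
		`"messages":[{"role":"user","content":"Hello"}]}`)

	resp, err := http.Post("http://localhost:8080/v1/chat/completions",
		"application/json", body)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	// Read the event stream line by line; payload lines start with "data: ".
	scanner := bufio.NewScanner(resp.Body)
	for scanner.Scan() {
		line := scanner.Text()
		if strings.HasPrefix(line, "data: ") {
			fmt.Println(strings.TrimPrefix(line, "data: "))
		}
	}
}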