Mirror of https://github.com/mudler/LocalAI.git, synced 2025-05-20 10:35:01 +00:00.
Minor fixes (#285)

parent 7e4616646f
commit 3f739575d8

3 changed files with 70 additions and 59 deletions
.github/workflows/bump_deps.yaml (vendored, 5 changes)

```diff
@@ -9,9 +9,6 @@ jobs:
       fail-fast: false
       matrix:
         include:
-          - repository: "go-skynet/go-gpt4all-j.cpp"
-            variable: "GOGPT4ALLJ_VERSION"
-            branch: "master"
           - repository: "go-skynet/go-llama.cpp"
             variable: "GOLLAMA_VERSION"
             branch: "master"
@@ -30,7 +27,7 @@ jobs:
           - repository: "go-skynet/bloomz.cpp"
             variable: "BLOOMZ_VERSION"
             branch: "main"
-          - repository: "go-skynet/gpt4all"
+          - repository: "nomic-ai/gpt4all"
             variable: "GPT4ALL_VERSION"
             branch: "main"
     runs-on: ubuntu-latest
```
README.md (8 changes)

````diff
@@ -696,6 +696,8 @@ curl http://localhost:8080/v1/models
 
 ### Embeddings
 
+OpenAI docs: https://platform.openai.com/docs/api-reference/embeddings
+
 <details>
 
 The embedding endpoint is experimental and enabled only if the model is configured with `embeddings: true` in its `yaml` file, for example:
@@ -742,6 +744,8 @@ curl http://localhost:8080/v1/audio/transcriptions -H "Content-Type: multipart/f
 
 ### Image generation
 
+OpenAI docs: https://platform.openai.com/docs/api-reference/images/create
+
 LocalAI supports generating images with Stable diffusion, running on CPU.
 
 | mode=0 | mode=1 (winograd/sgemm) |
@@ -773,6 +777,8 @@ curl http://localhost:8080/v1/images/generations -H "Content-Type: application/j
 }'
 ```
 
+Note: image generator supports images up to 512x512. You can use other tools however to upscale the image, for instance: https://github.com/upscayl/upscayl.
+
 #### Setup
 
 Note: In order to use the `images/generation` endpoint, you need to build LocalAI with `GO_TAGS=stablediffusion`.
@@ -847,7 +853,7 @@ Yes! If the client uses OpenAI and supports setting a different base URL to send
 
 <details>
 
-Not currently, as ggml doesn't support GPUs yet: https://github.com/ggerganov/llama.cpp/discussions/915.
+There is partial GPU support, see build instructions above.
 
 </details>
````
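For context on the `embeddings: true` flag referenced in the first hunk, a model's `yaml` definition enables the endpoint roughly as sketched below. This is a hypothetical example: the name and backend values are placeholders, not taken from this commit.

```yaml
# Hypothetical model definition: only the `embeddings: true` line is
# prescribed by the README text quoted above; the rest is illustrative.
name: my-embedding-model
backend: bert-embeddings
embeddings: true
```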
api/openai.go (116 changes)

```diff
@@ -289,12 +289,14 @@ func chatEndpoint(cm ConfigMerger, debug bool, loader *model.ModelLoader, thread
 
     mess := []string{}
     for _, i := range input.Messages {
+        var content string
         r := config.Roles[i.Role]
-        if r == "" {
-            r = i.Role
+        if r != "" {
+            content = fmt.Sprint(r, " ", i.Content)
+        } else {
+            content = i.Content
         }
 
-        content := fmt.Sprint(r, " ", i.Content)
         mess = append(mess, content)
     }
 
```
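The chatEndpoint hunk fixes how prompt lines are assembled: the old code prefixed every message, falling back to the raw role name when the model config had no mapping for it, while the new code applies the role prefix only when a mapping exists and otherwise passes the content through unchanged. A minimal runnable sketch of the new behaviour, where `Message` and `buildPrompt` are illustrative stand-ins rather than the endpoint's actual types:

```go
package main

import "fmt"

// Message stands in for the endpoint's incoming chat message shape.
type Message struct {
	Role    string
	Content string
}

// buildPrompt mirrors the fixed loop: prefix with the configured role
// template only when one exists; otherwise keep the content as-is.
func buildPrompt(roles map[string]string, messages []Message) []string {
	mess := []string{}
	for _, i := range messages {
		var content string
		r := roles[i.Role]
		if r != "" {
			content = fmt.Sprint(r, " ", i.Content)
		} else {
			content = i.Content
		}
		mess = append(mess, content)
	}
	return mess
}

func main() {
	roles := map[string]string{"user": "### Instruction:"}
	msgs := []Message{
		{Role: "user", Content: "Hello"},
		{Role: "function", Content: `{"ok":true}`},
	}
	fmt.Println(buildPrompt(roles, msgs))
	// Prints: [### Instruction: Hello {"ok":true}]
}
```

Under the old logic the second message would have been rendered as `function {"ok":true}`, leaking the raw role name into the prompt.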
```diff
@@ -491,66 +493,72 @@ func imageEndpoint(cm ConfigMerger, debug bool, loader *model.ModelLoader, image
 
     var result []Item
     for _, i := range config.PromptStrings {
-        prompts := strings.Split(i, "|")
-        positive_prompt := prompts[0]
-        negative_prompt := ""
-        if len(prompts) > 1 {
-            negative_prompt = prompts[1]
+        n := input.N
+        if input.N == 0 {
+            n = 1
         }
+        for j := 0; j < n; j++ {
+            prompts := strings.Split(i, "|")
+            positive_prompt := prompts[0]
+            negative_prompt := ""
+            if len(prompts) > 1 {
+                negative_prompt = prompts[1]
+            }
 
-        mode := 0
-        step := 15
-
-        if input.Mode != 0 {
-            mode = input.Mode
-        }
-
-        if input.Step != 0 {
-            step = input.Step
-        }
-
-        tempDir := ""
-        if !b64JSON {
-            tempDir = imageDir
-        }
-        // Create a temporary file
-        outputFile, err := ioutil.TempFile(tempDir, "b64")
-        if err != nil {
-            return err
-        }
-        outputFile.Close()
-        output := outputFile.Name() + ".png"
-        // Rename the temporary file
-        err = os.Rename(outputFile.Name(), output)
-        if err != nil {
-            return err
-        }
-
-        baseURL := c.BaseURL()
-
-        fn, err := ImageGeneration(height, width, mode, step, input.Seed, positive_prompt, negative_prompt, output, loader, *config)
-        if err != nil {
-            return err
-        }
-        if err := fn(); err != nil {
-            return err
-        }
-
-        item := &Item{}
-
-        if b64JSON {
-            defer os.RemoveAll(output)
-            data, err := os.ReadFile(output)
+            mode := 0
+            step := 15
+
+            if input.Mode != 0 {
+                mode = input.Mode
+            }
+
+            if input.Step != 0 {
+                step = input.Step
+            }
+
+            tempDir := ""
+            if !b64JSON {
+                tempDir = imageDir
+            }
+            // Create a temporary file
+            outputFile, err := ioutil.TempFile(tempDir, "b64")
             if err != nil {
                 return err
             }
-            item.B64JSON = base64.StdEncoding.EncodeToString(data)
-        } else {
-            base := filepath.Base(output)
-            item.URL = baseURL + "/generated-images/" + base
-        }
-
-        result = append(result, *item)
+            outputFile.Close()
+            output := outputFile.Name() + ".png"
+            // Rename the temporary file
+            err = os.Rename(outputFile.Name(), output)
+            if err != nil {
+                return err
+            }
+
+            baseURL := c.BaseURL()
+
+            fn, err := ImageGeneration(height, width, mode, step, input.Seed, positive_prompt, negative_prompt, output, loader, *config)
+            if err != nil {
+                return err
+            }
+            if err := fn(); err != nil {
+                return err
+            }
+
+            item := &Item{}
+
+            if b64JSON {
+                defer os.RemoveAll(output)
+                data, err := os.ReadFile(output)
+                if err != nil {
+                    return err
+                }
+                item.B64JSON = base64.StdEncoding.EncodeToString(data)
+            } else {
+                base := filepath.Base(output)
+                item.URL = baseURL + "/generated-images/" + base
+            }
+
+            result = append(result, *item)
+        }
     }
 
     resp := &OpenAIResponse{
```
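The imageEndpoint hunk wraps the whole generate-and-encode pipeline in an inner loop so the endpoint honours the OpenAI-style `n` parameter, defaulting to a single image when the request leaves it unset. A runnable sketch of just that control flow, where `generateBatch` and its callback are illustrative rather than LocalAI's actual API:

```go
package main

import (
	"fmt"
	"strings"
)

// generateBatch sketches the new loop: default n to 1 when the client
// omits it, then run the split-prompt/generate pipeline once per image.
func generateBatch(prompt string, n int, generate func(positive, negative string) error) error {
	if n == 0 {
		n = 1 // request body left "n" unset
	}
	for j := 0; j < n; j++ {
		// Prompts follow the "positive|negative" convention used by the endpoint.
		positive, negative := prompt, ""
		if parts := strings.SplitN(prompt, "|", 2); len(parts) > 1 {
			positive, negative = parts[0], parts[1]
		}
		if err := generate(positive, negative); err != nil {
			return err
		}
	}
	return nil
}

func main() {
	_ = generateBatch("a cat sitting on a bench|blurry, low quality", 2,
		func(pos, neg string) error {
			fmt.Printf("generating %q (negative: %q)\n", pos, neg)
			return nil
		})
}
```

With this in place a client can request several images per call by adding, for example, `"n": 2` to the `/v1/images/generations` request body, matching OpenAI's `n` parameter.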