fix: typos

omahs 2025-05-16 08:19:13 +02:00 committed by GitHub
parent 525cf198be
commit 8815fc9860
13 changed files with 15 additions and 15 deletions

@@ -48,7 +48,7 @@ int tts(char *text,int threads, char *dst ) {
     // generate audio
     if (!bark_generate_audio(c, text, threads)) {
-        fprintf(stderr, "%s: An error occured. If the problem persists, feel free to open an issue to report it.\n", __func__);
+        fprintf(stderr, "%s: An error occurred. If the problem persists, feel free to open an issue to report it.\n", __func__);
         return 1;
     }

@@ -32,7 +32,7 @@ func TTSEndpoint(cl *config.BackendConfigLoader, ml *model.ModelLoader, appConfi
         return fiber.ErrBadRequest
     }
-    log.Debug().Str("modelName", input.ModelID).Msg("elevenlabs TTS request recieved")
+    log.Debug().Str("modelName", input.ModelID).Msg("elevenlabs TTS request received")
     filePath, _, err := backend.ModelTTS(input.Text, voiceID, input.LanguageCode, ml, appConfig, *cfg)
     if err != nil {

@@ -30,7 +30,7 @@ func JINARerankEndpoint(cl *config.BackendConfigLoader, ml *model.ModelLoader, a
         return fiber.ErrBadRequest
     }
-    log.Debug().Str("model", input.Model).Msg("JINA Rerank Request recieved")
+    log.Debug().Str("model", input.Model).Msg("JINA Rerank Request received")
     request := &proto.RerankRequest{
         Query: input.Query,

@@ -34,7 +34,7 @@ func TTSEndpoint(cl *config.BackendConfigLoader, ml *model.ModelLoader, appConfi
         return fiber.ErrBadRequest
     }
-    log.Debug().Str("model", input.Model).Msg("LocalAI TTS Request recieved")
+    log.Debug().Str("model", input.Model).Msg("LocalAI TTS Request received")
     if cfg.Backend == "" {
         if input.Backend != "" {

@@ -28,7 +28,7 @@ func VADEndpoint(cl *config.BackendConfigLoader, ml *model.ModelLoader, appConfi
         return fiber.ErrBadRequest
     }
-    log.Debug().Str("model", input.Model).Msg("LocalAI VAD Request recieved")
+    log.Debug().Str("model", input.Model).Msg("LocalAI VAD Request received")
     resp, err := backend.VAD(input, c.Context(), ml, appConfig, *cfg)

@@ -9,7 +9,7 @@ ico = "rocket_launch"
 ### Build
-LocalAI can be built as a container image or as a single, portable binary. Note that the some model architectures might require Python libraries, which are not included in the binary. The binary contains only the core backends written in Go and C++.
+LocalAI can be built as a container image or as a single, portable binary. Note that some model architectures might require Python libraries, which are not included in the binary. The binary contains only the core backends written in Go and C++.
 LocalAI's extensible architecture allows you to add your own backends, which can be written in any language, and as such the container images contains also the Python dependencies to run all the available backends (for example, in order to run backends like __Diffusers__ that allows to generate images and videos from text).
@@ -189,7 +189,7 @@ sudo xcode-select --switch /Applications/Xcode.app/Contents/Developer
 - If completions are slow, ensure that `gpu-layers` in your model yaml matches the number of layers from the model in use (or simply use a high number such as 256).
-- If you a get a compile error: `error: only virtual member functions can be marked 'final'`, reinstall all the necessary brew packages, clean the build, and try again.
+- If you get a compile error: `error: only virtual member functions can be marked 'final'`, reinstall all the necessary brew packages, clean the build, and try again.
 ```
 # reinstall build dependencies

@@ -39,7 +39,7 @@ Before you begin, ensure you have a container engine installed if you are not us
 ## All-in-one images
-All-In-One images are images that come pre-configured with a set of models and backends to fully leverage almost all the LocalAI featureset. These images are available for both CPU and GPU environments. The AIO images are designed to be easy to use and requires no configuration. Models configuration can be found [here](https://github.com/mudler/LocalAI/tree/master/aio) separated by size.
+All-In-One images are images that come pre-configured with a set of models and backends to fully leverage almost all the LocalAI featureset. These images are available for both CPU and GPU environments. The AIO images are designed to be easy to use and require no configuration. Models configuration can be found [here](https://github.com/mudler/LocalAI/tree/master/aio) separated by size.
 In the AIO images there are models configured with the names of OpenAI models, however, they are really backed by Open Source models. You can find the table below

@@ -7,7 +7,7 @@ ico = "rocket_launch"
 +++
-For installing LocalAI in Kubernetes, the deployment file from the `examples` can be used and customized as prefered:
+For installing LocalAI in Kubernetes, the deployment file from the `examples` can be used and customized as preferred:
 ```
 kubectl apply -f https://raw.githubusercontent.com/mudler/LocalAI-examples/refs/heads/main/kubernetes/deployment.yaml
@@ -29,7 +29,7 @@ helm repo update
 # Get the values
 helm show values go-skynet/local-ai > values.yaml
-# Edit the values value if needed
+# Edit the values if needed
 # vim values.yaml ...
 # Install the helm chart

@@ -647,7 +647,7 @@ install_docker() {
         $SUDO docker volume create local-ai-data
     fi
-    # Check if container is already runnning
+    # Check if container is already running
     if $SUDO docker ps -a --format '{{.Names}}' | grep -q local-ai; then
         info "LocalAI Docker container already exists, replacing it..."
         $SUDO docker rm -f local-ai

@@ -8026,7 +8026,7 @@
 Changes since previous Stheno Datasets:
-- Included Multi-turn Conversation-based Instruct Datasets to boost multi-turn coherency. # This is a seperate set, not the ones made by Kalomaze and Nopm, that are used in Magnum. They're completely different data.
+- Included Multi-turn Conversation-based Instruct Datasets to boost multi-turn coherency. # This is a separate set, not the ones made by Kalomaze and Nopm, that are used in Magnum. They're completely different data.
 - Replaced Single-Turn Instruct with Better Prompts and Answers by Claude 3.5 Sonnet and Claude 3 Opus.
 - Removed c2 Samples -> Underway of re-filtering and masking to use with custom prefills. TBD
 - Included 55% more Roleplaying Examples based of [Gryphe's](https://huggingface.co/datasets/Gryphe/Sonnet3.5-Charcard-Roleplay) Charcard RP Sets. Further filtered and cleaned on.

@@ -179,7 +179,7 @@ var _ = Describe("Download Test", func() {
         })
         AfterEach(func() {
-            os.Remove(filePath) // cleanup, also checks existance of filePath`
+            os.Remove(filePath) // cleanup, also checks existence of filePath`
             os.Remove(filePath + ".partial")
         })
     })

@@ -16,7 +16,7 @@ import (
 // new idea: what if we declare a struct of these here, and use a loop to check?
-// TODO: Split ModelLoader and TemplateLoader? Just to keep things more organized. Left together to share a mutex until I look into that. Would split if we seperate directories for .bin/.yaml and .tmpl
+// TODO: Split ModelLoader and TemplateLoader? Just to keep things more organized. Left together to share a mutex until I look into that. Would split if we separate directories for .bin/.yaml and .tmpl
 type ModelLoader struct {
     ModelPath string
     mu        sync.Mutex

@@ -255,7 +255,7 @@ func (e *Evaluator) TemplateMessages(messages []schema.Message, config *config.B
             marshalAny(i.ToolCalls)
         }
     }
-    // Special Handling: System. We care if it was printed at all, not the r branch, so check seperately
+    // Special Handling: System. We care if it was printed at all, not the r branch, so check separately
     if contentExists && role == "system" {
         suppressConfigSystemPrompt = true
     }