diff --git a/backend/go/bark/gobark.cpp b/backend/go/bark/gobark.cpp
index b5f414b8..fa4bb336 100644
--- a/backend/go/bark/gobark.cpp
+++ b/backend/go/bark/gobark.cpp
@@ -48,7 +48,7 @@ int tts(char *text,int threads, char *dst ) {
 
     // generate audio
     if (!bark_generate_audio(c, text, threads)) {
-        fprintf(stderr, "%s: An error occured. If the problem persists, feel free to open an issue to report it.\n", __func__);
+        fprintf(stderr, "%s: An error occurred. If the problem persists, feel free to open an issue to report it.\n", __func__);
         return 1;
     }
 
diff --git a/core/http/endpoints/elevenlabs/tts.go b/core/http/endpoints/elevenlabs/tts.go
index 48458870..651a526f 100644
--- a/core/http/endpoints/elevenlabs/tts.go
+++ b/core/http/endpoints/elevenlabs/tts.go
@@ -32,7 +32,7 @@ func TTSEndpoint(cl *config.BackendConfigLoader, ml *model.ModelLoader, appConfi
 			return fiber.ErrBadRequest
 		}
 
-		log.Debug().Str("modelName", input.ModelID).Msg("elevenlabs TTS request recieved")
+		log.Debug().Str("modelName", input.ModelID).Msg("elevenlabs TTS request received")
 
 		filePath, _, err := backend.ModelTTS(input.Text, voiceID, input.LanguageCode, ml, appConfig, *cfg)
 		if err != nil {
diff --git a/core/http/endpoints/jina/rerank.go b/core/http/endpoints/jina/rerank.go
index eb2d1911..26a09c2d 100644
--- a/core/http/endpoints/jina/rerank.go
+++ b/core/http/endpoints/jina/rerank.go
@@ -30,7 +30,7 @@ func JINARerankEndpoint(cl *config.BackendConfigLoader, ml *model.ModelLoader, a
 			return fiber.ErrBadRequest
 		}
 
-		log.Debug().Str("model", input.Model).Msg("JINA Rerank Request recieved")
+		log.Debug().Str("model", input.Model).Msg("JINA Rerank Request received")
 
 		request := &proto.RerankRequest{
 			Query: input.Query,
diff --git a/core/http/endpoints/localai/tts.go b/core/http/endpoints/localai/tts.go
index cc0f8169..90d481bb 100644
--- a/core/http/endpoints/localai/tts.go
+++ b/core/http/endpoints/localai/tts.go
@@ -34,7 +34,7 @@ func TTSEndpoint(cl *config.BackendConfigLoader, ml *model.ModelLoader, appConfi
 			return fiber.ErrBadRequest
 		}
 
-		log.Debug().Str("model", input.Model).Msg("LocalAI TTS Request recieved")
+		log.Debug().Str("model", input.Model).Msg("LocalAI TTS Request received")
 
 		if cfg.Backend == "" {
 			if input.Backend != "" {
diff --git a/core/http/endpoints/localai/vad.go b/core/http/endpoints/localai/vad.go
index d41a29c8..384b9754 100644
--- a/core/http/endpoints/localai/vad.go
+++ b/core/http/endpoints/localai/vad.go
@@ -28,7 +28,7 @@ func VADEndpoint(cl *config.BackendConfigLoader, ml *model.ModelLoader, appConfi
 			return fiber.ErrBadRequest
 		}
 
-		log.Debug().Str("model", input.Model).Msg("LocalAI VAD Request recieved")
+		log.Debug().Str("model", input.Model).Msg("LocalAI VAD Request received")
 
 		resp, err := backend.VAD(input, c.Context(), ml, appConfig, *cfg)
 
diff --git a/docs/content/docs/getting-started/build.md b/docs/content/docs/getting-started/build.md
index 9fff1989..cfec79f0 100644
--- a/docs/content/docs/getting-started/build.md
+++ b/docs/content/docs/getting-started/build.md
@@ -9,7 +9,7 @@ ico = "rocket_launch"
 +++
 
 ### Build
 
-LocalAI can be built as a container image or as a single, portable binary. Note that the some model architectures might require Python libraries, which are not included in the binary. The binary contains only the core backends written in Go and C++.
+LocalAI can be built as a container image or as a single, portable binary. Note that some model architectures might require Python libraries, which are not included in the binary. The binary contains only the core backends written in Go and C++.
 
 LocalAI's extensible architecture allows you to add your own backends, which can be written in any language, and as such the container images contains also the Python dependencies to run all the available backends (for example, in order to run backends like __Diffusers__ that allows to generate images and videos from text).
@@ -189,7 +189,7 @@ sudo xcode-select --switch /Applications/Xcode.app/Contents/Developer
 
 - If completions are slow, ensure that `gpu-layers` in your model yaml matches the number of layers from the model in use (or simply use a high number such as 256).
 
-- If you a get a compile error: `error: only virtual member functions can be marked 'final'`, reinstall all the necessary brew packages, clean the build, and try again.
+- If you get a compile error: `error: only virtual member functions can be marked 'final'`, reinstall all the necessary brew packages, clean the build, and try again.
 
 ```
 # reinstall build dependencies
diff --git a/docs/content/docs/getting-started/container-images.md b/docs/content/docs/getting-started/container-images.md
index 6f4b2fc2..89190ec0 100644
--- a/docs/content/docs/getting-started/container-images.md
+++ b/docs/content/docs/getting-started/container-images.md
@@ -39,7 +39,7 @@ Before you begin, ensure you have a container engine installed if you are not us
 
 ## All-in-one images
 
-All-In-One images are images that come pre-configured with a set of models and backends to fully leverage almost all the LocalAI featureset. These images are available for both CPU and GPU environments. The AIO images are designed to be easy to use and requires no configuration. Models configuration can be found [here](https://github.com/mudler/LocalAI/tree/master/aio) separated by size.
+All-In-One images are images that come pre-configured with a set of models and backends to fully leverage almost all the LocalAI featureset. These images are available for both CPU and GPU environments. The AIO images are designed to be easy to use and require no configuration. Models configuration can be found [here](https://github.com/mudler/LocalAI/tree/master/aio) separated by size.
 
 In the AIO images there are models configured with the names of OpenAI models, however, they are really backed by Open Source models. You can find the table below
 
diff --git a/docs/content/docs/getting-started/kubernetes.md b/docs/content/docs/getting-started/kubernetes.md
index aea28f3e..bc3902c5 100644
--- a/docs/content/docs/getting-started/kubernetes.md
+++ b/docs/content/docs/getting-started/kubernetes.md
@@ -7,7 +7,7 @@ ico = "rocket_launch"
 +++
 
-For installing LocalAI in Kubernetes, the deployment file from the `examples` can be used and customized as prefered:
+For installing LocalAI in Kubernetes, the deployment file from the `examples` can be used and customized as preferred:
 
 ```
 kubectl apply -f https://raw.githubusercontent.com/mudler/LocalAI-examples/refs/heads/main/kubernetes/deployment.yaml
 ```
@@ -29,7 +29,7 @@ helm repo update
 # Get the values
 helm show values go-skynet/local-ai > values.yaml
 
-# Edit the values value if needed
+# Edit the values if needed
 # vim values.yaml
 ...
 # Install the helm chart
diff --git a/docs/static/install.sh b/docs/static/install.sh
index 33e13375..4ee607e0 100755
--- a/docs/static/install.sh
+++ b/docs/static/install.sh
@@ -647,7 +647,7 @@ install_docker() {
         $SUDO docker volume create local-ai-data
     fi
 
-    # Check if container is already runnning
+    # Check if container is already running
     if $SUDO docker ps -a --format '{{.Names}}' | grep -q local-ai; then
         info "LocalAI Docker container already exists, replacing it..."
         $SUDO docker rm -f local-ai
diff --git a/gallery/index.yaml b/gallery/index.yaml
index bd5085d3..4ce3df25 100644
--- a/gallery/index.yaml
+++ b/gallery/index.yaml
@@ -8026,7 +8026,7 @@
 
       Changes since previous Stheno Datasets:
 
-      - Included Multi-turn Conversation-based Instruct Datasets to boost multi-turn coherency. # This is a seperate set, not the ones made by Kalomaze and Nopm, that are used in Magnum. They're completely different data.
+      - Included Multi-turn Conversation-based Instruct Datasets to boost multi-turn coherency. # This is a separate set, not the ones made by Kalomaze and Nopm, that are used in Magnum. They're completely different data.
       - Replaced Single-Turn Instruct with Better Prompts and Answers by Claude 3.5 Sonnet and Claude 3 Opus.
       - Removed c2 Samples -> Underway of re-filtering and masking to use with custom prefills. TBD
       - Included 55% more Roleplaying Examples based of [Gryphe's](https://huggingface.co/datasets/Gryphe/Sonnet3.5-Charcard-Roleplay) Charcard RP Sets. Further filtered and cleaned on.
diff --git a/pkg/downloader/uri_test.go b/pkg/downloader/uri_test.go
index 6976c9b4..17ade771 100644
--- a/pkg/downloader/uri_test.go
+++ b/pkg/downloader/uri_test.go
@@ -179,7 +179,7 @@ var _ = Describe("Download Test", func() {
 		})
 
 		AfterEach(func() {
-			os.Remove(filePath) // cleanup, also checks existance of filePath`
+			os.Remove(filePath) // cleanup, also checks existence of filePath`
 			os.Remove(filePath + ".partial")
 		})
 	})
diff --git a/pkg/model/loader.go b/pkg/model/loader.go
index e74ea97b..5ecd7e90 100644
--- a/pkg/model/loader.go
+++ b/pkg/model/loader.go
@@ -16,7 +16,7 @@ import (
 
 // new idea: what if we declare a struct of these here, and use a loop to check?
 
-// TODO: Split ModelLoader and TemplateLoader? Just to keep things more organized. Left together to share a mutex until I look into that. Would split if we seperate directories for .bin/.yaml and .tmpl
+// TODO: Split ModelLoader and TemplateLoader? Just to keep things more organized. Left together to share a mutex until I look into that. Would split if we separate directories for .bin/.yaml and .tmpl
 type ModelLoader struct {
 	ModelPath string
 	mu        sync.Mutex
diff --git a/pkg/templates/evaluator.go b/pkg/templates/evaluator.go
index aedf7b41..78de7582 100644
--- a/pkg/templates/evaluator.go
+++ b/pkg/templates/evaluator.go
@@ -255,7 +255,7 @@ func (e *Evaluator) TemplateMessages(messages []schema.Message, config *config.B
 			marshalAny(i.ToolCalls)
 		}
 	}
-	// Special Handling: System. We care if it was printed at all, not the r branch, so check seperately
+	// Special Handling: System. We care if it was printed at all, not the r branch, so check separately
 	if contentExists && role == "system" {
 		suppressConfigSystemPrompt = true
 	}