From 1f29b5f38e73a9b8959ecc7a1f813f8652c7e1df Mon Sep 17 00:00:00 2001 From: FT <140458077+zeevick10@users.noreply.github.com> Date: Sun, 15 Jun 2025 16:04:44 +0200 Subject: [PATCH] Fix Typos and Improve Documentation Clarity (#5648) * Update p2p.go Signed-off-by: FT <140458077+zeevick10@users.noreply.github.com> * Update GPU-acceleration.md Signed-off-by: FT <140458077+zeevick10@users.noreply.github.com> --------- Signed-off-by: FT <140458077+zeevick10@users.noreply.github.com> --- core/p2p/p2p.go | 2 +- docs/content/docs/features/GPU-acceleration.md | 6 +++--- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/core/p2p/p2p.go b/core/p2p/p2p.go index ce0dcc07..b4b5886b 100644 --- a/core/p2p/p2p.go +++ b/core/p2p/p2p.go @@ -221,7 +221,7 @@ func discoveryTunnels(ctx context.Context, n *node.Node, token, servicesID strin // - starts a service if not started, if the worker is Online // - checks that workers are Online, if not cancel the context of allocateLocalService // - discoveryTunnels should return all the nodes and addresses associated with it - // - the caller should take now care of the fact that we are always returning fresh informations + // - the caller should now take care of the fact that we are always returning fresh information go func() { for { select { diff --git a/docs/content/docs/features/GPU-acceleration.md b/docs/content/docs/features/GPU-acceleration.md index 0f429227..d02ff74d 100644 --- a/docs/content/docs/features/GPU-acceleration.md +++ b/docs/content/docs/features/GPU-acceleration.md @@ -249,7 +249,7 @@ This configuration has been tested on a 'custom' cluster managed by SUSE Rancher - When installing the ROCM kernel driver on your system ensure that you are installing an equal or newer version that that which is currently implemented in LocalAI (6.0.0 at time of writing). - AMD documentation indicates that this will ensure functionality however your milage may vary depending on the GPU and distro you are using. 
-- If you encounter an `Error 413` on attempting to upload an audio file or image for whisper or llava/bakllava on a k8s deployment, note that the ingress for your deployment may require the annontation `nginx.ingress.kubernetes.io/proxy-body-size: "25m"` to allow larger uploads. This may be included in future versions of the helm chart. +- If you encounter an `Error 413` on attempting to upload an audio file or image for whisper or llava/bakllava on a k8s deployment, note that the ingress for your deployment may require the annotation `nginx.ingress.kubernetes.io/proxy-body-size: "25m"` to allow larger uploads. This may be included in future versions of the helm chart. ## Intel acceleration (sycl) @@ -301,7 +301,7 @@ docker run -p 8080:8080 -e DEBUG=true -v $PWD/models:/build/models localai/local ### Notes -In addition to the commands to run LocalAI normally, you need to specify additonal flags to pass the GPU hardware to the container. +In addition to the commands to run LocalAI normally, you need to specify additional flags to pass the GPU hardware to the container. These flags are the same as the sections above, depending on the hardware, for [nvidia](#cudanvidia-acceleration), [AMD](#rocmamd-acceleration) or [Intel](#intel-acceleration-sycl). @@ -312,4 +312,4 @@ docker run -p 8080:8080 -e DEBUG=true -v $PWD/models:/build/models \ --gpus=all \ # nvidia passthrough --device /dev/dri --device /dev/kfd \ # AMD/Intel passthrough localai/localai:latest-vulkan-ffmpeg-core -``` \ No newline at end of file +```