diff --git a/docs/content/docs/features/distributed_inferencing.md b/docs/content/docs/features/distributed_inferencing.md
index b3b84528..e7220a81 100644
--- a/docs/content/docs/features/distributed_inferencing.md
+++ b/docs/content/docs/features/distributed_inferencing.md
@@ -20,7 +20,7 @@ This functionality enables LocalAI to distribute inference requests across multi
 To start workers for distributing the computational load, run:
 
 ```bash
-local-ai llamacpp-worker
+local-ai worker llama-cpp-rpc
 ```
 
 Alternatively, you can build the RPC server following the llama.cpp [README](https://github.com/ggerganov/llama.cpp/blob/master/examples/rpc/README.md), which is compatible with LocalAI.
@@ -71,7 +71,7 @@ To reuse the same token later, restart the server with `--p2ptoken` or `P2P_TOKE
 2. Start the workers. Copy the `local-ai` binary to other hosts and run as many workers as needed using the token:
 
 ```bash
-TOKEN=XXX ./local-ai p2p-llama-cpp-rpc
+TOKEN=XXX ./local-ai worker p2p-llama-cpp-rpc
 # 1:06AM INF loading environment variables from file envFile=.env
 # 1:06AM INF Setting logging to info
 # {"level":"INFO","time":"2024-05-19T01:06:01.794+0200","caller":"config/config.go:288","message":"connmanager disabled\n"}
diff --git a/docs/content/docs/features/gpt-vision.md b/docs/content/docs/features/gpt-vision.md
index 9e021273..1fc4307f 100644
--- a/docs/content/docs/features/gpt-vision.md
+++ b/docs/content/docs/features/gpt-vision.md
@@ -1,7 +1,7 @@
 +++
 disableToc = false
-title = "🆕 GPT Vision"
+title = "🥽 GPT Vision"
 weight = 14
 url = "/features/gpt-vision/"
 +++
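
For context, the renamed worker subcommands fit together as follows. This is a minimal sketch, not part of the diff: it assumes a built `local-ai` binary on each host, it assumes the server is started with the `run` subcommand, and `XXX` stands in for the token the server prints at startup.

```bash
# On the coordinating host: start the LocalAI server. It prints a P2P token
# at startup; reuse it on restarts via --p2ptoken or P2P_TOKEN (per the docs
# edited above). The `run` subcommand is an assumption of this sketch.
./local-ai run

# On each worker host: join with the same token, using the worker
# subcommand name introduced by this diff.
TOKEN=XXX ./local-ai worker p2p-llama-cpp-rpc
```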