diff --git a/docs/content/docs/advanced/installer.md b/docs/content/docs/advanced/installer.md
index 3770022b..4cd15a94 100644
--- a/docs/content/docs/advanced/installer.md
+++ b/docs/content/docs/advanced/installer.md
@@ -29,5 +29,9 @@ List of the Environment Variables:
 | **THREADS** | Number of processor threads the application should use. Defaults to the number of logical cores minus one. |
 | **VERSION** | Specifies the version of LocalAI to install. Defaults to the latest available version. |
 | **MODELS_PATH** | Directory path where LocalAI models are stored (default is /usr/share/local-ai/models). |
+| **P2P_TOKEN** | Token to use for the federation or for starting workers; see the [documentation]({{%relref "docs/features/distributed_inferencing" %}}). |
+| **WORKER** | Set to "true" to make the instance a worker (a P2P token is required; see the [documentation]({{%relref "docs/features/distributed_inferencing" %}})). |
+| **FEDERATED** | Set to "true" to share the instance with the federation (a P2P token is required; see the [documentation]({{%relref "docs/features/distributed_inferencing" %}})). |
+| **FEDERATED_SERVER** | Set to "true" to run the instance as a federation server which forwards requests to the federation (a P2P token is required; see the [documentation]({{%relref "docs/features/distributed_inferencing" %}})). |
 
 We are looking into improving the installer, and as this is a first iteration any feedback is welcome! Open up an [issue](https://github.com/mudler/LocalAI/issues/new/choose) if something doesn't work for you!
\ No newline at end of file
diff --git a/docs/static/install.sh b/docs/static/install.sh
index 61be710b..3209b24e 100644
--- a/docs/static/install.sh
+++ b/docs/static/install.sh
@@ -78,6 +78,10 @@ API_KEY=${API_KEY:-}
 CORE_IMAGES=${CORE_IMAGES:-false}
 P2P_TOKEN=${P2P_TOKEN:-}
 WORKER=${WORKER:-false}
+FEDERATED=${FEDERATED:-false}
+FEDERATED_SERVER=${FEDERATED_SERVER:-false}
+LOCALAI_P2P_DISABLE_DHT=${LOCALAI_P2P_DISABLE_DHT:-false}
+
 # nprocs -1
 if available nproc; then
     procs=$(nproc)
@@ -134,14 +138,6 @@ configure_systemd() {
     info "Adding current user to local-ai group..."
     $SUDO usermod -a -G local-ai $(whoami)
 
-    STARTCOMMAND="run"
-    if [ "$WORKER" = true ]; then
-        if [ -n "$P2P_TOKEN" ]; then
-            STARTCOMMAND="worker p2p-llama-cpp-rpc"
-        else
-            STARTCOMMAND="worker llama-cpp-rpc"
-        fi
-    fi
     info "Creating local-ai systemd service..."
     cat <<EOF | $SUDO tee /etc/systemd/system/local-ai.service >/dev/null
 [Unit]
@@ -173,6 +169,10 @@ EOF
         $SUDO echo "LOCALAI_P2P=true" | $SUDO tee -a /etc/localai.env >/dev/null
     fi
 
+    if [ "$LOCALAI_P2P_DISABLE_DHT" = true ]; then
+        $SUDO echo "LOCALAI_P2P_DISABLE_DHT=true" | $SUDO tee -a /etc/localai.env >/dev/null
+    fi
+
     SYSTEMCTL_RUNNING="$(systemctl is-system-running || true)"
     case $SYSTEMCTL_RUNNING in
         running|degraded)
@@ -421,18 +421,13 @@ install_docker() {
         # exit 0
     fi
 
-    STARTCOMMAND="run"
-    if [ "$WORKER" = true ]; then
-        if [ -n "$P2P_TOKEN" ]; then
-            STARTCOMMAND="worker p2p-llama-cpp-rpc"
-        else
-            STARTCOMMAND="worker llama-cpp-rpc"
-        fi
-    fi
     envs=""
     if [ -n "$P2P_TOKEN" ]; then
         envs="-e LOCALAI_P2P_TOKEN=$P2P_TOKEN -e LOCALAI_P2P=true"
     fi
+    if [ "$LOCALAI_P2P_DISABLE_DHT" = true ]; then
+        envs="$envs -e LOCALAI_P2P_DISABLE_DHT=true"
+    fi
 
     IMAGE_TAG=
     if [ "$HAS_CUDA" ]; then
@@ -604,6 +599,29 @@ install_binary() {
     exit 0
 }
 
+# Pick the local-ai subcommand and flags for the service, based on WORKER /
+# FEDERATED / FEDERATED_SERVER / P2P_TOKEN; result is left in STARTCOMMAND.
+detect_start_command() {
+    STARTCOMMAND="run"
+    if [ "$WORKER" = true ]; then
+        if [ -n "$P2P_TOKEN" ]; then
+            STARTCOMMAND="worker p2p-llama-cpp-rpc"
+        else
+            STARTCOMMAND="worker llama-cpp-rpc"
+        fi
+    elif [ "$FEDERATED" = true ]; then
+        if [ "$FEDERATED_SERVER" = true ]; then
+            STARTCOMMAND="federated"
+        else
+            STARTCOMMAND="$STARTCOMMAND --p2p --federated"
+        fi
+    elif [ -n "$P2P_TOKEN" ]; then
+        STARTCOMMAND="$STARTCOMMAND --p2p"
+    fi
+}
+
+
+detect_start_command
+
 OS="$(uname -s)"
 ARCH=$(uname -m)