From d19bea4af24a449be438bdf3e6ee5095ece68137 Mon Sep 17 00:00:00 2001
From: Ettore Di Giacinto
Date: Mon, 7 Oct 2024 12:27:37 +0200
Subject: [PATCH] chore(vllm): do not install from source (#3745)

chore(vllm): do not install from source by default

Signed-off-by: Ettore Di Giacinto
---
 backend/python/openvoice/test.py | 2 +-
 backend/python/vllm/install.sh   | 4 +++-
 2 files changed, 4 insertions(+), 2 deletions(-)

diff --git a/backend/python/openvoice/test.py b/backend/python/openvoice/test.py
index 262917b3..82f08785 100644
--- a/backend/python/openvoice/test.py
+++ b/backend/python/openvoice/test.py
@@ -19,7 +19,7 @@ class TestBackendServicer(unittest.TestCase):
         This method sets up the gRPC service by starting the server
         """
         self.service = subprocess.Popen(["python3", "backend.py", "--addr", "localhost:50051"])
-        time.sleep(10)
+        time.sleep(30)
 
     def tearDown(self) -> None:
         """
diff --git a/backend/python/vllm/install.sh b/backend/python/vllm/install.sh
index 022cf8bf..9078b81b 100755
--- a/backend/python/vllm/install.sh
+++ b/backend/python/vllm/install.sh
@@ -13,7 +13,9 @@ if [ "x${BUILD_PROFILE}" == "xintel" ]; then
 	EXTRA_PIP_INSTALL_FLAGS+=" --upgrade --index-strategy=unsafe-first-match"
 fi
 
-if [ "x${BUILD_TYPE}" == "x" ]; then
+# We don't embed this into the images as it is a large dependency and not always needed.
+# Besides, the inference speed is not actually usable in its current state for production use-cases.
+if [ "x${BUILD_TYPE}" == "x" ] && [ "x${FROM_SOURCE}" == "xtrue" ]; then
 	ensureVenv
 	# https://docs.vllm.ai/en/v0.6.1/getting_started/cpu-installation.html
 	if [ ! -d vllm ]; then
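
Usage note: with this patch, the vLLM CPU source build no longer runs by
default; it is gated behind the new FROM_SOURCE flag in addition to an empty
BUILD_TYPE. A minimal sketch of how one might opt back in, assuming the
script is invoked directly from its directory (this invocation is an
illustration, not part of the patch):

    # Hypothetical opt-in: build vLLM from source on the CPU profile.
    # BUILD_TYPE is left unset and FROM_SOURCE=true enables the gated branch
    # added by this change in install.sh.
    cd backend/python/vllm
    FROM_SOURCE=true ./install.sh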