From 073eaec7295fe1fc5c9f2297fc6de6c0a85c36a1 Mon Sep 17 00:00:00 2001 From: Ettore Di Giacinto Date: Thu, 23 Jan 2025 10:00:36 +0100 Subject: [PATCH] chore(openvoice): drop backend (#4673) The project (MeloTTS) has been quiet for a long time; newer backends are much more performant and offer better quality overall. Signed-off-by: Ettore Di Giacinto --- Dockerfile | 5 +- Makefile | 13 +- backend/python/openvoice/Makefile | 25 --- backend/python/openvoice/backend.py | 158 ------------------ backend/python/openvoice/install.sh | 16 -- backend/python/openvoice/requirements-cpu.txt | 7 - .../openvoice/requirements-cublas11.txt | 8 - .../openvoice/requirements-cublas12.txt | 7 - .../python/openvoice/requirements-hipblas.txt | 8 - .../python/openvoice/requirements-intel.txt | 24 --- backend/python/openvoice/requirements.txt | 17 -- backend/python/openvoice/run.sh | 4 - backend/python/openvoice/test.py | 82 --------- backend/python/openvoice/test.sh | 12 -- 14 files changed, 3 insertions(+), 383 deletions(-) delete mode 100644 backend/python/openvoice/Makefile delete mode 100755 backend/python/openvoice/backend.py delete mode 100755 backend/python/openvoice/install.sh delete mode 100644 backend/python/openvoice/requirements-cpu.txt delete mode 100644 backend/python/openvoice/requirements-cublas11.txt delete mode 100644 backend/python/openvoice/requirements-cublas12.txt delete mode 100644 backend/python/openvoice/requirements-hipblas.txt delete mode 100644 backend/python/openvoice/requirements-intel.txt delete mode 100644 backend/python/openvoice/requirements.txt delete mode 100755 backend/python/openvoice/run.sh delete mode 100644 backend/python/openvoice/test.py delete mode 100755 backend/python/openvoice/test.sh diff --git a/Dockerfile b/Dockerfile index 625d2869..566e03bc 100644 --- a/Dockerfile +++ b/Dockerfile @@ -15,7 +15,7 @@ ARG TARGETARCH ARG TARGETVARIANT ENV DEBIAN_FRONTEND=noninteractive -ENV
EXTERNAL_GRPC_BACKENDS="coqui:/build/backend/python/coqui/run.sh,transformers:/build/backend/python/transformers/run.sh,rerankers:/build/backend/python/rerankers/run.sh,autogptq:/build/backend/python/autogptq/run.sh,bark:/build/backend/python/bark/run.sh,diffusers:/build/backend/python/diffusers/run.sh,faster-whisper:/build/backend/python/faster-whisper/run.sh,openvoice:/build/backend/python/openvoice/run.sh,kokoro:/build/backend/python/kokoro/run.sh,vllm:/build/backend/python/vllm/run.sh,exllama2:/build/backend/python/exllama2/run.sh" +ENV EXTERNAL_GRPC_BACKENDS="coqui:/build/backend/python/coqui/run.sh,transformers:/build/backend/python/transformers/run.sh,rerankers:/build/backend/python/rerankers/run.sh,autogptq:/build/backend/python/autogptq/run.sh,bark:/build/backend/python/bark/run.sh,diffusers:/build/backend/python/diffusers/run.sh,faster-whisper:/build/backend/python/faster-whisper/run.sh,kokoro:/build/backend/python/kokoro/run.sh,vllm:/build/backend/python/vllm/run.sh,exllama2:/build/backend/python/exllama2/run.sh" RUN apt-get update && \ apt-get install -y --no-install-recommends \ @@ -420,9 +420,6 @@ RUN if [[ ( "${EXTRA_BACKENDS}" =~ "coqui" || -z "${EXTRA_BACKENDS}" ) && "$IMAG RUN if [[ ( "${EXTRA_BACKENDS}" =~ "kokoro" || -z "${EXTRA_BACKENDS}" ) && "$IMAGE_TYPE" == "extras" ]]; then \ make -C backend/python/kokoro \ ; fi && \ - if [[ ( "${EXTRA_BACKENDS}" =~ "openvoice" || -z "${EXTRA_BACKENDS}" ) && "$IMAGE_TYPE" == "extras" ]]; then \ - make -C backend/python/openvoice \ - ; fi && \ if [[ ( "${EXTRA_BACKENDS}" =~ "exllama2" || -z "${EXTRA_BACKENDS}" ) && "$IMAGE_TYPE" == "extras" ]]; then \ make -C backend/python/exllama2 \ ; fi && \ diff --git a/Makefile b/Makefile index 04e280d8..9c4f3778 100644 --- a/Makefile +++ b/Makefile @@ -533,10 +533,10 @@ protogen-go-clean: $(RM) bin/* .PHONY: protogen-python -protogen-python: autogptq-protogen bark-protogen coqui-protogen diffusers-protogen exllama2-protogen rerankers-protogen transformers-protogen 
kokoro-protogen vllm-protogen openvoice-protogen faster-whisper-protogen +protogen-python: autogptq-protogen bark-protogen coqui-protogen diffusers-protogen exllama2-protogen rerankers-protogen transformers-protogen kokoro-protogen vllm-protogen faster-whisper-protogen .PHONY: protogen-python-clean -protogen-python-clean: autogptq-protogen-clean bark-protogen-clean coqui-protogen-clean diffusers-protogen-clean exllama2-protogen-clean rerankers-protogen-clean transformers-protogen-clean kokoro-protogen-clean vllm-protogen-clean openvoice-protogen-clean faster-whisper-protogen-clean +protogen-python-clean: autogptq-protogen-clean bark-protogen-clean coqui-protogen-clean diffusers-protogen-clean exllama2-protogen-clean rerankers-protogen-clean transformers-protogen-clean kokoro-protogen-clean vllm-protogen-clean faster-whisper-protogen-clean .PHONY: autogptq-protogen autogptq-protogen: @@ -610,14 +610,6 @@ kokoro-protogen: kokoro-protogen-clean: $(MAKE) -C backend/python/kokoro protogen-clean -.PHONY: openvoice-protogen -openvoice-protogen: - $(MAKE) -C backend/python/openvoice protogen - -.PHONY: openvoice-protogen-clean -openvoice-protogen-clean: - $(MAKE) -C backend/python/openvoice protogen-clean - .PHONY: vllm-protogen vllm-protogen: $(MAKE) -C backend/python/vllm protogen @@ -638,7 +630,6 @@ prepare-extra-conda-environments: protogen-python $(MAKE) -C backend/python/rerankers $(MAKE) -C backend/python/transformers $(MAKE) -C backend/python/kokoro - $(MAKE) -C backend/python/openvoice $(MAKE) -C backend/python/exllama2 prepare-test-extra: protogen-python diff --git a/backend/python/openvoice/Makefile b/backend/python/openvoice/Makefile deleted file mode 100644 index a187a00f..00000000 --- a/backend/python/openvoice/Makefile +++ /dev/null @@ -1,25 +0,0 @@ -.DEFAULT_GOAL := install - -.PHONY: install -install: protogen - bash install.sh - -.PHONY: protogen -protogen: backend_pb2_grpc.py backend_pb2.py - -.PHONY: protogen-clean -protogen-clean: - $(RM) 
backend_pb2_grpc.py backend_pb2.py - -backend_pb2_grpc.py backend_pb2.py: - python3 -m grpc_tools.protoc -I../.. --python_out=. --grpc_python_out=. backend.proto - -.PHONY: clean -clean: protogen-clean - rm -rf venv __pycache__ - -.PHONY: test -test: protogen - @echo "Testing openvoice..." - bash test.sh - @echo "openvoice tested." \ No newline at end of file diff --git a/backend/python/openvoice/backend.py b/backend/python/openvoice/backend.py deleted file mode 100755 index 7dde08cf..00000000 --- a/backend/python/openvoice/backend.py +++ /dev/null @@ -1,158 +0,0 @@ -#!/usr/bin/env python3 -""" -Extra gRPC server for OpenVoice models. -""" -from concurrent import futures - -import argparse -import signal -import sys -import os -import torch -from openvoice import se_extractor -from openvoice.api import ToneColorConverter -from melo.api import TTS - -import time -import backend_pb2 -import backend_pb2_grpc - -import grpc - - -_ONE_DAY_IN_SECONDS = 60 * 60 * 24 - -# If MAX_WORKERS are specified in the environment use it, otherwise default to 1 -MAX_WORKERS = int(os.environ.get('PYTHON_GRPC_MAX_WORKERS', '1')) - -# Implement the BackendServicer class with the service methods -class BackendServicer(backend_pb2_grpc.BackendServicer): - """ - A gRPC servicer for the backend service. - - This class implements the gRPC methods for the backend service, including Health, LoadModel, and Embedding. - """ - def Health(self, request, context): - """ - A gRPC method that returns the health status of the backend service. - - Args: - request: A HealthRequest object that contains the request parameters. - context: A grpc.ServicerContext object that provides information about the RPC. - - Returns: - A Reply object that contains the health status of the backend service. - """ - return backend_pb2.Reply(message=bytes("OK", 'utf-8')) - - def LoadModel(self, request, context): - """ - A gRPC method that loads a model into memory. 
- - Args: - request: A LoadModelRequest object that contains the request parameters. - context: A grpc.ServicerContext object that provides information about the RPC. - - Returns: - A Result object that contains the result of the LoadModel operation. - """ - model_name = request.Model - try: - - self.clonedVoice = False - # Assume directory from request.ModelFile. - # Only if request.LoraAdapter it's not an absolute path - if request.AudioPath and request.ModelFile != "" and not os.path.isabs(request.AudioPath): - # get base path of modelFile - modelFileBase = os.path.dirname(request.ModelFile) - request.AudioPath = os.path.join(modelFileBase, request.AudioPath) - if request.AudioPath != "": - self.clonedVoice = True - - self.modelpath = request.ModelFile - self.speaker = request.Type - self.ClonedVoicePath = request.AudioPath - - ckpt_converter = request.Model+'/converter' - device = "cuda:0" if torch.cuda.is_available() else "cpu" - self.device = device - self.tone_color_converter = None - if self.clonedVoice: - self.tone_color_converter = ToneColorConverter(f'{ckpt_converter}/config.json', device=device) - self.tone_color_converter.load_ckpt(f'{ckpt_converter}/checkpoint.pth') - - except Exception as err: - return backend_pb2.Result(success=False, message=f"Unexpected {err=}, {type(err)=}") - - return backend_pb2.Result(message="Model loaded successfully", success=True) - - def TTS(self, request, context): - model_name = request.model - if model_name == "": - return backend_pb2.Result(success=False, message="request.model is required") - try: - # Speed is adjustable - speed = 1.0 - voice = "EN" - if request.voice: - voice = request.voice - model = TTS(language=voice, device=self.device) - speaker_ids = model.hps.data.spk2id - speaker_key = self.speaker - modelpath = self.modelpath - for s in speaker_ids.keys(): - print(f"Speaker: {s} - ID: {speaker_ids[s]}") - speaker_id = speaker_ids[speaker_key] - speaker_key = speaker_key.lower().replace('_', '-') - source_se 
= torch.load(f'{modelpath}/base_speakers/ses/{speaker_key}.pth', map_location=self.device) - model.tts_to_file(request.text, speaker_id, request.dst, speed=speed) - if self.clonedVoice: - reference_speaker = self.ClonedVoicePath - target_se, audio_name = se_extractor.get_se(reference_speaker, self.tone_color_converter, vad=False) - # Run the tone color converter - encode_message = "@MyShell" - self.tone_color_converter.convert( - audio_src_path=request.dst, - src_se=source_se, - tgt_se=target_se, - output_path=request.dst, - message=encode_message) - - print("[OpenVoice] TTS generated!", file=sys.stderr) - print("[OpenVoice] TTS saved to", request.dst, file=sys.stderr) - print(request, file=sys.stderr) - except Exception as err: - return backend_pb2.Result(success=False, message=f"Unexpected {err=}, {type(err)=}") - return backend_pb2.Result(success=True) - -def serve(address): - server = grpc.server(futures.ThreadPoolExecutor(max_workers=MAX_WORKERS)) - backend_pb2_grpc.add_BackendServicer_to_server(BackendServicer(), server) - server.add_insecure_port(address) - server.start() - print("[OpenVoice] Server started. Listening on: " + address, file=sys.stderr) - - # Define the signal handler function - def signal_handler(sig, frame): - print("[OpenVoice] Received termination signal. Shutting down...") - server.stop(0) - sys.exit(0) - - # Set the signal handlers for SIGINT and SIGTERM - signal.signal(signal.SIGINT, signal_handler) - signal.signal(signal.SIGTERM, signal_handler) - - try: - while True: - time.sleep(_ONE_DAY_IN_SECONDS) - except KeyboardInterrupt: - server.stop(0) - -if __name__ == "__main__": - parser = argparse.ArgumentParser(description="Run the gRPC server.") - parser.add_argument( - "--addr", default="localhost:50051", help="The address to bind the server to." 
- ) - args = parser.parse_args() - print(f"[OpenVoice] startup: {args}", file=sys.stderr) - serve(args.addr) diff --git a/backend/python/openvoice/install.sh b/backend/python/openvoice/install.sh deleted file mode 100755 index 24db146b..00000000 --- a/backend/python/openvoice/install.sh +++ /dev/null @@ -1,16 +0,0 @@ -#!/bin/bash -set -e - -source $(dirname $0)/../common/libbackend.sh - -# This is here because the Intel pip index is broken and returns 200 status codes for every package name, it just doesn't return any package links. -# This makes uv think that the package exists in the Intel pip index, and by default it stops looking at other pip indexes once it finds a match. -# We need uv to continue falling through to the pypi default index to find optimum[openvino] in the pypi index -# the --upgrade actually allows us to *downgrade* torch to the version provided in the Intel pip index -if [ "x${BUILD_PROFILE}" == "xintel" ]; then - EXTRA_PIP_INSTALL_FLAGS+=" --upgrade --index-strategy=unsafe-first-match" -fi - -installRequirements - -python -m unidic download diff --git a/backend/python/openvoice/requirements-cpu.txt b/backend/python/openvoice/requirements-cpu.txt deleted file mode 100644 index dd2eb221..00000000 --- a/backend/python/openvoice/requirements-cpu.txt +++ /dev/null @@ -1,7 +0,0 @@ -torch==2.4.1 -git+https://github.com/myshell-ai/MeloTTS.git -git+https://github.com/myshell-ai/OpenVoice.git -whisper-timestamped -pydub==0.25.1 -wavmark==0.0.3 -eng_to_ipa==0.0.2 \ No newline at end of file diff --git a/backend/python/openvoice/requirements-cublas11.txt b/backend/python/openvoice/requirements-cublas11.txt deleted file mode 100644 index 84ecc344..00000000 --- a/backend/python/openvoice/requirements-cublas11.txt +++ /dev/null @@ -1,8 +0,0 @@ ---extra-index-url https://download.pytorch.org/whl/cu118 -torch==2.4.1+cu118 -git+https://github.com/myshell-ai/MeloTTS.git -git+https://github.com/myshell-ai/OpenVoice.git -whisper-timestamped -pydub==0.25.1 
-wavmark==0.0.3 -eng_to_ipa==0.0.2 \ No newline at end of file diff --git a/backend/python/openvoice/requirements-cublas12.txt b/backend/python/openvoice/requirements-cublas12.txt deleted file mode 100644 index dd2eb221..00000000 --- a/backend/python/openvoice/requirements-cublas12.txt +++ /dev/null @@ -1,7 +0,0 @@ -torch==2.4.1 -git+https://github.com/myshell-ai/MeloTTS.git -git+https://github.com/myshell-ai/OpenVoice.git -whisper-timestamped -pydub==0.25.1 -wavmark==0.0.3 -eng_to_ipa==0.0.2 \ No newline at end of file diff --git a/backend/python/openvoice/requirements-hipblas.txt b/backend/python/openvoice/requirements-hipblas.txt deleted file mode 100644 index 4c2d6649..00000000 --- a/backend/python/openvoice/requirements-hipblas.txt +++ /dev/null @@ -1,8 +0,0 @@ ---extra-index-url https://download.pytorch.org/whl/rocm6.0 -torch==2.4.1+rocm6.0 -git+https://github.com/myshell-ai/MeloTTS.git -git+https://github.com/myshell-ai/OpenVoice.git -whisper-timestamped -pydub==0.25.1 -wavmark==0.0.3 -eng_to_ipa==0.0.2 \ No newline at end of file diff --git a/backend/python/openvoice/requirements-intel.txt b/backend/python/openvoice/requirements-intel.txt deleted file mode 100644 index 39b2b8b0..00000000 --- a/backend/python/openvoice/requirements-intel.txt +++ /dev/null @@ -1,24 +0,0 @@ ---extra-index-url https://pytorch-extension.intel.com/release-whl/stable/xpu/us/ -intel-extension-for-pytorch==2.3.110+xpu -torch==2.3.1+cxx11.abi -torchaudio==2.3.1+cxx11.abi -oneccl_bind_pt==2.3.100+xpu -optimum[openvino] -grpcio==1.69.0 -protobuf -librosa==0.9.1 -faster-whisper==0.9.0 -pydub==0.25.1 -wavmark==0.0.3 -eng_to_ipa==0.0.2 -inflect==7.0.0 -unidecode==1.3.7 -whisper-timestamped==1.14.2 -openai -python-dotenv -pypinyin==0.50.0 -cn2an==0.5.22 -jieba==0.42.1 -langid==1.1.6 -git+https://github.com/myshell-ai/MeloTTS.git -git+https://github.com/myshell-ai/OpenVoice.git diff --git a/backend/python/openvoice/requirements.txt b/backend/python/openvoice/requirements.txt deleted file 
mode 100644 index 62b886bb..00000000 --- a/backend/python/openvoice/requirements.txt +++ /dev/null @@ -1,17 +0,0 @@ -grpcio==1.69.0 -protobuf -librosa -faster-whisper -inflect -unidecode -openai -python-dotenv -pypinyin -cn2an==0.5.22 -numpy==1.22.0 -networkx==2.8.8 -jieba==0.42.1 -gradio==5.9.1 -langid==1.1.6 -llvmlite==0.43.0 -setuptools \ No newline at end of file diff --git a/backend/python/openvoice/run.sh b/backend/python/openvoice/run.sh deleted file mode 100755 index 375c07e5..00000000 --- a/backend/python/openvoice/run.sh +++ /dev/null @@ -1,4 +0,0 @@ -#!/bin/bash -source $(dirname $0)/../common/libbackend.sh - -startBackend $@ \ No newline at end of file diff --git a/backend/python/openvoice/test.py b/backend/python/openvoice/test.py deleted file mode 100644 index 82f08785..00000000 --- a/backend/python/openvoice/test.py +++ /dev/null @@ -1,82 +0,0 @@ -""" -A test script to test the gRPC service -""" -import unittest -import subprocess -import time -import backend_pb2 -import backend_pb2_grpc - -import grpc - - -class TestBackendServicer(unittest.TestCase): - """ - TestBackendServicer is the class that tests the gRPC service - """ - def setUp(self): - """ - This method sets up the gRPC service by starting the server - """ - self.service = subprocess.Popen(["python3", "backend.py", "--addr", "localhost:50051"]) - time.sleep(30) - - def tearDown(self) -> None: - """ - This method tears down the gRPC service by terminating the server - """ - self.service.terminate() - self.service.wait() - - def test_server_startup(self): - """ - This method tests if the server starts up successfully - """ - try: - self.setUp() - with grpc.insecure_channel("localhost:50051") as channel: - stub = backend_pb2_grpc.BackendStub(channel) - response = stub.Health(backend_pb2.HealthMessage()) - self.assertEqual(response.message, b'OK') - except Exception as err: - print(err) - self.fail("Server failed to start") - finally: - self.tearDown() - - def test_load_model(self): - """ - 
This method tests if the model is loaded successfully - """ - try: - self.setUp() - with grpc.insecure_channel("localhost:50051") as channel: - stub = backend_pb2_grpc.BackendStub(channel) - response = stub.LoadModel(backend_pb2.ModelOptions(Model="checkpoints_v2", - Type="en-us")) - self.assertTrue(response.success) - self.assertEqual(response.message, "Model loaded successfully") - except Exception as err: - print(err) - self.fail("LoadModel service failed") - finally: - self.tearDown() - - def test_tts(self): - """ - This method tests if the embeddings are generated successfully - """ - try: - self.setUp() - with grpc.insecure_channel("localhost:50051") as channel: - stub = backend_pb2_grpc.BackendStub(channel) - response = stub.LoadModel(backend_pb2.ModelOptions(Model="dingzhen")) - self.assertTrue(response.success) - tts_request = backend_pb2.TTSRequest(text="80s TV news production music hit for tonight's biggest story", voice="EN") - tts_response = stub.TTS(tts_request) - self.assertIsNotNone(tts_response) - except Exception as err: - print(err) - self.fail("TTS service failed") - finally: - self.tearDown() \ No newline at end of file diff --git a/backend/python/openvoice/test.sh b/backend/python/openvoice/test.sh deleted file mode 100755 index 6c0a840f..00000000 --- a/backend/python/openvoice/test.sh +++ /dev/null @@ -1,12 +0,0 @@ -#!/bin/bash -set -e - -source $(dirname $0)/../common/libbackend.sh - -# Download checkpoints if not present -if [ ! -d "checkpoints_v2" ]; then - wget https://myshell-public-repo-host.s3.amazonaws.com/openvoice/checkpoints_v2_0417.zip -O checkpoints_v2.zip - unzip checkpoints_v2.zip -fi - -runUnittests