From 7d0ac1ea3f5faf8047623f5cb92df23bdbd1f393 Mon Sep 17 00:00:00 2001
From: Ettore Di Giacinto
Date: Fri, 17 Jan 2025 09:35:10 +0100
Subject: [PATCH] chore(vall-e-x): Drop backend (#4619)

There are many new architectures that are SOTA and replace vall-e-x
nowadays.

Signed-off-by: Ettore Di Giacinto
---
 .github/dependabot.yml                        |   4 -
 .github/workflows/test-extra.yml              |  20 ---
 Dockerfile                                    |   7 +-
 Makefile                                      |  13 +-
 backend/python/vall-e-x/.gitignore            |   1 -
 backend/python/vall-e-x/Makefile              |  33 ----
 backend/python/vall-e-x/README.md             |   5 -
 backend/python/vall-e-x/backend.py            | 141 ------------------
 backend/python/vall-e-x/install.sh            |  22 ---
 backend/python/vall-e-x/requirements-cpu.txt  |   3 -
 .../python/vall-e-x/requirements-cublas11.txt |   4 -
 .../python/vall-e-x/requirements-cublas12.txt |   3 -
 .../python/vall-e-x/requirements-hipblas.txt  |   4 -
 .../python/vall-e-x/requirements-intel.txt    |   7 -
 backend/python/vall-e-x/requirements.txt      |   4 -
 backend/python/vall-e-x/run.sh                |   6 -
 backend/python/vall-e-x/test.py               |  81 ----------
 backend/python/vall-e-x/test.sh               |   7 -
 core/backend/options.go                       |   2 +-
 core/config/backend_config.go                 |   7 +-
 20 files changed, 6 insertions(+), 368 deletions(-)
 delete mode 100644 backend/python/vall-e-x/.gitignore
 delete mode 100644 backend/python/vall-e-x/Makefile
 delete mode 100644 backend/python/vall-e-x/README.md
 delete mode 100644 backend/python/vall-e-x/backend.py
 delete mode 100755 backend/python/vall-e-x/install.sh
 delete mode 100644 backend/python/vall-e-x/requirements-cpu.txt
 delete mode 100644 backend/python/vall-e-x/requirements-cublas11.txt
 delete mode 100644 backend/python/vall-e-x/requirements-cublas12.txt
 delete mode 100644 backend/python/vall-e-x/requirements-hipblas.txt
 delete mode 100644 backend/python/vall-e-x/requirements-intel.txt
 delete mode 100644 backend/python/vall-e-x/requirements.txt
 delete mode 100755 backend/python/vall-e-x/run.sh
 delete mode 100644 backend/python/vall-e-x/test.py
 delete mode 100755 backend/python/vall-e-x/test.sh

diff --git a/.github/dependabot.yml b/.github/dependabot.yml
index fcd6c88c..8fa0cca5 100644
--- a/.github/dependabot.yml
+++ b/.github/dependabot.yml
@@ -85,10 +85,6 @@ updates:
     directory: "/backend/python/transformers-musicgen"
     schedule:
       interval: "weekly"
-  - package-ecosystem: "pip"
-    directory: "/backend/python/vall-e-x"
-    schedule:
-      interval: "weekly"
   - package-ecosystem: "pip"
     directory: "/backend/python/vllm"
     schedule:
diff --git a/.github/workflows/test-extra.yml b/.github/workflows/test-extra.yml
index a2c34872..3c2fee37 100644
--- a/.github/workflows/test-extra.yml
+++ b/.github/workflows/test-extra.yml
@@ -260,26 +260,6 @@ jobs:
 #      run: |
 #        make --jobs=5 --output-sync=target -C backend/python/vllm
 #        make --jobs=5 --output-sync=target -C backend/python/vllm test
-  tests-vallex:
-    runs-on: ubuntu-latest
-    steps:
-      - name: Clone
-        uses: actions/checkout@v4
-        with:
-          submodules: true
-      - name: Dependencies
-        run: |
-          sudo apt-get update
-          sudo apt-get install build-essential ffmpeg
-          # Install UV
-          curl -LsSf https://astral.sh/uv/install.sh | sh
-          sudo apt-get install -y ca-certificates cmake curl patch python3-pip
-          sudo apt-get install -y libopencv-dev
-          pip install --user --no-cache-dir grpcio-tools==1.64.1
-      - name: Test vall-e-x
-        run: |
-          make --jobs=5 --output-sync=target -C backend/python/vall-e-x
-          make --jobs=5 --output-sync=target -C backend/python/vall-e-x test
 
   tests-coqui:
     runs-on: ubuntu-latest
diff --git a/Dockerfile b/Dockerfile
index 481edf90..354ef298 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -15,7 +15,7 @@ ARG TARGETARCH
 ARG TARGETVARIANT
 
 ENV DEBIAN_FRONTEND=noninteractive
-ENV EXTERNAL_GRPC_BACKENDS="coqui:/build/backend/python/coqui/run.sh,huggingface-embeddings:/build/backend/python/sentencetransformers/run.sh,transformers:/build/backend/python/transformers/run.sh,sentencetransformers:/build/backend/python/sentencetransformers/run.sh,rerankers:/build/backend/python/rerankers/run.sh,autogptq:/build/backend/python/autogptq/run.sh,bark:/build/backend/python/bark/run.sh,diffusers:/build/backend/python/diffusers/run.sh,openvoice:/build/backend/python/openvoice/run.sh,kokoro:/build/backend/python/kokoro/run.sh,vall-e-x:/build/backend/python/vall-e-x/run.sh,vllm:/build/backend/python/vllm/run.sh,mamba:/build/backend/python/mamba/run.sh,exllama2:/build/backend/python/exllama2/run.sh,transformers-musicgen:/build/backend/python/transformers-musicgen/run.sh,parler-tts:/build/backend/python/parler-tts/run.sh"
+ENV EXTERNAL_GRPC_BACKENDS="coqui:/build/backend/python/coqui/run.sh,huggingface-embeddings:/build/backend/python/sentencetransformers/run.sh,transformers:/build/backend/python/transformers/run.sh,sentencetransformers:/build/backend/python/sentencetransformers/run.sh,rerankers:/build/backend/python/rerankers/run.sh,autogptq:/build/backend/python/autogptq/run.sh,bark:/build/backend/python/bark/run.sh,diffusers:/build/backend/python/diffusers/run.sh,openvoice:/build/backend/python/openvoice/run.sh,kokoro:/build/backend/python/kokoro/run.sh,vllm:/build/backend/python/vllm/run.sh,mamba:/build/backend/python/mamba/run.sh,exllama2:/build/backend/python/exllama2/run.sh,transformers-musicgen:/build/backend/python/transformers-musicgen/run.sh,parler-tts:/build/backend/python/parler-tts/run.sh"
 
 
 RUN apt-get update && \
@@ -453,10 +453,7 @@ RUN if [[ ( "${EXTRA_BACKENDS}" =~ "coqui" || -z "${EXTRA_BACKENDS}" ) && "$IMAG
         make -C backend/python/transformers-musicgen \
     ; fi
 
-RUN if [[ ( "${EXTRA_BACKENDS}" =~ "vall-e-x" || -z "${EXTRA_BACKENDS}" ) && "$IMAGE_TYPE" == "extras" ]]; then \
-        make -C backend/python/vall-e-x \
-    ; fi && \
-    if [[ ( "${EXTRA_BACKENDS}" =~ "kokoro" || -z "${EXTRA_BACKENDS}" ) && "$IMAGE_TYPE" == "extras" ]]; then \
+RUN if [[ ( "${EXTRA_BACKENDS}" =~ "kokoro" || -z "${EXTRA_BACKENDS}" ) && "$IMAGE_TYPE" == "extras" ]]; then \
         make -C backend/python/kokoro \
     ; fi && \
     if [[ ( "${EXTRA_BACKENDS}" =~ "openvoice" || -z "${EXTRA_BACKENDS}" ) && "$IMAGE_TYPE" == "extras" ]]; then \
diff --git a/Makefile b/Makefile
index 49c81950..1983f568 100644
--- a/Makefile
+++ b/Makefile
@@ -583,10 +583,10 @@ protogen-go-clean:
 	$(RM) bin/*
 
 .PHONY: protogen-python
-protogen-python: autogptq-protogen bark-protogen coqui-protogen diffusers-protogen exllama2-protogen mamba-protogen rerankers-protogen sentencetransformers-protogen transformers-protogen parler-tts-protogen transformers-musicgen-protogen vall-e-x-protogen kokoro-protogen vllm-protogen openvoice-protogen
+protogen-python: autogptq-protogen bark-protogen coqui-protogen diffusers-protogen exllama2-protogen mamba-protogen rerankers-protogen sentencetransformers-protogen transformers-protogen parler-tts-protogen transformers-musicgen-protogen kokoro-protogen vllm-protogen openvoice-protogen
 
 .PHONY: protogen-python-clean
-protogen-python-clean: autogptq-protogen-clean bark-protogen-clean coqui-protogen-clean diffusers-protogen-clean exllama2-protogen-clean mamba-protogen-clean sentencetransformers-protogen-clean rerankers-protogen-clean transformers-protogen-clean transformers-musicgen-protogen-clean parler-tts-protogen-clean vall-e-x-protogen-clean kokoro-protogen-clean vllm-protogen-clean openvoice-protogen-clean
+protogen-python-clean: autogptq-protogen-clean bark-protogen-clean coqui-protogen-clean diffusers-protogen-clean exllama2-protogen-clean mamba-protogen-clean sentencetransformers-protogen-clean rerankers-protogen-clean transformers-protogen-clean transformers-musicgen-protogen-clean parler-tts-protogen-clean kokoro-protogen-clean vllm-protogen-clean openvoice-protogen-clean
 
 .PHONY: autogptq-protogen
 autogptq-protogen:
@@ -676,14 +676,6 @@ transformers-musicgen-protogen:
 transformers-musicgen-protogen-clean:
 	$(MAKE) -C backend/python/transformers-musicgen protogen-clean
 
-.PHONY: vall-e-x-protogen
-vall-e-x-protogen:
-	$(MAKE) -C backend/python/vall-e-x protogen
-
-.PHONY: vall-e-x-protogen-clean
-vall-e-x-protogen-clean:
-	$(MAKE) -C backend/python/vall-e-x protogen-clean
-
 .PHONY: kokoro-protogen
 kokoro-protogen:
 	$(MAKE) -C backend/python/kokoro protogen
@@ -722,7 +714,6 @@ prepare-extra-conda-environments: protogen-python
 	$(MAKE) -C backend/python/transformers
 	$(MAKE) -C backend/python/transformers-musicgen
 	$(MAKE) -C backend/python/parler-tts
-	$(MAKE) -C backend/python/vall-e-x
 	$(MAKE) -C backend/python/kokoro
 	$(MAKE) -C backend/python/openvoice
 	$(MAKE) -C backend/python/exllama2
diff --git a/backend/python/vall-e-x/.gitignore b/backend/python/vall-e-x/.gitignore
deleted file mode 100644
index 1d3a0654..00000000
--- a/backend/python/vall-e-x/.gitignore
+++ /dev/null
@@ -1 +0,0 @@
-source
\ No newline at end of file
diff --git a/backend/python/vall-e-x/Makefile b/backend/python/vall-e-x/Makefile
deleted file mode 100644
index a3ca32a3..00000000
--- a/backend/python/vall-e-x/Makefile
+++ /dev/null
@@ -1,33 +0,0 @@
-ifneq (,$(findstring sycl,$(BUILD_TYPE)))
-export SKIP_CONDA=1
-endif
-
-.PHONY: ttsvalle
-ttsvalle: protogen
-	bash install.sh
-
-.PHONY: run
-run: protogen
-	@echo "Running ttsvalle..."
-	bash run.sh
-	@echo "ttsvalle run."
-
-.PHONY: test
-test: protogen
-	@echo "Testing valle..."
-	bash test.sh
-	@echo "valle tested."
-
-.PHONY: protogen
-protogen: backend_pb2_grpc.py backend_pb2.py
-
-.PHONY: protogen-clean
-protogen-clean:
-	$(RM) backend_pb2_grpc.py backend_pb2.py
-
-backend_pb2_grpc.py backend_pb2.py:
-	python3 -m grpc_tools.protoc -I../.. --python_out=. --grpc_python_out=. backend.proto
-
-.PHONY: clean
-clean: protogen-clean
-	rm -rf source venv __pycache__
\ No newline at end of file
diff --git a/backend/python/vall-e-x/README.md b/backend/python/vall-e-x/README.md
deleted file mode 100644
index a3a93361..00000000
--- a/backend/python/vall-e-x/README.md
+++ /dev/null
@@ -1,5 +0,0 @@
-# Creating a separate environment for the ttsvalle project
-
-```
-make ttsvalle
-```
\ No newline at end of file
diff --git a/backend/python/vall-e-x/backend.py b/backend/python/vall-e-x/backend.py
deleted file mode 100644
index fc9d93bd..00000000
--- a/backend/python/vall-e-x/backend.py
+++ /dev/null
@@ -1,141 +0,0 @@
-#!/usr/bin/env python3
-
-from concurrent import futures
-import argparse
-import signal
-import sys
-import os
-import time
-import backend_pb2
-import backend_pb2_grpc
-
-import grpc
-
-from utils.generation import SAMPLE_RATE, generate_audio, preload_models
-from scipy.io.wavfile import write as write_wav
-from utils.prompt_making import make_prompt
-
-_ONE_DAY_IN_SECONDS = 60 * 60 * 24
-
-# If MAX_WORKERS are specified in the environment use it, otherwise default to 1
-MAX_WORKERS = int(os.environ.get('PYTHON_GRPC_MAX_WORKERS', '1'))
-
-# Implement the BackendServicer class with the service methods
-class BackendServicer(backend_pb2_grpc.BackendServicer):
-    """
-    gRPC servicer for backend services.
-    """
-    def Health(self, request, context):
-        """
-        Health check service.
-
-        Args:
-            request: A backend_pb2.HealthRequest instance.
-            context: A grpc.ServicerContext instance.
-
-        Returns:
-            A backend_pb2.Reply instance with message "OK".
-        """
-        return backend_pb2.Reply(message=bytes("OK", 'utf-8'))
-
-    def LoadModel(self, request, context):
-        """
-        Load model service.
-
-        Args:
-            request: A backend_pb2.LoadModelRequest instance.
-            context: A grpc.ServicerContext instance.
-
-        Returns:
-            A backend_pb2.Result instance with message "Model loaded successfully" and success=True if successful.
-            A backend_pb2.Result instance with success=False and error message if unsuccessful.
-        """
-        model_name = request.Model
-        try:
-            print("Preparing models, please wait", file=sys.stderr)
-            # download and load all models
-            preload_models()
-            self.clonedVoice = False
-            # Assume directory from request.ModelFile.
-            # Only if request.LoraAdapter it's not an absolute path
-            if request.AudioPath and request.ModelFile != "" and not os.path.isabs(request.AudioPath):
-                # get base path of modelFile
-                modelFileBase = os.path.dirname(request.ModelFile)
-                # modify LoraAdapter to be relative to modelFileBase
-                request.AudioPath = os.path.join(modelFileBase, request.AudioPath)
-            if request.AudioPath != "":
-                print("Generating model", file=sys.stderr)
-                make_prompt(name=model_name, audio_prompt_path=request.AudioPath)
-                self.clonedVoice = True
-                ### Use given transcript
-                ##make_prompt(name=model_name, audio_prompt_path="paimon_prompt.wav",
-                ##            transcript="Just, what was that? Paimon thought we were gonna get eaten.")
-        except Exception as err:
-            return backend_pb2.Result(success=False, message=f"Unexpected {err=}, {type(err)=}")
-        # Implement your logic here for the LoadModel service
-        # Replace this with your desired response
-        return backend_pb2.Result(message="Model loaded successfully", success=True)
-
-    def TTS(self, request, context):
-        """
-        Text-to-speech service.
-
-        Args:
-            request: A backend_pb2.TTSRequest instance.
-            context: A grpc.ServicerContext instance.
-
-        Returns:
-            A backend_pb2.Result instance with success=True if successful.
-            A backend_pb2.Result instance with success=False and error message if unsuccessful.
-        """
-        model = request.model
-        print(request, file=sys.stderr)
-        try:
-            audio_array = None
-            if model != "":
-                if self.clonedVoice:
-                    model = os.path.basename(request.model)
-                audio_array = generate_audio(request.text, prompt=model)
-            else:
-                audio_array = generate_audio(request.text)
-            print("saving to", request.dst, file=sys.stderr)
-            # save audio to disk
-            write_wav(request.dst, SAMPLE_RATE, audio_array)
-            print("saved to", request.dst, file=sys.stderr)
-            print("tts for", file=sys.stderr)
-            print(request, file=sys.stderr)
-        except Exception as err:
-            return backend_pb2.Result(success=False, message=f"Unexpected {err=}, {type(err)=}")
-        return backend_pb2.Result(success=True)
-
-def serve(address):
-    server = grpc.server(futures.ThreadPoolExecutor(max_workers=MAX_WORKERS))
-    backend_pb2_grpc.add_BackendServicer_to_server(BackendServicer(), server)
-    server.add_insecure_port(address)
-    server.start()
-    print("Server started. Listening on: " + address, file=sys.stderr)
-
-    # Define the signal handler function
-    def signal_handler(sig, frame):
-        print("Received termination signal. Shutting down...")
-        server.stop(0)
-        sys.exit(0)
-
-    # Set the signal handlers for SIGINT and SIGTERM
-    signal.signal(signal.SIGINT, signal_handler)
-    signal.signal(signal.SIGTERM, signal_handler)
-
-    try:
-        while True:
-            time.sleep(_ONE_DAY_IN_SECONDS)
-    except KeyboardInterrupt:
-        server.stop(0)
-
-if __name__ == "__main__":
-    parser = argparse.ArgumentParser(description="Run the gRPC server.")
-    parser.add_argument(
-        "--addr", default="localhost:50051", help="The address to bind the server to."
-    )
-    args = parser.parse_args()
-
-    serve(args.addr)
diff --git a/backend/python/vall-e-x/install.sh b/backend/python/vall-e-x/install.sh
deleted file mode 100755
index c0cce96a..00000000
--- a/backend/python/vall-e-x/install.sh
+++ /dev/null
@@ -1,22 +0,0 @@
-#!/bin/bash
-set -e
-
-VALL_E_X_VERSION=3faaf8ccadb154d63b38070caf518ce9309ea0f4
-
-source $(dirname $0)/../common/libbackend.sh
-
-# This is here because the Intel pip index is broken and returns 200 status codes for every package name, it just doesn't return any package links.
-# This makes uv think that the package exists in the Intel pip index, and by default it stops looking at other pip indexes once it finds a match.
-# We need uv to continue falling through to the pypi default index to find optimum[openvino] in the pypi index
-# the --upgrade actually allows us to *downgrade* torch to the version provided in the Intel pip index
-if [ "x${BUILD_PROFILE}" == "xintel" ]; then
-    EXTRA_PIP_INSTALL_FLAGS+=" --upgrade --index-strategy=unsafe-first-match"
-fi
-
-installRequirements
-
-git clone https://github.com/Plachtaa/VALL-E-X.git ${MY_DIR}/source
-pushd ${MY_DIR}/source && git checkout -b build ${VALL_E_X_VERSION} && popd
-uv pip install ${BUILD_ISOLATION_FLAG} --requirement ${MY_DIR}/source/requirements.txt
-
-cp -v ./*py $MY_DIR/source/
diff --git a/backend/python/vall-e-x/requirements-cpu.txt b/backend/python/vall-e-x/requirements-cpu.txt
deleted file mode 100644
index 0aad8812..00000000
--- a/backend/python/vall-e-x/requirements-cpu.txt
+++ /dev/null
@@ -1,3 +0,0 @@
-accelerate
-torch==2.4.1
-torchaudio==2.4.1
\ No newline at end of file
diff --git a/backend/python/vall-e-x/requirements-cublas11.txt b/backend/python/vall-e-x/requirements-cublas11.txt
deleted file mode 100644
index c45de5b7..00000000
--- a/backend/python/vall-e-x/requirements-cublas11.txt
+++ /dev/null
@@ -1,4 +0,0 @@
---extra-index-url https://download.pytorch.org/whl/cu118
-accelerate
-torch==2.4.1+cu118
-torchaudio==2.4.1+cu118
\ No newline at end of file
diff --git a/backend/python/vall-e-x/requirements-cublas12.txt b/backend/python/vall-e-x/requirements-cublas12.txt
deleted file mode 100644
index 0aad8812..00000000
--- a/backend/python/vall-e-x/requirements-cublas12.txt
+++ /dev/null
@@ -1,3 +0,0 @@
-accelerate
-torch==2.4.1
-torchaudio==2.4.1
\ No newline at end of file
diff --git a/backend/python/vall-e-x/requirements-hipblas.txt b/backend/python/vall-e-x/requirements-hipblas.txt
deleted file mode 100644
index fc43790a..00000000
--- a/backend/python/vall-e-x/requirements-hipblas.txt
+++ /dev/null
@@ -1,4 +0,0 @@
---extra-index-url https://download.pytorch.org/whl/rocm6.0
-accelerate
-torch==2.3.0+rocm6.0
-torchaudio==2.3.0+rocm6.0
\ No newline at end of file
diff --git a/backend/python/vall-e-x/requirements-intel.txt b/backend/python/vall-e-x/requirements-intel.txt
deleted file mode 100644
index efcf885a..00000000
--- a/backend/python/vall-e-x/requirements-intel.txt
+++ /dev/null
@@ -1,7 +0,0 @@
---extra-index-url https://pytorch-extension.intel.com/release-whl/stable/xpu/us/
-intel-extension-for-pytorch==2.3.110+xpu
-accelerate
-torch==2.3.1+cxx11.abi
-torchaudio==2.3.1+cxx11.abi
-optimum[openvino]
-oneccl_bind_pt==2.3.100+xpu
\ No newline at end of file
diff --git a/backend/python/vall-e-x/requirements.txt b/backend/python/vall-e-x/requirements.txt
deleted file mode 100644
index a1eea776..00000000
--- a/backend/python/vall-e-x/requirements.txt
+++ /dev/null
@@ -1,4 +0,0 @@
-grpcio==1.69.0
-protobuf
-certifi
-setuptools
\ No newline at end of file
diff --git a/backend/python/vall-e-x/run.sh b/backend/python/vall-e-x/run.sh
deleted file mode 100755
index 4b0682ad..00000000
--- a/backend/python/vall-e-x/run.sh
+++ /dev/null
@@ -1,6 +0,0 @@
-#!/bin/bash
-BACKEND_FILE="${MY_DIR}/source/backend.py"
-
-source $(dirname $0)/../common/libbackend.sh
-
-startBackend $@
\ No newline at end of file
diff --git a/backend/python/vall-e-x/test.py b/backend/python/vall-e-x/test.py
deleted file mode 100644
index f31a148c..00000000
--- a/backend/python/vall-e-x/test.py
+++ /dev/null
@@ -1,81 +0,0 @@
-"""
-A test script to test the gRPC service
-"""
-import unittest
-import subprocess
-import time
-import backend_pb2
-import backend_pb2_grpc
-
-import grpc
-
-
-class TestBackendServicer(unittest.TestCase):
-    """
-    TestBackendServicer is the class that tests the gRPC service
-    """
-    def setUp(self):
-        """
-        This method sets up the gRPC service by starting the server
-        """
-        self.service = subprocess.Popen(["python3", "backend.py", "--addr", "localhost:50051"])
-        time.sleep(10)
-
-    def tearDown(self) -> None:
-        """
-        This method tears down the gRPC service by terminating the server
-        """
-        self.service.terminate()
-        self.service.wait()
-
-    def test_server_startup(self):
-        """
-        This method tests if the server starts up successfully
-        """
-        try:
-            self.setUp()
-            with grpc.insecure_channel("localhost:50051") as channel:
-                stub = backend_pb2_grpc.BackendStub(channel)
-                response = stub.Health(backend_pb2.HealthMessage())
-                self.assertEqual(response.message, b'OK')
-        except Exception as err:
-            print(err)
-            self.fail("Server failed to start")
-        finally:
-            self.tearDown()
-
-    def test_load_model(self):
-        """
-        This method tests if the model is loaded successfully
-        """
-        try:
-            self.setUp()
-            with grpc.insecure_channel("localhost:50051") as channel:
-                stub = backend_pb2_grpc.BackendStub(channel)
-                response = stub.LoadModel(backend_pb2.ModelOptions(Model="dingzhen"))
-                self.assertTrue(response.success)
-                self.assertEqual(response.message, "Model loaded successfully")
-        except Exception as err:
-            print(err)
-            self.fail("LoadModel service failed")
-        finally:
-            self.tearDown()
-
-    def test_tts(self):
-        """
-        This method tests if the embeddings are generated successfully
-        """
-        try:
-            self.setUp()
-            with grpc.insecure_channel("localhost:50051") as channel:
-                stub = backend_pb2_grpc.BackendStub(channel)
-                response = stub.LoadModel(backend_pb2.ModelOptions(Model="dingzhen"))
-                self.assertTrue(response.success)
-                tts_request = backend_pb2.TTSRequest(text="80s TV news production music hit for tonight's biggest story")
-                tts_response = stub.TTS(tts_request)
-                self.assertIsNotNone(tts_response)
-        except Exception as err:
-            print(err)
-            self.fail("TTS service failed")
-        finally:
-            self.tearDown()
\ No newline at end of file
diff --git a/backend/python/vall-e-x/test.sh b/backend/python/vall-e-x/test.sh
deleted file mode 100755
index 57336b39..00000000
--- a/backend/python/vall-e-x/test.sh
+++ /dev/null
@@ -1,7 +0,0 @@
-#!/bin/bash
-set -e
-TEST_FILE="./source/test.py"
-
-source $(dirname $0)/../common/libbackend.sh
-
-runUnittests
diff --git a/core/backend/options.go b/core/backend/options.go
index f6247c60..92a42893 100644
--- a/core/backend/options.go
+++ b/core/backend/options.go
@@ -140,7 +140,7 @@ func grpcModelOpts(c config.BackendConfig) *pb.ModelOptions {
 		NBatch: int32(b),
 		NoMulMatQ: c.NoMulMatQ,
 		DraftModel: c.DraftModel,
-		AudioPath: c.VallE.AudioPath,
+		AudioPath: c.AudioPath,
 		Quantization: c.Quantization,
 		LoadFormat: c.LoadFormat,
 		GPUMemoryUtilization: c.GPUMemoryUtilization,
diff --git a/core/config/backend_config.go b/core/config/backend_config.go
index f07ec3d3..bb2fa643 100644
--- a/core/config/backend_config.go
+++ b/core/config/backend_config.go
@@ -21,8 +21,7 @@ type TTSConfig struct {
 	// Voice wav path or id
 	Voice string `yaml:"voice"`
 
-	// Vall-e-x
-	VallE VallE `yaml:"vall-e"`
+	AudioPath string `yaml:"audio_path"`
 }
 
 type BackendConfig struct {
@@ -82,10 +81,6 @@ type File struct {
 	URI downloader.URI `yaml:"uri" json:"uri"`
 }
 
-type VallE struct {
-	AudioPath string `yaml:"audio_path"`
-}
-
 type FeatureFlag map[string]*bool
 
 func (ff FeatureFlag) Enabled(s string) bool {