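## Backend gallery index for LocalAI. Each entry describes one installable
## backend image:
##   name:  unique entry name, encoding the hardware target and the backend
##   uri:   OCI image that ships the backend
##   alias: shorter, stable name that can be used to refer to the entry
## Optional metadata (license, urls, tags, icon, description) can be attached
## to an entry and is inherited by entries that merge it (see the vLLM anchor
## below).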
## vLLM
- &vllm
  name: "cuda11-vllm"
  uri: "quay.io/go-skynet/local-ai-backends:latest-gpu-nvidia-cuda-11-vllm"
  license: apache-2.0
  urls:
    - https://github.com/vllm-project/vllm
  tags:
    - text-to-text
    - multimodal
    - GPTQ
    - AWQ
    - AutoRound
    - INT4
    - INT8
    - FP8
  icon: https://raw.githubusercontent.com/vllm-project/vllm/main/docs/assets/logos/vllm-logo-text-dark.png
  description: |
    vLLM is a fast and easy-to-use library for LLM inference and serving.

    Originally developed in the Sky Computing Lab at UC Berkeley, vLLM has evolved into a community-driven project with contributions from both academia and industry.

    vLLM is fast with:
    - State-of-the-art serving throughput
    - Efficient management of attention key and value memory with PagedAttention
    - Continuous batching of incoming requests
    - Fast model execution with CUDA/HIP graph
    - Quantizations: GPTQ, AWQ, AutoRound, INT4, INT8, and FP8
    - Optimized CUDA kernels, including integration with FlashAttention and FlashInfer
    - Speculative decoding
    - Chunked prefill
  alias: "vllm"
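## The entry above defines a YAML anchor (&vllm). The entries below merge it
## with "!!merge <<: *vllm" and override only "name" and "uri", so license,
## urls, tags, icon, description, and the "vllm" alias are all inherited from
## cuda11-vllm.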
- !!merge <<: *vllm
  name: "cuda12-vllm"
  uri: "quay.io/go-skynet/local-ai-backends:latest-gpu-nvidia-cuda-12-vllm"
- !!merge <<: *vllm
  name: "rocm-vllm"
  uri: "quay.io/go-skynet/local-ai-backends:latest-gpu-rocm-hipblas-vllm"
- !!merge <<: *vllm
  name: "intel-sycl-f32-vllm"
  uri: "quay.io/go-skynet/local-ai-backends:latest-gpu-intel-sycl-f32-vllm"
- !!merge <<: *vllm
  name: "intel-sycl-f16-vllm"
  uri: "quay.io/go-skynet/local-ai-backends:latest-gpu-intel-sycl-f16-vllm"
- !!merge <<: *vllm
  name: "cuda11-vllm-master"
  uri: "quay.io/go-skynet/local-ai-backends:master-gpu-nvidia-cuda-11-vllm"
- !!merge <<: *vllm
  name: "cuda12-vllm-master"
  uri: "quay.io/go-skynet/local-ai-backends:master-gpu-nvidia-cuda-12-vllm"
- !!merge <<: *vllm
  name: "rocm-vllm-master"
  uri: "quay.io/go-skynet/local-ai-backends:master-gpu-rocm-hipblas-vllm"
- !!merge <<: *vllm
  name: "intel-sycl-f32-vllm-master"
  uri: "quay.io/go-skynet/local-ai-backends:master-gpu-intel-sycl-f32-vllm"
- !!merge <<: *vllm
  name: "intel-sycl-f16-vllm-master"
  uri: "quay.io/go-skynet/local-ai-backends:master-gpu-intel-sycl-f16-vllm"
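## A minimal usage sketch (an assumption for illustration, not part of this
## index): a LocalAI model definition would typically pick one of the entries
## above through its alias via the "backend" field, e.g.:
##
##   name: my-vllm-model                  # hypothetical model name
##   backend: vllm                        # the shared alias defined above
##   parameters:
##     model: <some-huggingface-model-id> # placeholder model reference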
## Rerankers
- name: "cuda11-rerankers"
  uri: "quay.io/go-skynet/local-ai-backends:latest-gpu-nvidia-cuda-11-rerankers"
  alias: "cuda11-rerankers"
- name: "cuda12-rerankers"
  uri: "quay.io/go-skynet/local-ai-backends:latest-gpu-nvidia-cuda-12-rerankers"
  alias: "cuda12-rerankers"
- name: "intel-sycl-f32-rerankers"
  uri: "quay.io/go-skynet/local-ai-backends:latest-gpu-intel-sycl-f32-rerankers"
  alias: "intel-sycl-f32-rerankers"
- name: "intel-sycl-f16-rerankers"
  uri: "quay.io/go-skynet/local-ai-backends:latest-gpu-intel-sycl-f16-rerankers"
  alias: "intel-sycl-f16-rerankers"
- name: "rocm-rerankers"
  uri: "quay.io/go-skynet/local-ai-backends:latest-gpu-rocm-hipblas-rerankers"
  alias: "rocm-rerankers"
- name: "cuda11-rerankers-master"
  uri: "quay.io/go-skynet/local-ai-backends:master-gpu-nvidia-cuda-11-rerankers"
  alias: "rerankers"
- name: "cuda12-rerankers-master"
  uri: "quay.io/go-skynet/local-ai-backends:master-gpu-nvidia-cuda-12-rerankers"
  alias: "rerankers"
- name: "rocm-rerankers-master"
  uri: "quay.io/go-skynet/local-ai-backends:master-gpu-rocm-hipblas-rerankers"
  alias: "rerankers"
- name: "intel-sycl-f32-rerankers-master"
  uri: "quay.io/go-skynet/local-ai-backends:master-gpu-intel-sycl-f32-rerankers"
  alias: "rerankers"
- name: "intel-sycl-f16-rerankers-master"
  uri: "quay.io/go-skynet/local-ai-backends:master-gpu-intel-sycl-f16-rerankers"
  alias: "rerankers"
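## Tag convention used throughout this index: "latest-*" image tags appear to
## track the most recent release, while "master-*" tags track builds from the
## master branch. The *-master entries share a bare alias (e.g. "rerankers"),
## whereas release entries keep hardware-specific aliases.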
## Transformers
- name: "cuda12-transformers"
  uri: "quay.io/go-skynet/local-ai-backends:latest-gpu-nvidia-cuda-12-transformers"
  alias: "cuda12-transformers"
- name: "rocm-transformers"
  uri: "quay.io/go-skynet/local-ai-backends:latest-gpu-rocm-hipblas-transformers"
  alias: "rocm-transformers"
- name: "intel-sycl-f32-transformers"
  uri: "quay.io/go-skynet/local-ai-backends:latest-gpu-intel-sycl-f32-transformers"
  alias: "intel-sycl-f32-transformers"
- name: "intel-sycl-f16-transformers"
  uri: "quay.io/go-skynet/local-ai-backends:latest-gpu-intel-sycl-f16-transformers"
  alias: "intel-sycl-f16-transformers"
- name: "cuda11-transformers-master"
  uri: "quay.io/go-skynet/local-ai-backends:master-gpu-nvidia-cuda-11-transformers"
  alias: "transformers"
- name: "cuda11-transformers"
  uri: "quay.io/go-skynet/local-ai-backends:latest-gpu-nvidia-cuda-11-transformers"
  alias: "cuda11-transformers"
- name: "cuda12-transformers-master"
  uri: "quay.io/go-skynet/local-ai-backends:master-gpu-nvidia-cuda-12-transformers"
  alias: "transformers"
- name: "rocm-transformers-master"
  uri: "quay.io/go-skynet/local-ai-backends:master-gpu-rocm-hipblas-transformers"
  alias: "transformers"
- name: "intel-sycl-f32-transformers-master"
  uri: "quay.io/go-skynet/local-ai-backends:master-gpu-intel-sycl-f32-transformers"
  alias: "transformers"
- name: "intel-sycl-f16-transformers-master"
  uri: "quay.io/go-skynet/local-ai-backends:master-gpu-intel-sycl-f16-transformers"
  alias: "transformers"
## Diffusers
- name: "cuda12-diffusers"
  uri: "quay.io/go-skynet/local-ai-backends:latest-gpu-nvidia-cuda-12-diffusers"
  alias: "cuda12-diffusers"
- name: "rocm-diffusers"
  uri: "quay.io/go-skynet/local-ai-backends:latest-gpu-rocm-hipblas-diffusers"
  alias: "rocm-diffusers"
- name: "cuda11-diffusers"
  uri: "quay.io/go-skynet/local-ai-backends:latest-gpu-nvidia-cuda-11-diffusers"
  alias: "cuda11-diffusers"
- name: "intel-sycl-f32-diffusers"
  uri: "quay.io/go-skynet/local-ai-backends:latest-gpu-intel-sycl-f32-diffusers"
  alias: "intel-sycl-f32-diffusers"
- name: "cuda11-diffusers-master"
  uri: "quay.io/go-skynet/local-ai-backends:master-gpu-nvidia-cuda-11-diffusers"
  alias: "diffusers"
- name: "cuda12-diffusers-master"
  uri: "quay.io/go-skynet/local-ai-backends:master-gpu-nvidia-cuda-12-diffusers"
  alias: "diffusers"
- name: "rocm-diffusers-master"
  uri: "quay.io/go-skynet/local-ai-backends:master-gpu-rocm-hipblas-diffusers"
  alias: "diffusers"
- name: "intel-sycl-f32-diffusers-master"
  uri: "quay.io/go-skynet/local-ai-backends:master-gpu-intel-sycl-f32-diffusers"
  alias: "diffusers"
## exllama2
- name: "cuda11-exllama2"
  uri: "quay.io/go-skynet/local-ai-backends:latest-gpu-nvidia-cuda-11-exllama2"
  alias: "cuda11-exllama2"
- name: "cuda12-exllama2"
  uri: "quay.io/go-skynet/local-ai-backends:latest-gpu-nvidia-cuda-12-exllama2"
  alias: "cuda12-exllama2"
- name: "cuda11-exllama2-master"
  uri: "quay.io/go-skynet/local-ai-backends:master-gpu-nvidia-cuda-11-exllama2"
  alias: "exllama2"
- name: "cuda12-exllama2-master"
  uri: "quay.io/go-skynet/local-ai-backends:master-gpu-nvidia-cuda-12-exllama2"
  alias: "exllama2"
## kokoro
- name: "cuda11-kokoro-master"
  uri: "quay.io/go-skynet/local-ai-backends:master-gpu-nvidia-cuda-11-kokoro"
  alias: "kokoro"
- name: "cuda12-kokoro-master"
  uri: "quay.io/go-skynet/local-ai-backends:master-gpu-nvidia-cuda-12-kokoro"
  alias: "kokoro"
- name: "rocm-kokoro-master"
  uri: "quay.io/go-skynet/local-ai-backends:master-gpu-rocm-hipblas-kokoro"
  alias: "kokoro"
- name: "sycl-f32-kokoro"
  uri: "quay.io/go-skynet/local-ai-backends:latest-gpu-intel-sycl-f32-kokoro"
  alias: "kokoro"
- name: "sycl-f16-kokoro"
  uri: "quay.io/go-skynet/local-ai-backends:latest-gpu-intel-sycl-f16-kokoro"
  alias: "kokoro"
- name: "sycl-f16-kokoro-master"
  uri: "quay.io/go-skynet/local-ai-backends:master-gpu-intel-sycl-f16-kokoro"
  alias: "kokoro"
- name: "sycl-f32-kokoro-master"
  uri: "quay.io/go-skynet/local-ai-backends:master-gpu-intel-sycl-f32-kokoro"
  alias: "kokoro"
## faster-whisper
- name: "cuda11-faster-whisper-master"
  uri: "quay.io/go-skynet/local-ai-backends:master-gpu-nvidia-cuda-11-faster-whisper"
  alias: "faster-whisper"
- name: "cuda12-faster-whisper-master"
  uri: "quay.io/go-skynet/local-ai-backends:master-gpu-nvidia-cuda-12-faster-whisper"
  alias: "faster-whisper"
- name: "rocm-faster-whisper-master"
  uri: "quay.io/go-skynet/local-ai-backends:master-gpu-rocm-hipblas-faster-whisper"
  alias: "faster-whisper"
- name: "sycl-f32-faster-whisper"
  uri: "quay.io/go-skynet/local-ai-backends:latest-gpu-intel-sycl-f32-faster-whisper"
  alias: "faster-whisper"
- name: "sycl-f16-faster-whisper"
  uri: "quay.io/go-skynet/local-ai-backends:latest-gpu-intel-sycl-f16-faster-whisper"
  alias: "faster-whisper"
- name: "sycl-f32-faster-whisper-master"
  uri: "quay.io/go-skynet/local-ai-backends:master-gpu-intel-sycl-f32-faster-whisper"
  alias: "faster-whisper"
- name: "sycl-f16-faster-whisper-master"
  uri: "quay.io/go-skynet/local-ai-backends:master-gpu-intel-sycl-f16-faster-whisper"
  alias: "faster-whisper"
## coqui
- name: "cuda11-coqui-master"
  uri: "quay.io/go-skynet/local-ai-backends:master-gpu-nvidia-cuda-11-coqui"
  alias: "coqui"
- name: "cuda12-coqui-master"
  uri: "quay.io/go-skynet/local-ai-backends:master-gpu-nvidia-cuda-12-coqui"
  alias: "coqui"
- name: "rocm-coqui-master"
  uri: "quay.io/go-skynet/local-ai-backends:master-gpu-rocm-hipblas-coqui"
  alias: "coqui"
- name: "sycl-f32-coqui"
  uri: "quay.io/go-skynet/local-ai-backends:latest-gpu-intel-sycl-f32-coqui"
  alias: "coqui"
- name: "sycl-f16-coqui"
  uri: "quay.io/go-skynet/local-ai-backends:latest-gpu-intel-sycl-f16-coqui"
  alias: "coqui"
- name: "sycl-f32-coqui-master"
  uri: "quay.io/go-skynet/local-ai-backends:master-gpu-intel-sycl-f32-coqui"
  alias: "coqui"
- name: "sycl-f16-coqui-master"
  uri: "quay.io/go-skynet/local-ai-backends:master-gpu-intel-sycl-f16-coqui"
  alias: "coqui"
## bark
- name: "cuda11-bark-master"
  uri: "quay.io/go-skynet/local-ai-backends:master-gpu-nvidia-cuda-11-bark"
  alias: "bark"
- name: "cuda12-bark-master"
  uri: "quay.io/go-skynet/local-ai-backends:master-gpu-nvidia-cuda-12-bark"
  alias: "bark"
- name: "rocm-bark-master"
  uri: "quay.io/go-skynet/local-ai-backends:master-gpu-rocm-hipblas-bark"
  alias: "bark"
- name: "sycl-f32-bark"
  uri: "quay.io/go-skynet/local-ai-backends:latest-gpu-intel-sycl-f32-bark"
  alias: "bark"
- name: "sycl-f16-bark"
  uri: "quay.io/go-skynet/local-ai-backends:latest-gpu-intel-sycl-f16-bark"
  alias: "bark"
- name: "sycl-f32-bark-master"
  uri: "quay.io/go-skynet/local-ai-backends:master-gpu-intel-sycl-f32-bark"
  alias: "bark"
- name: "sycl-f16-bark-master"
  uri: "quay.io/go-skynet/local-ai-backends:master-gpu-intel-sycl-f16-bark"
  alias: "bark"
## chatterbox
- name: "cuda11-chatterbox-master"
  uri: "quay.io/go-skynet/local-ai-backends:master-gpu-nvidia-cuda-11-chatterbox"
  alias: "chatterbox"
- name: "cuda12-chatterbox-master"
  uri: "quay.io/go-skynet/local-ai-backends:master-gpu-nvidia-cuda-12-chatterbox"
  alias: "chatterbox"
- name: "cuda11-chatterbox"
  uri: "quay.io/go-skynet/local-ai-backends:latest-gpu-nvidia-cuda-11-chatterbox"
  alias: "chatterbox"
- name: "cuda12-chatterbox"
  uri: "quay.io/go-skynet/local-ai-backends:latest-gpu-nvidia-cuda-12-chatterbox"
  alias: "chatterbox"