chore(backend gallery): add linter

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
Author: Ettore Di Giacinto
Date:   2025-06-17 22:17:14 +02:00
parent 00bb5f01c0
commit e32f48beeb
2 changed files with 12 additions and 3 deletions


@@ -8,7 +8,7 @@ jobs:
     steps:
       - name: 'Checkout'
         uses: actions/checkout@master
-      - name: 'Yamllint'
+      - name: 'Yamllint model gallery'
         uses: karancode/yamllint-github-action@master
         with:
           yamllint_file_or_dir: 'gallery'
@@ -16,3 +16,11 @@ jobs:
           yamllint_comment: true
         env:
           GITHUB_ACCESS_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+      - name: 'Yamllint Backend gallery'
+        uses: karancode/yamllint-github-action@master
+        with:
+          yamllint_file_or_dir: 'backend'
+          yamllint_strict: false
+          yamllint_comment: true
+        env:
+          GITHUB_ACCESS_TOKEN: ${{ secrets.GITHUB_TOKEN }}
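
The step added above mirrors the existing model-gallery check: it runs yamllint over the backend/ directory in non-strict mode and is set to comment results on pull requests (yamllint_comment: true). As a rough, hypothetical sketch (not part of this commit), a local .yamllint configuration approximating the defaults the action relies on could look like:

    # Hypothetical .yamllint sketch; the CI step uses yamllint's built-in
    # defaults, so this file is illustrative only.
    extends: default

    rules:
      document-start:
        present: true       # expects a leading "---", as added to the backend index below
      line-length: disable  # assumed relaxation, not confirmed by this commit

With such a config, running yamllint against the backend directory locally should surface the same warnings the action would report on a pull request.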


@@ -1,10 +1,11 @@
+---
 ## vLLM
 - &vllm
   name: "cuda11-vllm"
   uri: "quay.io/go-skynet/local-ai-backends:latest-gpu-nvidia-cuda-11-vllm"
   license: apache-2.0
   urls:
     - https://github.com/vllm-project/vllm
   tags:
     - text-to-text
     - multimodal
@@ -238,7 +239,7 @@
   license: MIT
   name: "cuda11-faster-whisper-master"
   uri: "quay.io/go-skynet/local-ai-backends:master-gpu-nvidia-cuda-11-faster-whisper"
   alias: "faster-whisper"
 - !!merge <<: *faster-whisper
   name: "cuda12-faster-whisper-master"
   uri: "quay.io/go-skynet/local-ai-backends:master-gpu-nvidia-cuda-12-faster-whisper"