transformers: correctly load automodels (#1643)

* backends(transformers): use AutoModel with LLM types

* examples: animagine-xl

* Add codellama examples
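
For context, the backend change amounts to selecting the transformers Auto class that matches the `type` declared in a model config, instead of always falling back to the generic AutoModel. A minimal Python sketch of that idea, with a hypothetical AUTO_CLASSES map and load_model helper (illustrative names, not the backend's actual code):

# Sketch: map the config's `type` string to a transformers Auto class.
# AUTO_CLASSES and load_model are hypothetical, not LocalAI internals.
from transformers import AutoModel, AutoModelForCausalLM, AutoModelForSeq2SeqLM

AUTO_CLASSES = {
    "AutoModel": AutoModel,
    "AutoModelForCausalLM": AutoModelForCausalLM,
    "AutoModelForSeq2SeqLM": AutoModelForSeq2SeqLM,
}

def load_model(model_id: str, model_type: str = "AutoModel"):
    # Unknown or missing types fall back to the generic AutoModel.
    auto_cls = AUTO_CLASSES.get(model_type, AutoModel)
    return auto_cls.from_pretrained(model_id)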
Ettore Di Giacinto 2024-01-26 00:13:21 +01:00 committed by GitHub
parent 3733250b3c
commit cb7512734d
27 changed files with 1144 additions and 569 deletions

@@ -0,0 +1,32 @@
name: tinyllama-chat
backend: transformers
type: AutoModelForCausalLM
parameters:
  model: TinyLlama/TinyLlama-1.1B-Chat-v1.0
  temperature: 0.2
  top_k: 40
  seed: -1
  top_p: 0.95
  max_tokens: 4096
template:
  chat_message: |
    <|im_start|>{{if eq .RoleName "assistant"}}assistant{{else if eq .RoleName "system"}}system{{else if eq .RoleName "user"}}user{{end}}
    {{if .Content}}{{.Content}}{{end}}<|im_end|>
  chat: |
    {{.Input}}
    <|im_start|>assistant
  completion: |
    {{.Input}}
stopwords:
- <|im_end|>
usage: |
  curl http://localhost:8080/v1/chat/completions -H "Content-Type: application/json" -d '{
    "model": "tinyllama-chat",
    "messages": [{"role": "user", "content": "Say this is a test!"}],
    "temperature": 0.7
  }'
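
As a sanity check outside LocalAI, the same model id can be loaded directly with the transformers class that the `type` field names. This is only an illustration of what `type: AutoModelForCausalLM` implies, with generation parameters loosely mirroring the config above, not the backend's code:

# Load TinyLlama directly with transformers and generate from a ChatML-style prompt.
from transformers import AutoModelForCausalLM, AutoTokenizer

model_id = "TinyLlama/TinyLlama-1.1B-Chat-v1.0"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id)

prompt = "<|im_start|>user\nSay this is a test!<|im_end|>\n<|im_start|>assistant\n"
inputs = tokenizer(prompt, return_tensors="pt")
outputs = model.generate(
    **inputs,
    max_new_tokens=64,
    do_sample=True,
    temperature=0.2,
    top_k=40,
    top_p=0.95,
)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))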