From a913fd310de05b103ed50b8a6edcd557ee98f07c Mon Sep 17 00:00:00 2001 From: Ettore Di Giacinto Date: Fri, 23 Aug 2024 09:24:34 +0200 Subject: [PATCH 0001/1530] models(gallery): add hermes-3-llama-3.1(8B,70B,405B) with vLLM (#3360) models(gallery): add hermes-3-llama-3.1 with vLLM it adds 8b, 70b and 405b to the gallery Signed-off-by: Ettore Di Giacinto --- gallery/hermes-vllm.yaml | 91 ++++++++++++++++++++++++++++++++++++++++ gallery/index.yaml | 32 ++++++++++++++ gallery/vllm.yaml | 29 +++++++++++++ 3 files changed, 152 insertions(+) create mode 100644 gallery/hermes-vllm.yaml create mode 100644 gallery/vllm.yaml diff --git a/gallery/hermes-vllm.yaml b/gallery/hermes-vllm.yaml new file mode 100644 index 00000000..80277da0 --- /dev/null +++ b/gallery/hermes-vllm.yaml @@ -0,0 +1,91 @@ +--- +name: "hermes-vllm" + +config_file: | + backend: vllm + context_size: 8192 + stopwords: + - "<|im_end|>" + - "" + - "<|eot_id|>" + - "<|end_of_text|>" + function: + disable_no_action: true + grammar: + # Uncomment the line below to enable grammar matching for JSON results if the model is breaking + # the output. This will make the model more accurate and won't break the JSON output. + # This however, will make parallel_calls not functional (it is a known bug) + # mixed_mode: true + disable: true + parallel_calls: true + expect_strings_after_json: true + json_regex_match: + - "(?s)(.*?)" + - "(?s)(.*)" + capture_llm_results: + - (?s)(.*?) + replace_llm_results: + - key: (?s)(.*?) 
+ value: "" + + template: + use_tokenizer_template: true + chat: | + {{.Input -}} + <|im_start|>assistant + chat_message: | + <|im_start|>{{if eq .RoleName "assistant"}}assistant{{else if eq .RoleName "system"}}system{{else if eq .RoleName "tool"}}tool{{else if eq .RoleName "user"}}user{{end}} + {{- if .FunctionCall }} + + {{- else if eq .RoleName "tool" }} + + {{- end }} + {{- if .Content}} + {{.Content }} + {{- end }} + {{- if .FunctionCall}} + {{toJson .FunctionCall}} + {{- end }} + {{- if .FunctionCall }} + + {{- else if eq .RoleName "tool" }} + + {{- end }}<|im_end|> + completion: | + {{.Input}} + function: | + <|im_start|>system + You are a function calling AI model. + Here are the available tools: + + {{range .Functions}} + {'type': 'function', 'function': {'name': '{{.Name}}', 'description': '{{.Description}}', 'parameters': {{toJson .Parameters}} }} + {{end}} + + You should call the tools provided to you sequentially + Please use XML tags to record your reasoning and planning before you call the functions as follows: + + {step-by-step reasoning and plan in bullet points} + + For each function call return a json object with function name and arguments within XML tags as follows: + + {"arguments": , "name": } + <|im_end|> + {{.Input -}} + <|im_start|>assistant +# Uncomment to specify a quantization method (optional) +# quantization: "awq" +# Uncomment to limit the GPU memory utilization (vLLM default is 0.9 for 90%) +# gpu_memory_utilization: 0.5 +# Uncomment to trust remote code from huggingface +# trust_remote_code: true +# Uncomment to enable eager execution +# enforce_eager: true +# Uncomment to specify the size of the CPU swap space per GPU (in GiB) +# swap_space: 2 +# Uncomment to specify the maximum length of a sequence (including prompt and output) +# max_model_len: 32768 +# Uncomment and specify the number of Tensor divisions. +# Allows you to partition and run large models. Performance gains are limited. 
+# https://github.com/vllm-project/vllm/issues/1435 +# tensor_parallel_size: 2 diff --git a/gallery/index.yaml b/gallery/index.yaml index 2a10723b..f34a09c4 100644 --- a/gallery/index.yaml +++ b/gallery/index.yaml @@ -4752,6 +4752,38 @@ - filename: Hermes-3-Llama-3.1-70B.Q4_K_M.gguf sha256: 955c2f42caade4278f3c9dbffa32bb74572652b20e49e5340e782de3585bbe3f uri: huggingface://NousResearch/Hermes-3-Llama-3.1-70B-GGUF/Hermes-3-Llama-3.1-70B.Q4_K_M.gguf +- &hermes-vllm + url: "github:mudler/LocalAI/gallery/hermes-vllm.yaml@master" + name: "hermes-3-llama-3.1-8b:vllm" + icon: https://cdn-uploads.huggingface.co/production/uploads/6317aade83d8d2fd903192d9/vG6j5WxHX09yj32vgjJlI.jpeg + tags: + - llm + - vllm + - gpu + - function-calling + license: llama-3 + urls: + - https://huggingface.co/NousResearch/Hermes-3-Llama-3.1-8B + description: | + Hermes 3 is a generalist language model with many improvements over Hermes 2, including advanced agentic capabilities, much better roleplaying, reasoning, multi-turn conversation, long context coherence, and improvements across the board. It is designed to focus on aligning LLMs to the user, with powerful steering capabilities and control given to the end user. The model uses ChatML as the prompt format, opening up a much more structured system for engaging the LLM in multi-turn chat dialogue. It also supports function calling and structured output capabilities, generalist assistant capabilities, and improved code generation skills. 
+ overrides: + parameters: + model: NousResearch/Hermes-3-Llama-3.1-8B +- !!merge <<: *hermes-vllm + name: "hermes-3-llama-3.1-70b:vllm" + urls: + - https://huggingface.co/NousResearch/Hermes-3-Llama-3.1-70B + overrides: + parameters: + model: NousResearch/Hermes-3-Llama-3.1-70B +- !!merge <<: *hermes-vllm + name: "hermes-3-llama-3.1-405b:vllm" + icon: https://cdn-uploads.huggingface.co/production/uploads/6317aade83d8d2fd903192d9/-kj_KflXsdpcZoTQsvx7W.jpeg + urls: + - https://huggingface.co/NousResearch/Hermes-3-Llama-3.1-405B + overrides: + parameters: + model: NousResearch/Hermes-3-Llama-3.1-405B - !!merge <<: *hermes-2-pro-mistral name: "biomistral-7b" description: | diff --git a/gallery/vllm.yaml b/gallery/vllm.yaml new file mode 100644 index 00000000..d36ea96d --- /dev/null +++ b/gallery/vllm.yaml @@ -0,0 +1,29 @@ +--- +name: "vllm" + +config_file: | + backend: vllm + function: + disable_no_action: true + grammar: + disable: true + parallel_calls: true + expect_strings_after_json: true + template: + use_tokenizer_template: true + # Uncomment to specify a quantization method (optional) + # quantization: "awq" + # Uncomment to limit the GPU memory utilization (vLLM default is 0.9 for 90%) + # gpu_memory_utilization: 0.5 + # Uncomment to trust remote code from huggingface + # trust_remote_code: true + # Uncomment to enable eager execution + # enforce_eager: true + # Uncomment to specify the size of the CPU swap space per GPU (in GiB) + # swap_space: 2 + # Uncomment to specify the maximum length of a sequence (including prompt and output) + # max_model_len: 32768 + # Uncomment and specify the number of Tensor divisions. + # Allows you to partition and run large models. Performance gains are limited. 
+ # https://github.com/vllm-project/vllm/issues/1435 + # tensor_parallel_size: 2 From a9c521eb41dc2dd63769e5362f05d9ab5d8bec50 Mon Sep 17 00:00:00 2001 From: Ettore Di Giacinto Date: Fri, 23 Aug 2024 10:29:04 +0200 Subject: [PATCH 0002/1530] fix(deps): bump grpcio (#3362) Signed-off-by: Ettore Di Giacinto --- backend/python/autogptq/requirements.txt | 2 +- backend/python/bark/requirements.txt | 2 +- backend/python/common/template/requirements.txt | 2 +- backend/python/coqui/requirements.txt | 2 +- backend/python/diffusers/requirements.txt | 2 +- backend/python/exllama/requirements.txt | 2 +- backend/python/exllama2/requirements.txt | 2 +- backend/python/mamba/requirements.txt | 2 +- backend/python/openvoice/requirements-intel.txt | 2 +- backend/python/openvoice/requirements.txt | 2 +- backend/python/parler-tts/requirements.txt | 2 +- backend/python/rerankers/requirements.txt | 2 +- backend/python/sentencetransformers/requirements.txt | 2 +- backend/python/transformers-musicgen/requirements.txt | 2 +- backend/python/transformers/requirements.txt | 2 +- backend/python/vall-e-x/requirements.txt | 2 +- backend/python/vllm/requirements.txt | 2 +- 17 files changed, 17 insertions(+), 17 deletions(-) diff --git a/backend/python/autogptq/requirements.txt b/backend/python/autogptq/requirements.txt index 53946f23..174ccc94 100644 --- a/backend/python/autogptq/requirements.txt +++ b/backend/python/autogptq/requirements.txt @@ -1,6 +1,6 @@ accelerate auto-gptq==0.7.1 -grpcio==1.65.4 +grpcio==1.66.0 protobuf certifi transformers \ No newline at end of file diff --git a/backend/python/bark/requirements.txt b/backend/python/bark/requirements.txt index 08bfaec3..ed15b678 100644 --- a/backend/python/bark/requirements.txt +++ b/backend/python/bark/requirements.txt @@ -1,4 +1,4 @@ bark==0.1.5 -grpcio==1.65.5 +grpcio==1.66.0 protobuf certifi \ No newline at end of file diff --git a/backend/python/common/template/requirements.txt b/backend/python/common/template/requirements.txt index 
35173155..047ef7d5 100644 --- a/backend/python/common/template/requirements.txt +++ b/backend/python/common/template/requirements.txt @@ -1,2 +1,2 @@ -grpcio==1.65.5 +grpcio==1.66.0 protobuf \ No newline at end of file diff --git a/backend/python/coqui/requirements.txt b/backend/python/coqui/requirements.txt index 6125f739..8fb684c0 100644 --- a/backend/python/coqui/requirements.txt +++ b/backend/python/coqui/requirements.txt @@ -1,4 +1,4 @@ TTS==0.22.0 -grpcio==1.65.5 +grpcio==1.66.0 protobuf certifi \ No newline at end of file diff --git a/backend/python/diffusers/requirements.txt b/backend/python/diffusers/requirements.txt index b4195fc5..2f85b4e3 100644 --- a/backend/python/diffusers/requirements.txt +++ b/backend/python/diffusers/requirements.txt @@ -1,5 +1,5 @@ setuptools -grpcio==1.65.4 +grpcio==1.66.0 pillow protobuf certifi diff --git a/backend/python/exllama/requirements.txt b/backend/python/exllama/requirements.txt index 99b81098..3e227c2c 100644 --- a/backend/python/exllama/requirements.txt +++ b/backend/python/exllama/requirements.txt @@ -1,4 +1,4 @@ -grpcio==1.65.5 +grpcio==1.66.0 protobuf certifi setuptools \ No newline at end of file diff --git a/backend/python/exllama2/requirements.txt b/backend/python/exllama2/requirements.txt index ce15b0b6..d5c2cc5c 100644 --- a/backend/python/exllama2/requirements.txt +++ b/backend/python/exllama2/requirements.txt @@ -1,4 +1,4 @@ -grpcio==1.65.4 +grpcio==1.66.0 protobuf certifi wheel diff --git a/backend/python/mamba/requirements.txt b/backend/python/mamba/requirements.txt index 920971ce..9b4dd772 100644 --- a/backend/python/mamba/requirements.txt +++ b/backend/python/mamba/requirements.txt @@ -1,3 +1,3 @@ -grpcio==1.65.5 +grpcio==1.66.0 protobuf certifi \ No newline at end of file diff --git a/backend/python/openvoice/requirements-intel.txt b/backend/python/openvoice/requirements-intel.txt index 25921f8f..75184a33 100644 --- a/backend/python/openvoice/requirements-intel.txt +++ 
b/backend/python/openvoice/requirements-intel.txt @@ -2,7 +2,7 @@ intel-extension-for-pytorch torch optimum[openvino] -grpcio==1.65.5 +grpcio==1.66.0 protobuf librosa==0.9.1 faster-whisper==1.0.3 diff --git a/backend/python/openvoice/requirements.txt b/backend/python/openvoice/requirements.txt index 13ce9c28..71991dc0 100644 --- a/backend/python/openvoice/requirements.txt +++ b/backend/python/openvoice/requirements.txt @@ -1,4 +1,4 @@ -grpcio==1.65.5 +grpcio==1.66.0 protobuf librosa faster-whisper diff --git a/backend/python/parler-tts/requirements.txt b/backend/python/parler-tts/requirements.txt index 1f17c892..b843981e 100644 --- a/backend/python/parler-tts/requirements.txt +++ b/backend/python/parler-tts/requirements.txt @@ -1,4 +1,4 @@ -grpcio==1.65.5 +grpcio==1.66.0 protobuf certifi llvmlite==0.43.0 \ No newline at end of file diff --git a/backend/python/rerankers/requirements.txt b/backend/python/rerankers/requirements.txt index 2a8d18b1..9b4dd772 100644 --- a/backend/python/rerankers/requirements.txt +++ b/backend/python/rerankers/requirements.txt @@ -1,3 +1,3 @@ -grpcio==1.65.4 +grpcio==1.66.0 protobuf certifi \ No newline at end of file diff --git a/backend/python/sentencetransformers/requirements.txt b/backend/python/sentencetransformers/requirements.txt index 920971ce..9b4dd772 100644 --- a/backend/python/sentencetransformers/requirements.txt +++ b/backend/python/sentencetransformers/requirements.txt @@ -1,3 +1,3 @@ -grpcio==1.65.5 +grpcio==1.66.0 protobuf certifi \ No newline at end of file diff --git a/backend/python/transformers-musicgen/requirements.txt b/backend/python/transformers-musicgen/requirements.txt index a0076112..f4512663 100644 --- a/backend/python/transformers-musicgen/requirements.txt +++ b/backend/python/transformers-musicgen/requirements.txt @@ -1,4 +1,4 @@ -grpcio==1.65.5 +grpcio==1.66.0 protobuf scipy==1.14.0 certifi \ No newline at end of file diff --git a/backend/python/transformers/requirements.txt 
b/backend/python/transformers/requirements.txt index 5531ea0e..9e056af6 100644 --- a/backend/python/transformers/requirements.txt +++ b/backend/python/transformers/requirements.txt @@ -1,4 +1,4 @@ -grpcio==1.65.5 +grpcio==1.66.0 protobuf certifi setuptools==69.5.1 # https://github.com/mudler/LocalAI/issues/2406 \ No newline at end of file diff --git a/backend/python/vall-e-x/requirements.txt b/backend/python/vall-e-x/requirements.txt index 920971ce..9b4dd772 100644 --- a/backend/python/vall-e-x/requirements.txt +++ b/backend/python/vall-e-x/requirements.txt @@ -1,3 +1,3 @@ -grpcio==1.65.5 +grpcio==1.66.0 protobuf certifi \ No newline at end of file diff --git a/backend/python/vllm/requirements.txt b/backend/python/vllm/requirements.txt index 99b81098..3e227c2c 100644 --- a/backend/python/vllm/requirements.txt +++ b/backend/python/vllm/requirements.txt @@ -1,4 +1,4 @@ -grpcio==1.65.5 +grpcio==1.66.0 protobuf certifi setuptools \ No newline at end of file From db2d8f4d04e7a9843bc257e93c3eb3ef6d9e45a9 Mon Sep 17 00:00:00 2001 From: "LocalAI [bot]" <139863280+localai-bot@users.noreply.github.com> Date: Fri, 23 Aug 2024 23:44:06 +0200 Subject: [PATCH 0003/1530] docs: :arrow_up: update docs version mudler/LocalAI (#3366) :arrow_up: Update docs version mudler/LocalAI Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com> Co-authored-by: mudler <2420543+mudler@users.noreply.github.com> --- docs/data/version.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/data/version.json b/docs/data/version.json index d07ef798..dc128c66 100644 --- a/docs/data/version.json +++ b/docs/data/version.json @@ -1,3 +1,3 @@ { - "version": "v2.19.4" + "version": "v2.20.1" } From 61fe2404a044edd59d0473d4665ad77e96cf7834 Mon Sep 17 00:00:00 2001 From: "LocalAI [bot]" <139863280+localai-bot@users.noreply.github.com> Date: Fri, 23 Aug 2024 23:49:18 +0200 Subject: [PATCH 0004/1530] chore: :arrow_up: Update ggerganov/llama.cpp to 
`3ba780e2a8f0ffe13f571b27f0bbf2ca5a199efc` (#3361) :arrow_up: Update ggerganov/llama.cpp Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com> Co-authored-by: mudler <2420543+mudler@users.noreply.github.com> --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index 2796b03e..2ecbaea8 100644 --- a/Makefile +++ b/Makefile @@ -8,7 +8,7 @@ DETECT_LIBS?=true # llama.cpp versions GOLLAMA_REPO?=https://github.com/go-skynet/go-llama.cpp GOLLAMA_VERSION?=2b57a8ae43e4699d3dc5d1496a1ccd42922993be -CPPLLAMA_VERSION?=fc54ef0d1c138133a01933296d50a36a1ab64735 +CPPLLAMA_VERSION?=3ba780e2a8f0ffe13f571b27f0bbf2ca5a199efc # go-rwkv version RWKV_REPO?=https://github.com/donomii/go-rwkv.cpp From ac5f6f210b25690bef44ea6c804a95d7082dc73b Mon Sep 17 00:00:00 2001 From: Dave Date: Fri, 23 Aug 2024 18:27:14 -0400 Subject: [PATCH 0005/1530] feat: external backend launching log improvements and relative path support (#3348) * specify workdir when launching external backend for safety / relative paths, bump version, logs Signed-off-by: Dave Lee * sneak in a devcontainer fix Signed-off-by: Dave Lee --------- Signed-off-by: Dave Lee --- .devcontainer-scripts/utils.sh | 3 +- go.mod | 23 ++--- go.sum | 166 ++++----------------------------- pkg/model/initializers.go | 5 +- pkg/model/process.go | 9 +- 5 files changed, 41 insertions(+), 165 deletions(-) diff --git a/.devcontainer-scripts/utils.sh b/.devcontainer-scripts/utils.sh index 02b588ae..7018c745 100644 --- a/.devcontainer-scripts/utils.sh +++ b/.devcontainer-scripts/utils.sh @@ -35,8 +35,9 @@ config_remote() { # # Param 1: bash array, filenames relative to the customization directory that should be copied to ~/.ssh setup_ssh() { + mkdir -p ~/.ssh local files=("$@") - for file in "${files[@]}"; then + for file in "${files[@]}" ; do local cfile="/devcontainer-customization/${file}" local hfile="~/.ssh/${file}" if [ ! 
-f "${hfile}" ]; then diff --git a/go.mod b/go.mod index ff978dbe..e9255a1e 100644 --- a/go.mod +++ b/go.mod @@ -22,6 +22,7 @@ require ( github.com/gofiber/fiber/v2 v2.52.5 github.com/gofiber/swagger v1.0.0 github.com/gofiber/template/html/v2 v2.1.2 + github.com/gofrs/flock v0.12.1 github.com/google/go-containerregistry v0.19.2 github.com/google/uuid v1.6.0 github.com/hpcloud/tail v1.0.0 @@ -32,10 +33,9 @@ require ( github.com/libp2p/go-libp2p v0.36.2 github.com/mholt/archiver/v3 v3.5.1 github.com/microcosm-cc/bluemonday v1.0.26 + github.com/mudler/go-processmanager v0.0.0-20240820160718-8b802d3ecf82 github.com/mudler/edgevpn v0.27.3 - github.com/mudler/go-processmanager v0.0.0-20230818213616-f204007f963c github.com/mudler/go-stable-diffusion v0.0.0-20240429204715-4a3cd6aeae6f - github.com/nomic-ai/gpt4all/gpt4all-bindings/golang v0.0.0-20240606155928-41c9013fa46a github.com/onsi/ginkgo/v2 v2.20.0 github.com/onsi/gomega v1.34.1 github.com/ory/dockertest/v3 v3.10.0 @@ -64,12 +64,8 @@ require ( ) require ( - github.com/cpuguy83/go-md2man/v2 v2.0.4 // indirect github.com/go-task/slim-sprig/v3 v3.0.0 // indirect github.com/go-viper/mapstructure/v2 v2.0.0 // indirect - github.com/gofrs/flock v0.12.1 // indirect - github.com/labstack/echo/v4 v4.12.0 // indirect - github.com/labstack/gommon v0.4.2 // indirect github.com/moby/docker-image-spec v1.3.1 // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect github.com/pion/datachannel v1.5.8 // indirect @@ -88,11 +84,8 @@ require ( github.com/pion/transport/v2 v2.2.10 // indirect github.com/pion/turn/v2 v2.1.6 // indirect github.com/pion/webrtc/v3 v3.3.0 // indirect - github.com/russross/blackfriday/v2 v2.1.0 // indirect - github.com/urfave/cli/v2 v2.27.4 // indirect - github.com/valyala/fasttemplate v1.2.2 // indirect + github.com/shirou/gopsutil/v4 v4.24.7 // indirect github.com/wlynxg/anet v0.0.4 // indirect - github.com/xrash/smetrics v0.0.0-20240521201337-686a1a2994c1 // indirect 
go.uber.org/mock v0.4.0 // indirect ) @@ -140,7 +133,7 @@ require ( github.com/go-audio/riff v1.0.0 // indirect github.com/go-logr/logr v1.4.2 // indirect github.com/go-logr/stdr v1.2.2 // indirect - github.com/go-ole/go-ole v1.2.6 // indirect + github.com/go-ole/go-ole v1.3.0 // indirect github.com/go-openapi/jsonpointer v0.21.0 // indirect github.com/go-openapi/jsonreference v0.21.0 // indirect github.com/go-openapi/spec v0.21.0 // indirect @@ -196,7 +189,7 @@ require ( github.com/libp2p/go-yamux/v4 v4.0.1 // indirect github.com/libp2p/zeroconf/v2 v2.2.0 // indirect github.com/lucasb-eyer/go-colorful v1.2.0 // indirect - github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 // indirect + github.com/lufia/plan9stats v0.0.0-20240819163618-b1d8f4d146e7 // indirect github.com/mailru/easyjson v0.7.7 // indirect github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd // indirect github.com/mattn/go-colorable v0.1.13 // indirect @@ -242,7 +235,7 @@ require ( github.com/pkoukk/tiktoken-go v0.1.6 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect github.com/polydawn/refmt v0.89.0 // indirect - github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c // indirect + github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55 // indirect github.com/prometheus/client_model v0.6.1 // indirect github.com/prometheus/common v0.55.0 // indirect github.com/prometheus/procfs v0.15.1 // indirect @@ -260,8 +253,8 @@ require ( github.com/spf13/cast v1.5.0 // indirect github.com/swaggo/files/v2 v2.0.0 // indirect github.com/tinylib/msgp v1.1.8 // indirect - github.com/tklauser/go-sysconf v0.3.12 // indirect - github.com/tklauser/numcpus v0.6.1 // indirect + github.com/tklauser/go-sysconf v0.3.14 // indirect + github.com/tklauser/numcpus v0.8.0 // indirect github.com/ulikunitz/xz v0.5.9 // indirect github.com/valyala/bytebufferpool v1.0.0 // indirect github.com/valyala/tcplisten v1.0.0 // indirect diff --git a/go.sum b/go.sum index 
338e6219..e09af5ce 100644 --- a/go.sum +++ b/go.sum @@ -90,8 +90,6 @@ github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSV github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= -github.com/cpuguy83/go-md2man/v2 v2.0.4 h1:wfIWP927BUkWJb2NmU/kNDYIBTh/ziUX91+lVfRxZq4= -github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/creachadair/mds v0.7.0 h1:7QoYqiPl18C0h7CLq9z9/qUH5Vr62V9677yJZHGLoQM= github.com/creachadair/mds v0.7.0/go.mod h1:4vrFYUzTXMJpMBU+OA292I6IUxKWCCfZkgXg+/kBZMo= github.com/creachadair/otp v0.4.2 h1:ngNMaD6Tzd7UUNRFyed7ykZFn/Wr5sSs5ffqZWm9pu8= @@ -129,8 +127,6 @@ github.com/dsnet/compress v0.0.2-0.20210315054119-f66993602bf5/go.mod h1:qssHWj6 github.com/dsnet/golib v0.0.0-20171103203638-1ea166775780/go.mod h1:Lj+Z9rebOhdfkVLjJ8T6VcRQv3SXugXy999NBtR9aFY= github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= github.com/elastic/gosigar v0.12.0/go.mod h1:iXRIGg2tLnu7LBdpqzyQfGDEidKCfWcCMS0WKyPWoMs= -github.com/elastic/gosigar v0.14.2 h1:Dg80n8cr90OZ7x+bAax/QjoW/XqTI11RmA79ZwIm9/4= -github.com/elastic/gosigar v0.14.2/go.mod h1:iXRIGg2tLnu7LBdpqzyQfGDEidKCfWcCMS0WKyPWoMs= github.com/elastic/gosigar v0.14.3 h1:xwkKwPia+hSfg9GqrCUKYdId102m9qTJIIr7egmK/uo= github.com/elastic/gosigar v0.14.3/go.mod h1:iXRIGg2tLnu7LBdpqzyQfGDEidKCfWcCMS0WKyPWoMs= github.com/elliotchance/orderedmap/v2 v2.2.0 h1:7/2iwO98kYT4XkOjA9mBEIwvi4KpGB4cyHeOFOnj4Vk= @@ -147,7 +143,6 @@ github.com/francoispqt/gojay v1.2.13/go.mod h1:ehT5mTG4ua4581f1++1WLG0vPdaA9HaiD github.com/frankban/quicktest v1.14.4 h1:g2rn0vABPOOXmZUj+vbmUp0lPoXEMuhTpIluN0XL9UY= github.com/frankban/quicktest v1.14.4/go.mod 
h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= -github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= github.com/ggerganov/whisper.cpp/bindings/go v0.0.0-20240626202019-c118733a29ad h1:dQ93Vd6i25o+zH9vvnZ8mu7jtJQ6jT3D+zE3V8Q49n0= @@ -168,8 +163,9 @@ github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ4 github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/go-ole/go-ole v1.2.5/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0= -github.com/go-ole/go-ole v1.2.6 h1:/Fpf6oFPoeFik9ty7siob0G6Ke8QvQEuVcuChpwXzpY= github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0= +github.com/go-ole/go-ole v1.3.0 h1:Dt6ye7+vXGIKZ7Xtk4s6/xVdGDQynvom7xCFEdWr6uE= +github.com/go-ole/go-ole v1.3.0/go.mod h1:5LS6F96DhAwUc7C+1HLexzMXY1xGRSryjyPPKW6zv78= github.com/go-openapi/jsonpointer v0.21.0 h1:YgdVicSA9vH5RiHs9TZW5oyafXZFc6+2Vc1rr/O9oNQ= github.com/go-openapi/jsonpointer v0.21.0/go.mod h1:IUyH9l/+uyhIYQ/PXVA41Rexl+kOkAPDdXEYns6fzUY= github.com/go-openapi/jsonreference v0.21.0 h1:Rs+Y7hSXT83Jacb7kFyjn4ijOuVGSvOdF2+tg1TRrwQ= @@ -184,7 +180,6 @@ github.com/go-skynet/go-llama.cpp v0.0.0-20240314183750-6a8041ef6b46 h1:lALhXzDk github.com/go-skynet/go-llama.cpp v0.0.0-20240314183750-6a8041ef6b46/go.mod h1:iub0ugfTnflE3rcIuqV2pQSo15nEw3GLW/utm5gyERo= github.com/go-sql-driver/mysql v1.7.1 h1:lUIinVbN1DY0xBg0eMOzmmtGoHwWBbvnWubQUrtU8EI= github.com/go-sql-driver/mysql v1.7.1/go.mod h1:OXbVy3sEdcQ2Doequ6Z5BW6fXNQTmx+9S1MCJN5yJMI= -github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod 
h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE= github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI= github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8= github.com/go-viper/mapstructure/v2 v2.0.0 h1:dhn8MZ1gZ0mzeodTG3jt5Vj/o87xZKuNAprG2mQfMfc= @@ -228,10 +223,7 @@ github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrU github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= -github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= -github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= -github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/golang/snappy v0.0.2/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM= github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= @@ -246,8 +238,6 @@ github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= github.com/google/go-cmp v0.6.0/go.mod 
h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-containerregistry v0.19.2 h1:TannFKE1QSajsP6hPWb5oJNgKe1IKjHukIKDUmvsV6w= @@ -258,8 +248,6 @@ github.com/google/gopacket v1.1.19 h1:ves8RnFZPGiFnTS0uPQStjwru6uO6h+nlr9j6fL7kF github.com/google/gopacket v1.1.19/go.mod h1:iJ8V8n6KS+z2U1A8pUwu8bW5SyEMkXJB8Yo/Vo+TKTo= github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= -github.com/google/pprof v0.0.0-20240424215950-a892ee059fd6 h1:k7nVchz72niMH6YLQNvHSdIE7iqsQxK1P41mySCvssg= -github.com/google/pprof v0.0.0-20240424215950-a892ee059fd6/go.mod h1:kf6iHlnVGwgKolg33glAes7Yg/8iWP8ukqeldJSO7jw= github.com/google/pprof v0.0.0-20240727154555-813a5fbdbec8 h1:FKHo8hFI3A+7w0aUQuYXQ+6EN5stWmeY/AZqtM8xk9k= github.com/google/pprof v0.0.0-20240727154555-813a5fbdbec8/go.mod h1:K1liHPHnj73Fdn/EKuT8nrFqBihUSKXoLYU0BuatOYo= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= @@ -365,10 +353,8 @@ github.com/kr/pty v1.1.3/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= -github.com/labstack/echo/v4 v4.12.0 h1:IKpw49IMryVB2p1a4dzwlhP1O2Tf2E0Ir/450lH+kI0= -github.com/labstack/echo/v4 v4.12.0/go.mod h1:UP9Cr2DJXbOK3Kr9ONYzNowSh7HP0aG0ShAyycHSJvM= -github.com/labstack/gommon v0.4.2 h1:F8qTUNXgG1+6WQmqoUWnz8WiEU60mXVVw0P4ht1WRA0= -github.com/labstack/gommon v0.4.2/go.mod h1:QlUFxVM+SNXhDL/Z7YhocGIBYOiwB0mXm1+1bAPHPyU= +github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= +github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= github.com/lib/pq 
v0.0.0-20180327071824-d34b9ff171c2 h1:hRGSmZu7j271trc9sneMrpOW7GN5ngLm8YUZIPzf394= github.com/lib/pq v0.0.0-20180327071824-d34b9ff171c2/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= github.com/libp2p/go-buffer-pool v0.1.0 h1:oK4mSFcQz7cTQIfqbe4MIj9gLW+mnanjyFtc6cdF0Y8= @@ -377,10 +363,6 @@ github.com/libp2p/go-cidranger v1.1.0 h1:ewPN8EZ0dd1LSnrtuwd4709PXVcITVeuwbag38y github.com/libp2p/go-cidranger v1.1.0/go.mod h1:KWZTfSr+r9qEo9OkI9/SIEeAtw+NNoU0dXIXt15Okic= github.com/libp2p/go-flow-metrics v0.1.0 h1:0iPhMI8PskQwzh57jB9WxIuIOQ0r+15PChFGkx3Q3WM= github.com/libp2p/go-flow-metrics v0.1.0/go.mod h1:4Xi8MX8wj5aWNDAZttg6UPmc0ZrnFNsMtpsYUClFtro= -github.com/libp2p/go-libp2p v0.35.2 h1:287oHbuplkrLdAF+syB0n/qDgd50AUBtEODqS0e0HDs= -github.com/libp2p/go-libp2p v0.35.2/go.mod h1:RKCDNt30IkFipGL0tl8wQW/3zVWEGFUZo8g2gAKxwjU= -github.com/libp2p/go-libp2p v0.35.4 h1:FDiBUYLkueFwsuNJUZaxKRdpKvBOWU64qQPL768bSeg= -github.com/libp2p/go-libp2p v0.35.4/go.mod h1:RKCDNt30IkFipGL0tl8wQW/3zVWEGFUZo8g2gAKxwjU= github.com/libp2p/go-libp2p v0.36.2 h1:BbqRkDaGC3/5xfaJakLV/BrpjlAuYqSB0lRvtzL3B/U= github.com/libp2p/go-libp2p v0.36.2/go.mod h1:XO3joasRE4Eup8yCTTP/+kX+g92mOgRaadk46LmPhHY= github.com/libp2p/go-libp2p-asn-util v0.4.1 h1:xqL7++IKD9TBFMgnLPZR6/6iYhawHKHl950SO9L6n94= @@ -389,8 +371,6 @@ github.com/libp2p/go-libp2p-kad-dht v0.25.2 h1:FOIk9gHoe4YRWXTu8SY9Z1d0RILol0Trt github.com/libp2p/go-libp2p-kad-dht v0.25.2/go.mod h1:6za56ncRHYXX4Nc2vn8z7CZK0P4QiMcrn77acKLM2Oo= github.com/libp2p/go-libp2p-kbucket v0.6.3 h1:p507271wWzpy2f1XxPzCQG9NiN6R6lHL9GiSErbQQo0= github.com/libp2p/go-libp2p-kbucket v0.6.3/go.mod h1:RCseT7AH6eJWxxk2ol03xtP9pEHetYSPXOaJnOiD8i0= -github.com/libp2p/go-libp2p-pubsub v0.11.0 h1:+JvS8Kty0OiyUiN0i8H5JbaCgjnJTRnTHe4rU88dLFc= -github.com/libp2p/go-libp2p-pubsub v0.11.0/go.mod h1:QEb+hEV9WL9wCiUAnpY29FZR6W3zK8qYlaml8R4q6gQ= github.com/libp2p/go-libp2p-pubsub v0.12.0 h1:PENNZjSfk8KYxANRlpipdS7+BfLmOl3L2E/6vSNjbdI= github.com/libp2p/go-libp2p-pubsub 
v0.12.0/go.mod h1:Oi0zw9aw8/Y5GC99zt+Ef2gYAl+0nZlwdJonDyOz/sE= github.com/libp2p/go-libp2p-record v0.2.0 h1:oiNUOCWno2BFuxt3my4i1frNrt7PerzB3queqa1NkQ0= @@ -413,8 +393,8 @@ github.com/libp2p/zeroconf/v2 v2.2.0 h1:Cup06Jv6u81HLhIj1KasuNM/RHHrJ8T7wOTS4+Tv github.com/libp2p/zeroconf/v2 v2.2.0/go.mod h1:fuJqLnUwZTshS3U/bMRJ3+ow/v9oid1n0DmyYyNO1Xs= github.com/lucasb-eyer/go-colorful v1.2.0 h1:1nnpGOrhyZZuNyfu1QjKiUICQ74+3FNCN69Aj6K7nkY= github.com/lucasb-eyer/go-colorful v1.2.0/go.mod h1:R4dSotOR9KMtayYi1e77YzuveK+i7ruzyGqttikkLy0= -github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 h1:6E+4a0GO5zZEnZ81pIr0yLvtUWk2if982qA3F3QD6H4= -github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0/go.mod h1:zJYVVT2jmtg6P3p1VtQj7WsuWi/y4VnjVBn7F8KPB3I= +github.com/lufia/plan9stats v0.0.0-20240819163618-b1d8f4d146e7 h1:5RK988zAqB3/AN3opGfRpoQgAVqr6/A5+qRTi67VUZY= +github.com/lufia/plan9stats v0.0.0-20240819163618-b1d8f4d146e7/go.mod h1:ilwx/Dta8jXAgpFYFvSWEMwxmbWXyiUHkd5FwyKhb5k= github.com/lunixbochs/vtclean v1.0.0/go.mod h1:pHhQNgMf3btfWnGBVipUOjRYhoOsdGqdm/+2c2E2WMI= github.com/mailru/easyjson v0.0.0-20190312143242-1de009706dbe/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= @@ -440,8 +420,6 @@ github.com/microcosm-cc/bluemonday v1.0.26 h1:xbqSvqzQMeEHCqMi64VAs4d8uy6Mequs3r github.com/microcosm-cc/bluemonday v1.0.26/go.mod h1:JyzOCs9gkyQyjs+6h10UEVSe02CGwkhd72Xdqh78TWs= github.com/miekg/dns v1.1.41/go.mod h1:p6aan82bvRIyn+zDIv9xYNUpwa73JcSh9BKwknJysuI= github.com/miekg/dns v1.1.43/go.mod h1:+evo5L0630/F6ca/Z9+GAqzhjGyn8/c+TBaOyfEl0V4= -github.com/miekg/dns v1.1.61 h1:nLxbwF3XxhwVSm8g9Dghm9MHPaUZuqhPiGL+675ZmEs= -github.com/miekg/dns v1.1.61/go.mod h1:mnAarhS3nWaW+NVP2wTkYVIZyHNJ098SJZUki3eykwQ= github.com/miekg/dns v1.1.62 h1:cN8OuEF1/x5Rq6Np+h1epln8OiyPWV+lROx9LxcGgIQ= github.com/miekg/dns v1.1.62/go.mod h1:mvDlcItzm+br7MToIKqkglaGhlFMHJ9DTNNWONWXbNQ= github.com/mikioh/tcp 
v0.0.0-20190314235350-803a9b46060c h1:bzE/A84HN25pxAuk9Eej1Kz9OUelF97nAc82bDquQI8= @@ -477,20 +455,14 @@ github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3Rllmb github.com/mr-tron/base58 v1.1.2/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc= github.com/mr-tron/base58 v1.2.0 h1:T/HDJBh4ZCPbU39/+c3rRvE0uKBQlU27+QI8LJ4t64o= github.com/mr-tron/base58 v1.2.0/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc= -github.com/mudler/edgevpn v0.26.2 h1:OK4jfk7sYjuU7vCh+geUJk38lsxRgMk+EdsS9s0hioE= -github.com/mudler/edgevpn v0.26.2/go.mod h1:lplntB9N6LzGNqeSM3XHCq8kyDPsNhY3jqEbWGD2WaQ= -github.com/mudler/edgevpn v0.27.0 h1:FnBVzPs098DTgbUkiwm22n30hmEVBAq+PVpXanqx6qo= -github.com/mudler/edgevpn v0.27.0/go.mod h1:Hwvr+i+dePgn/Yh+EMMvqcw9ByUCLAWD9TgYtJYV95Y= -github.com/mudler/edgevpn v0.27.1 h1:UKW7/JW4l2cBAPMRnlZRHbuFDGrv7resVJlFD34WBDE= -github.com/mudler/edgevpn v0.27.1/go.mod h1:PK7rl0QQQTdlpie9rlaS7DguH500ogqproQli/QwrxU= github.com/mudler/edgevpn v0.27.2 h1:FsQ95jPCDJP9LzKJYCHx70z08DGXK5yrHMzH9Qok3nE= github.com/mudler/edgevpn v0.27.2/go.mod h1:PK7rl0QQQTdlpie9rlaS7DguH500ogqproQli/QwrxU= github.com/mudler/edgevpn v0.27.3 h1:9g6M7Q+2GdwDN12KmjhYJDi69cttvDW7luBmZioD2ZM= github.com/mudler/edgevpn v0.27.3/go.mod h1:PK7rl0QQQTdlpie9rlaS7DguH500ogqproQli/QwrxU= github.com/mudler/go-piper v0.0.0-20240315144837-9d0100873a7d h1:8udOFrDf/I83JL0/u22j6U6Q9z9LoSdby2a/DWdd0/s= github.com/mudler/go-piper v0.0.0-20240315144837-9d0100873a7d/go.mod h1:O7SwdSWMilAWhBZMK9N9Y/oBDyMMzshE3ju8Xkexwig= -github.com/mudler/go-processmanager v0.0.0-20230818213616-f204007f963c h1:CI5uGwqBpN8N7BrSKC+nmdfw+9nPQIDyjHHlaIiitZI= -github.com/mudler/go-processmanager v0.0.0-20230818213616-f204007f963c/go.mod h1:gY3wyrhkRySJtmtI/JPt4a2mKv48h/M9pEZIW+SjeC0= +github.com/mudler/go-processmanager v0.0.0-20240820160718-8b802d3ecf82 h1:FVT07EI8njvsD4tC2Hw8Xhactp5AWhsQWD4oTeQuSAU= +github.com/mudler/go-processmanager v0.0.0-20240820160718-8b802d3ecf82/go.mod 
h1:Urp7LG5jylKoDq0663qeBh0pINGcRl35nXdKx82PSoU= github.com/mudler/go-stable-diffusion v0.0.0-20240429204715-4a3cd6aeae6f h1:cxtMSRkUfy+mjIQ3yMrU0txwQ4It913NEN4m1H8WWgo= github.com/mudler/go-stable-diffusion v0.0.0-20240429204715-4a3cd6aeae6f/go.mod h1:8ufRkpz/S/9ahkaxzZ5i4WMgO9w4InEhuRoT7vK5Rnw= github.com/mudler/water v0.0.0-20221010214108-8c7313014ce0 h1:Qh6ghkMgTu6siFbTf7L3IszJmshMhXxNL4V+t7IIA6w= @@ -527,30 +499,16 @@ github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/neelance/astrewrite v0.0.0-20160511093645-99348263ae86/go.mod h1:kHJEU3ofeGjhHklVoIGuVj85JJwZ6kWPaJwCIxgnFmo= github.com/neelance/sourcemap v0.0.0-20151028013722-8c68805598ab/go.mod h1:Qr6/a/Q4r9LP1IltGz7tA7iOK1WonHEYhu1HRBA7ZiM= -github.com/nomic-ai/gpt4all/gpt4all-bindings/golang v0.0.0-20240606155928-41c9013fa46a h1:jLmaG6BYcFvUDGFJM8B9kOM2yfvaTLxrKcFkBn4nstA= -github.com/nomic-ai/gpt4all/gpt4all-bindings/golang v0.0.0-20240606155928-41c9013fa46a/go.mod h1:4T3CHXyrt+7FQHXaxULZfPjHbD8/99WuDDJa0YVZARI= github.com/nwaples/rardecode v1.1.0 h1:vSxaY8vQhOcVr4mm5e8XllHWTiM4JF507A0Katqw7MQ= github.com/nwaples/rardecode v1.1.0/go.mod h1:5DzqNKiOdpKKBH87u8VlvAnPZMXcGRhxWkRpHbbfGS0= -github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= -github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= github.com/nxadm/tail v1.4.11 h1:8feyoE3OzPrcshW5/MJ4sGESc5cqmGkGCWlco4l0bqY= github.com/nxadm/tail v1.4.11/go.mod h1:OTaG3NK980DZzxbRq6lEuzgU+mug70nY11sMd4JXXHc= github.com/olekukonko/tablewriter v0.0.5 h1:P2Ga83D34wi1o9J6Wh1mRuqd4mF/x/lgBS7N7AbDhec= github.com/olekukonko/tablewriter v0.0.5/go.mod h1:hPp6KlRPjbx+hW8ykQs1w3UBbZlj6HuIJcUGPhkA7kY= -github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo v1.12.1/go.mod 
h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= -github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0= github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU= -github.com/onsi/ginkgo/v2 v2.19.0 h1:9Cnnf7UHo57Hy3k6/m5k3dRfGTMXGvxhHFvkDTCTpvA= -github.com/onsi/ginkgo/v2 v2.19.0/go.mod h1:rlwLi9PilAFJ8jCg9UE1QP6VBpd6/xj3SRC0d6TU0To= github.com/onsi/ginkgo/v2 v2.20.0 h1:PE84V2mHqoT1sglvHc8ZdQtPcwmvvt29WLEEO3xmdZw= github.com/onsi/ginkgo/v2 v2.20.0/go.mod h1:lG9ey2Z29hR41WMVthyJBGUBcBhGOtoPF2VFMvBXFCI= -github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= -github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= -github.com/onsi/gomega v1.16.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY= -github.com/onsi/gomega v1.33.1 h1:dsYjIxxSR755MDmKVsaFQTE22ChNBcuuTWgkUDSubOk= -github.com/onsi/gomega v1.33.1/go.mod h1:U4R44UsT+9eLIaYRB2a5qajjtQYn0hauxvRm16AVYg0= github.com/onsi/gomega v1.34.1 h1:EUMJIKUjM8sKjYbtxQI9A4z2o+rruxnzNvpknOXie6k= github.com/onsi/gomega v1.34.1/go.mod h1:kU1QgUvBDLXBJq618Xvm2LUX6rSAfRaFRTcdOeDLwwY= github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= @@ -581,21 +539,13 @@ github.com/philhofer/fwd v1.1.2 h1:bnDivRJ1EWPjUIRXV5KfORO897HTbpFAQddBdE8t7Gw= github.com/philhofer/fwd v1.1.2/go.mod h1:qkPdfjR2SIEbspLqpe1tO4n5yICnr2DY7mqEx2tUTP0= github.com/pierrec/lz4/v4 v4.1.2 h1:qvY3YFXRQE/XB8MlLzJH7mSzBs74eA2gg52YTk6jUPM= github.com/pierrec/lz4/v4 v4.1.2/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= -github.com/pion/datachannel v1.5.6 h1:1IxKJntfSlYkpUj8LlYRSWpYiTTC02nUrOE8T3DqGeg= -github.com/pion/datachannel v1.5.6/go.mod h1:1eKT6Q85pRnr2mHiWHxJwO50SfZRtWHTsNIVb/NfGW4= github.com/pion/datachannel v1.5.8 h1:ph1P1NsGkazkjrvyMfhRBUAWMxugJjq2HfQifaOoSNo= github.com/pion/datachannel v1.5.8/go.mod 
h1:PgmdpoaNBLX9HNzNClmdki4DYW5JtI7Yibu8QzbL3tI= github.com/pion/dtls/v2 v2.2.7/go.mod h1:8WiMkebSHFD0T+dIU+UeBaoV7kDhOW5oDCzZ7WZ/F9s= -github.com/pion/dtls/v2 v2.2.11 h1:9U/dpCYl1ySttROPWJgqWKEylUdT0fXp/xst6JwY5Ks= -github.com/pion/dtls/v2 v2.2.11/go.mod h1:d9SYc9fch0CqK90mRk1dC7AkzzpwJj6u2GU3u+9pqFE= github.com/pion/dtls/v2 v2.2.12 h1:KP7H5/c1EiVAAKUmXyCzPiQe5+bCJrpOeKg/L05dunk= github.com/pion/dtls/v2 v2.2.12/go.mod h1:d9SYc9fch0CqK90mRk1dC7AkzzpwJj6u2GU3u+9pqFE= -github.com/pion/ice/v2 v2.3.25 h1:M5rJA07dqhi3nobJIg+uPtcVjFECTrhcR3n0ns8kDZs= -github.com/pion/ice/v2 v2.3.25/go.mod h1:KXJJcZK7E8WzrBEYnV4UtqEZsGeWfHxsNqhVcVvgjxw= github.com/pion/ice/v2 v2.3.34 h1:Ic1ppYCj4tUOcPAp76U6F3fVrlSw8A9JtRXLqw6BbUM= github.com/pion/ice/v2 v2.3.34/go.mod h1:mBF7lnigdqgtB+YHkaY/Y6s6tsyRyo4u4rPGRuOjUBQ= -github.com/pion/interceptor v0.1.29 h1:39fsnlP1U8gw2JzOFWdfCU82vHvhW9o0rZnZF56wF+M= -github.com/pion/interceptor v0.1.29/go.mod h1:ri+LGNjRUc5xUNtDEPzfdkmSqISixVTBF/z/Zms/6T4= github.com/pion/interceptor v0.1.30 h1:au5rlVHsgmxNi+v/mjOPazbW1SHzfx7/hYOEYQnUcxA= github.com/pion/interceptor v0.1.30/go.mod h1:RQuKT5HTdkP2Fi0cuOS5G5WNymTjzXaGF75J4k7z2nc= github.com/pion/logging v0.2.2 h1:M9+AIj/+pxNsDfAT64+MAVgJO0rsyLnoJKCqf//DoeY= @@ -608,39 +558,27 @@ github.com/pion/rtcp v1.2.12/go.mod h1:sn6qjxvnwyAkkPzPULIbVqSKI5Dv54Rv7VG0kNxh9 github.com/pion/rtcp v1.2.14 h1:KCkGV3vJ+4DAJmvP0vaQShsb0xkRfWkO540Gy102KyE= github.com/pion/rtcp v1.2.14/go.mod h1:sn6qjxvnwyAkkPzPULIbVqSKI5Dv54Rv7VG0kNxh9L4= github.com/pion/rtp v1.8.3/go.mod h1:pBGHaFt/yW7bf1jjWAoUjpSNoDnw98KTMg+jWWvziqU= -github.com/pion/rtp v1.8.6 h1:MTmn/b0aWWsAzux2AmP8WGllusBVw4NPYPVFFd7jUPw= -github.com/pion/rtp v1.8.6/go.mod h1:pBGHaFt/yW7bf1jjWAoUjpSNoDnw98KTMg+jWWvziqU= github.com/pion/rtp v1.8.9 h1:E2HX740TZKaqdcPmf4pw6ZZuG8u5RlMMt+l3dxeu6Wk= github.com/pion/rtp v1.8.9/go.mod h1:pBGHaFt/yW7bf1jjWAoUjpSNoDnw98KTMg+jWWvziqU= -github.com/pion/sctp v1.8.13/go.mod h1:YKSgO/bO/6aOMP9LCie1DuD7m+GamiK2yIiPM6vH+GA= 
-github.com/pion/sctp v1.8.16 h1:PKrMs+o9EMLRvFfXq59WFsC+V8mN1wnKzqrv+3D/gYY= -github.com/pion/sctp v1.8.16/go.mod h1:P6PbDVA++OJMrVNg2AL3XtYHV4uD6dvfyOovCgMs0PE= github.com/pion/sctp v1.8.33 h1:dSE4wX6uTJBcNm8+YlMg7lw1wqyKHggsP5uKbdj+NZw= github.com/pion/sctp v1.8.33/go.mod h1:beTnqSzewI53KWoG3nqB282oDMGrhNxBdb+JZnkCwRM= github.com/pion/sdp/v3 v3.0.9 h1:pX++dCHoHUwq43kuwf3PyJfHlwIj4hXA7Vrifiq0IJY= github.com/pion/sdp/v3 v3.0.9/go.mod h1:B5xmvENq5IXJimIO4zfp6LAe1fD9N+kFv+V/1lOdz8M= -github.com/pion/srtp/v2 v2.0.18 h1:vKpAXfawO9RtTRKZJbG4y0v1b11NZxQnxRl85kGuUlo= -github.com/pion/srtp/v2 v2.0.18/go.mod h1:0KJQjA99A6/a0DOVTu1PhDSw0CXF2jTkqOoMg3ODqdA= github.com/pion/srtp/v2 v2.0.20 h1:HNNny4s+OUmG280ETrCdgFndp4ufx3/uy85EawYEhTk= github.com/pion/srtp/v2 v2.0.20/go.mod h1:0KJQjA99A6/a0DOVTu1PhDSw0CXF2jTkqOoMg3ODqdA= github.com/pion/stun v0.6.1 h1:8lp6YejULeHBF8NmV8e2787BogQhduZugh5PdhDyyN4= github.com/pion/stun v0.6.1/go.mod h1:/hO7APkX4hZKu/D0f2lHzNyvdkTGtIy3NDmLR7kSz/8= github.com/pion/transport/v2 v2.2.1/go.mod h1:cXXWavvCnFF6McHTft3DWS9iic2Mftcz1Aq29pGcU5g= -github.com/pion/transport/v2 v2.2.2/go.mod h1:OJg3ojoBJopjEeECq2yJdXH9YVrUJ1uQ++NjXLOUorc= github.com/pion/transport/v2 v2.2.3/go.mod h1:q2U/tf9FEfnSBGSW6w5Qp5PFWRLRj3NjLhCCgpRK4p0= github.com/pion/transport/v2 v2.2.4/go.mod h1:q2U/tf9FEfnSBGSW6w5Qp5PFWRLRj3NjLhCCgpRK4p0= -github.com/pion/transport/v2 v2.2.5 h1:iyi25i/21gQck4hfRhomF6SktmUQjRsRW4WJdhfc3Kc= -github.com/pion/transport/v2 v2.2.5/go.mod h1:q2U/tf9FEfnSBGSW6w5Qp5PFWRLRj3NjLhCCgpRK4p0= github.com/pion/transport/v2 v2.2.10 h1:ucLBLE8nuxiHfvkFKnkDQRYWYfp8ejf4YBOPfaQpw6Q= github.com/pion/transport/v2 v2.2.10/go.mod h1:sq1kSLWs+cHW9E+2fJP95QudkzbK7wscs8yYgQToO5E= github.com/pion/transport/v3 v3.0.1/go.mod h1:UY7kiITrlMv7/IKgd5eTUcaahZx5oUN3l9SzK5f5xE0= -github.com/pion/transport/v3 v3.0.2 h1:r+40RJR25S9w3jbA6/5uEPTzcdn7ncyU44RWCbHkLg4= -github.com/pion/transport/v3 v3.0.2/go.mod h1:nIToODoOlb5If2jF9y2Igfx3PFYWfuXi37m0IlWa/D0= +github.com/pion/transport/v3 
v3.0.7 h1:iRbMH05BzSNwhILHoBoAPxoB9xQgOaJk+591KC9P1o0= +github.com/pion/transport/v3 v3.0.7/go.mod h1:YleKiTZ4vqNxVwh77Z0zytYi7rXHl7j6uPLGhhz9rwo= github.com/pion/turn/v2 v2.1.3/go.mod h1:huEpByKKHix2/b9kmTAM3YoX6MKP+/D//0ClgUYR2fY= github.com/pion/turn/v2 v2.1.6 h1:Xr2niVsiPTB0FPtt+yAWKFUkU1eotQbGgpTIld4x1Gc= github.com/pion/turn/v2 v2.1.6/go.mod h1:huEpByKKHix2/b9kmTAM3YoX6MKP+/D//0ClgUYR2fY= -github.com/pion/webrtc/v3 v3.2.40 h1:Wtfi6AZMQg+624cvCXUuSmrKWepSB7zfgYDOYqsSOVU= -github.com/pion/webrtc/v3 v3.2.40/go.mod h1:M1RAe3TNTD1tzyvqHrbVODfwdPGSXOUo/OgpoGGJqFY= github.com/pion/webrtc/v3 v3.3.0 h1:Rf4u6n6U5t5sUxhYPQk/samzU/oDv7jk6BA5hyO2F9I= github.com/pion/webrtc/v3 v3.3.0/go.mod h1:hVmrDJvwhEertRWObeb1xzulzHGeVUoPlWvxdGzcfU0= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= @@ -652,11 +590,9 @@ github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZb github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/polydawn/refmt v0.89.0 h1:ADJTApkvkeBZsN0tBTx8QjpD9JkmxbKp0cxfr9qszm4= github.com/polydawn/refmt v0.89.0/go.mod h1:/zvteZs/GwLtCgZ4BL6CBsk9IKIlexP43ObX9AxTqTw= -github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c h1:ncq/mPwQF4JjgDlrVEn3C11VoGHZN7m8qihwgMEtzYw= -github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE= +github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55 h1:o4JXh1EVt9k/+g42oCprj/FisM4qX9L3sZB3upGN2ZU= +github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE= github.com/prometheus/client_golang v0.8.0/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= -github.com/prometheus/client_golang v1.19.1 h1:wZWJDwK+NameRJuPGDhlnFgx8e8HN3XHQeLaYJFJBOE= -github.com/prometheus/client_golang v1.19.1/go.mod h1:mP78NwGzrVks5S2H6ab8+ZZGJLZUq1hoULYBAYBw1Ho= github.com/prometheus/client_golang 
v1.20.0 h1:jBzTZ7B099Rg24tny+qngoynol8LtVYlA2bqx3vEloI= github.com/prometheus/client_golang v1.20.0/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= @@ -671,8 +607,6 @@ github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0leargg github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk= github.com/quic-go/qpack v0.4.0 h1:Cr9BXA1sQS2SmDUWjSofMPNKmvF6IiIfDRmgU0w1ZCo= github.com/quic-go/qpack v0.4.0/go.mod h1:UZVnYIfi5GRk+zI9UMaCPsmZ2xKJP7XBUvVyT1Knj9A= -github.com/quic-go/quic-go v0.44.0 h1:So5wOr7jyO4vzL2sd8/pD9Kesciv91zSk8BoFngItQ0= -github.com/quic-go/quic-go v0.44.0/go.mod h1:z4cx/9Ny9UtGITIPzmPTXh1ULfOyWh4qGQlpnPcWmek= github.com/quic-go/quic-go v0.46.0 h1:uuwLClEEyk1DNvchH8uCByQVjo3yKL9opKulExNDs7Y= github.com/quic-go/quic-go v0.46.0/go.mod h1:1dLehS7TIR64+vxGR70GDcatWTOtMX2PUtnKsjbTurI= github.com/quic-go/webtransport-go v0.8.0 h1:HxSrwun11U+LlmwpgM1kEqIqH90IT4N8auv/cD7QFJg= @@ -693,16 +627,16 @@ github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR github.com/russross/blackfriday v1.6.0 h1:KqfZb0pUVN2lYqZUYRddxF4OR8ZMURnJIG5Y3VRLtww= github.com/russross/blackfriday v1.6.0/go.mod h1:ti0ldHuxg49ri4ksnFxlkCfN+hvslNlmVHqNRXXJNAY= github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= -github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/sashabaranov/go-openai v1.26.2 h1:cVlQa3gn3eYqNXRW03pPlpy6zLG52EU4g0FrWXc0EFI= github.com/sashabaranov/go-openai v1.26.2/go.mod h1:lj5b/K+zjTSFxVLijLSTDZuP7adOgerWeFyZLUhAKRg= github.com/schollz/progressbar/v3 v3.14.4 h1:W9ZrDSJk7eqmQhd3uxFNNcTr0QL+xuGNI9dEMrw0r74= github.com/schollz/progressbar/v3 v3.14.4/go.mod 
h1:aT3UQ7yGm+2ZjeXPqsjTenwL3ddUiuZ0kfQ/2tHlyNI= github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= -github.com/shirou/gopsutil/v3 v3.23.7/go.mod h1:c4gnmoRC0hQuaLqvxnx1//VXQ0Ms/X9UnJF8pddY5z4= github.com/shirou/gopsutil/v3 v3.24.5 h1:i0t8kL+kQTvpAYToeuiVk3TgDeKOFioZO3Ztz/iZ9pI= github.com/shirou/gopsutil/v3 v3.24.5/go.mod h1:bsoOS1aStSs9ErQ1WWfxllSeS1K5D+U30r2NfcubMVk= +github.com/shirou/gopsutil/v4 v4.24.7 h1:V9UGTK4gQ8HvcnPKf6Zt3XHyQq/peaekfxpJ2HSocJk= +github.com/shirou/gopsutil/v4 v4.24.7/go.mod h1:0uW/073rP7FYLOkvxolUQM5rMOLTNmRXnFKafpb71rw= github.com/shoenig/go-m1cpu v0.1.6 h1:nxdKQNcEB6vzgA2E2bvzKIYRuNj7XNJ4S/aRSwKzFtM= github.com/shoenig/go-m1cpu v0.1.6/go.mod h1:1JJMcUBvfNwpq05QDQVAnx3gUHr9IYF7GNg9SUEw2VQ= github.com/shoenig/test v0.6.4 h1:kVTaSd7WLz5WZ2IaoM0RSzRsUD+m8wRR+5qvntpn4LU= @@ -778,12 +712,10 @@ github.com/thxcode/gguf-parser-go v0.1.0 h1:J4QruXyEQGjrAKeKZFlsD2na9l4XF5+bjR19 github.com/thxcode/gguf-parser-go v0.1.0/go.mod h1:Tn1PsO/YDEtLIxm1+QDCjIIH9L/9Sr7+KpxZKm0sEuE= github.com/tinylib/msgp v1.1.8 h1:FCXC1xanKO4I8plpHGH2P7koL/RzZs12l/+r7vakfm0= github.com/tinylib/msgp v1.1.8/go.mod h1:qkpG+2ldGg4xRFmx+jfTvZPxfGFhi64BcnL9vkCm/Tw= -github.com/tklauser/go-sysconf v0.3.11/go.mod h1:GqXfhXY3kiPa0nAXPDIQIWzJbMCB7AmcWpGR8lSZfqI= -github.com/tklauser/go-sysconf v0.3.12 h1:0QaGUFOdQaIVdPgfITYzaTegZvdCjmYO52cSFAEVmqU= -github.com/tklauser/go-sysconf v0.3.12/go.mod h1:Ho14jnntGE1fpdOqQEEaiKRpvIavV0hSfmBq8nJbHYI= -github.com/tklauser/numcpus v0.6.0/go.mod h1:FEZLMke0lhOUG6w2JadTzp0a+Nl8PF/GFkQ5UVIcaL4= -github.com/tklauser/numcpus v0.6.1 h1:ng9scYS7az0Bk4OZLvrNXNSAO2Pxr1XXRAPyjhIx+Fk= -github.com/tklauser/numcpus v0.6.1/go.mod h1:1XfjsgE2zo8GVw7POkMbHENHzVg3GzmoZ9fESEdAacY= +github.com/tklauser/go-sysconf v0.3.14 h1:g5vzr9iPFFz24v2KZXs/pvpvh8/V9Fw6vQK5ZZb78yU= +github.com/tklauser/go-sysconf v0.3.14/go.mod h1:1ym4lWMLUOhuBOPGtRcJm7tEGX4SCYNEEEtghGG/8uY= +github.com/tklauser/numcpus v0.8.0 
h1:Mx4Wwe/FjZLeQsK/6kt2EOepwwSl7SmJrK5bV/dXYgY= +github.com/tklauser/numcpus v0.8.0/go.mod h1:ZJZlAY+dmR4eut8epnzf0u/VwodKmryxR8txiloSqBE= github.com/tmc/langchaingo v0.1.12 h1:yXwSu54f3b1IKw0jJ5/DWu+qFVH1NBblwC0xddBzGJE= github.com/tmc/langchaingo v0.1.12/go.mod h1:cd62xD6h+ouk8k/QQFhOsjRYBSA1JJ5UVKXSIgm7Ni4= github.com/ulikunitz/xz v0.5.8/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14= @@ -791,18 +723,11 @@ github.com/ulikunitz/xz v0.5.9 h1:RsKRIA2MO8x56wkkcd3LbtcE/uMszhb6DpRf+3uwa3I= github.com/ulikunitz/xz v0.5.9/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14= github.com/urfave/cli v1.22.2/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= github.com/urfave/cli v1.22.10/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= -github.com/urfave/cli v1.22.12 h1:igJgVw1JdKH+trcLWLeLwZjU9fEfPesQ+9/e4MQ44S8= github.com/urfave/cli v1.22.12/go.mod h1:sSBEIC79qR6OvcmsD4U3KABeOTxDqQtdDnaFuUN30b8= -github.com/urfave/cli/v2 v2.27.3 h1:/POWahRmdh7uztQ3CYnaDddk0Rm90PyOgIxgW2rr41M= -github.com/urfave/cli/v2 v2.27.3/go.mod h1:m4QzxcD2qpra4z7WhzEGn74WZLViBnMpb1ToCAKdGRQ= -github.com/urfave/cli/v2 v2.27.4 h1:o1owoI+02Eb+K107p27wEX9Bb8eqIoZCfLXloLUSWJ8= -github.com/urfave/cli/v2 v2.27.4/go.mod h1:m4QzxcD2qpra4z7WhzEGn74WZLViBnMpb1ToCAKdGRQ= github.com/valyala/bytebufferpool v1.0.0 h1:GqA5TC/0021Y/b9FG4Oi9Mr3q7XYx6KllzawFIhcdPw= github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc= github.com/valyala/fasthttp v1.55.0 h1:Zkefzgt6a7+bVKHnu/YaYSOPfNYNisSVBo/unVCf8k8= github.com/valyala/fasthttp v1.55.0/go.mod h1:NkY9JtkrpPKmgwV3HTaS2HWaJss9RSIsRVfcxxoHiOM= -github.com/valyala/fasttemplate v1.2.2 h1:lxLXG0uE3Qnshl9QyaK6XJxMXlQZELvChBOCmQD0Loo= -github.com/valyala/fasttemplate v1.2.2/go.mod h1:KHLXt3tVN2HBp8eijSv/kGJopbvo7S+qRAEEKiv+SiQ= github.com/valyala/tcplisten v1.0.0 h1:rBHj/Xf+E1tRGZyWIWwJDiRY0zc1Js+CV5DqwacVSA8= github.com/valyala/tcplisten v1.0.0/go.mod h1:T0xQ8SeCZGxckz9qRXTfG43PvQ/mcWh7FwZEA7Ioqkc= 
github.com/vbatts/tar-split v0.11.3 h1:hLFqsOLQ1SsppQNTMpkpPXClLDfC2A3Zgy9OUU+RVck= @@ -830,8 +755,6 @@ github.com/xeipuuv/gojsonschema v1.2.0 h1:LhYJRs+L4fBtjZUfuSZIKGeVu0QRy8e5Xi7D17 github.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQluxsYJ78Id3Y= github.com/xi2/xz v0.0.0-20171230120015-48954b6210f8 h1:nIPpBwaJSVYIxUFsDv3M8ofmx9yWTog9BfvIu0q41lo= github.com/xi2/xz v0.0.0-20171230120015-48954b6210f8/go.mod h1:HUYIGzjTL3rfEspMxjDjgmT5uz5wzYJKVo23qUhYTos= -github.com/xrash/smetrics v0.0.0-20240521201337-686a1a2994c1 h1:gEOO8jv9F4OT7lGCjxCBTO/36wtF6j2nSip77qHd4x4= -github.com/xrash/smetrics v0.0.0-20240521201337-686a1a2994c1/go.mod h1:Ohn+xnUBiLI6FVj/9LpzZWtj1/D6lUovWYBkxHVV3aM= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= @@ -841,7 +764,6 @@ github.com/yuin/goldmark v1.5.4 h1:2uY/xC0roWy8IBEGLgB1ywIoEJFGmRrX21YQcvGZzjU= github.com/yuin/goldmark v1.5.4/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= github.com/yuin/goldmark-emoji v1.0.2 h1:c/RgTShNgHTtc6xdz2KKI74jJr6rWi7FPgnP9GAsO5s= github.com/yuin/goldmark-emoji v1.0.2/go.mod h1:RhP/RWpexdp+KHs7ghKnifRoIs/Bq4nDS7tRbCkOwKY= -github.com/yusufpapurcu/wmi v1.2.3/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0= github.com/yusufpapurcu/wmi v1.2.4 h1:zFUKzehAFReQwLys1b/iSMl+JQGSCSjtVqQn9bBrPo0= github.com/yusufpapurcu/wmi v1.2.4/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0= go.opencensus.io v0.18.0/go.mod h1:vKdFvxhtzZ9onBp9VKHK8z/sRpBMnKAsufL7wlDrCOA= @@ -861,12 +783,8 @@ go.opentelemetry.io/otel/trace v1.28.0 h1:GhQ9cUuQGmNDd5BTCP2dAvv75RdMxEfTmYejp+ go.opentelemetry.io/otel/trace v1.28.0/go.mod h1:jPyXzNPg6da9+38HEwElrQiHlVMTnVfM3/yv2OlIHaI= go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= go.uber.org/atomic v1.7.0/go.mod 
h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= -go.uber.org/dig v1.17.1 h1:Tga8Lz8PcYNsWsyHMZ1Vm0OQOUaJNDyvPImgbAu9YSc= -go.uber.org/dig v1.17.1/go.mod h1:Us0rSJiThwCv2GteUN0Q7OKvU7n5J4dxZ9JKUXozFdE= go.uber.org/dig v1.18.0 h1:imUL1UiY0Mg4bqbFfsRQO5G4CGRBec/ZujWTvSVp3pw= go.uber.org/dig v1.18.0/go.mod h1:Us0rSJiThwCv2GteUN0Q7OKvU7n5J4dxZ9JKUXozFdE= -go.uber.org/fx v1.22.1 h1:nvvln7mwyT5s1q201YE29V/BFrGor6vMiDNpU/78Mys= -go.uber.org/fx v1.22.1/go.mod h1:HT2M7d7RHo+ebKGh9NRcrsrHHfpZ60nW3QRubMRfv48= go.uber.org/fx v1.22.2 h1:iPW+OPxv0G8w75OemJ1RAnTUrF55zOJlXlo1TbJ0Buw= go.uber.org/fx v1.22.2/go.mod h1:o/D9n+2mLP6v1EG+qsdT1O8wKopYAsqZasju97SDFCU= go.uber.org/goleak v1.1.11-0.20210813005559-691160354723/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ= @@ -897,20 +815,11 @@ golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.3.0/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4= golang.org/x/crypto v0.8.0/go.mod h1:mRqEX+O9/h5TFCrQhkgjo2yKi0yYA+9ecGkdQoHrywE= -golang.org/x/crypto v0.11.0/go.mod h1:xgJhtzW8F9jGdVFWZESrid1U1bjeNy4zgy5cRr/CIio= golang.org/x/crypto v0.12.0/go.mod h1:NF0Gs7EO5K4qLn+Ylc+fih8BSTeIjAP05siRnAh98yw= golang.org/x/crypto v0.18.0/go.mod h1:R0j02AL6hcrfOiy9T4ZYp/rcWeMxM3L6QYxlOuEG1mg= -golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU= -golang.org/x/crypto v0.21.0/go.mod h1:0BP7YvVV9gBbVKyeTG0Gyn+gZm94bibOW5BjDEYAOMs= -golang.org/x/crypto v0.24.0 h1:mnl8DM0o513X8fdIkmyFE/5hTYxbwYOjDS/+rK6qpRI= -golang.org/x/crypto v0.24.0/go.mod h1:Z1PMYSOR5nyMcyAVAIQSKCDwalqy85Aqn1x3Ws4L5DM= golang.org/x/crypto v0.26.0 h1:RrRspgV4mU+YwB4FYnuBoKsUapNIL5cohGAmSH3azsw= golang.org/x/crypto v0.26.0/go.mod h1:GY7jblb9wI+FOo5y8/S2oY4zWP07AkOJ4+jxCqdqn54= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= 
-golang.org/x/exp v0.0.0-20240506185415-9bf2ced13842 h1:vr/HnozRka3pE4EsMEg1lgkXJkTFJCVUX+S/ZT6wYzM= -golang.org/x/exp v0.0.0-20240506185415-9bf2ced13842/go.mod h1:XtvwrStGgqGPLc4cjQfWqZHG1YFdYs6swckp8vpsjnc= -golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 h1:2dVuKD2vS7b0QIHQbpyTISPd0LeHDbnYEryqj5Q1ug8= -golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56/go.mod h1:M4RDyNAINzryxdtnbRXRL/OHtkFuWGRjvuhBJpk2IlY= golang.org/x/exp v0.0.0-20240808152545-0cdaa3abc0fa h1:ELnwvuAXPNtPk1TJRuGkI9fDTwym6AYBu0qzT8AcHdI= golang.org/x/exp v0.0.0-20240808152545-0cdaa3abc0fa/go.mod h1:akd2r19cwCdwSwWeIdzYQGa/EZZyqcOdwWiwj5L5eKQ= golang.org/x/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= @@ -927,8 +836,6 @@ golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= golang.org/x/mod v0.7.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= -golang.org/x/mod v0.18.0 h1:5+9lSbEzPSdWkH32vYPBwEpX8KwDbM52Ud9xBUvNlb0= -golang.org/x/mod v0.18.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/mod v0.20.0 h1:utOm6MM3R3dnawAiJgn0y+xvuYRsm1RKM/4giyfDgV0= golang.org/x/mod v0.20.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -943,27 +850,20 @@ golang.org/x/net v0.0.0-20190313220215-9f648a60d977/go.mod h1:t9HGtf8HONx5eT2rtn golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net 
v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= golang.org/x/net v0.0.0-20210423184538-5f58ad60dda6/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk= -golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY= golang.org/x/net v0.3.0/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE= golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/net v0.9.0/go.mod h1:d48xBJpPfHeWQsugry2m+kC02ZBRGRgulfHnEXEuWns= golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= -golang.org/x/net v0.13.0/go.mod h1:zEVYFnQC7m/vmpQFELhcD1EWkZlX69l4oqgmer6hfKA= golang.org/x/net v0.14.0/go.mod h1:PpSgVXXLK0OxS0F31C1/tv6XNguvCrnXIDrFMspZIUI= golang.org/x/net v0.20.0/go.mod h1:z8BVo6PvndSri0LbOE3hAn0apkU+1YvI6E70E9jsnvY= -golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44= -golang.org/x/net v0.22.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg= -golang.org/x/net v0.26.0 h1:soB7SVo0PWrY4vPW/+ay0jKDNScG2X9wFeYlXIvJsOQ= -golang.org/x/net v0.26.0/go.mod h1:5YKkiSynbBIh3p6iOc/vibscux0x38BZDkn8sCUPxHE= golang.org/x/net v0.28.0 h1:a9JDOJc5GMUJ0+UDqmLT86WiEy7iWyIhz8gz8E4e5hE= golang.org/x/net 
v0.28.0/go.mod h1:yqtgsTWOOnlGLG9GFRrK3++bGOUEkNBoHZc8MEDWPNg= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= @@ -981,8 +881,6 @@ golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M= -golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sync v0.8.0 h1:3NFvSEYkUoMifnESzZl15y791HH1qU2xm6eCJU5ZPXQ= golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20180810173357-98c5dad5d1a0/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -992,20 +890,15 @@ golang.org/x/sys v0.0.0-20181029174526-d69651ed3497/go.mod h1:STP8DvDyc/dI5b8T5h golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190316082340-a2f829d7f35f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys 
v0.0.0-20200124204421-9fbb57f87de9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200217220822-9197077df867/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200602225109-6fdc65e7d980/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200728102440-3e129f6d46b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201204225414-ed752295db88/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210303074136-134d130e1a04/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -1019,6 +912,7 @@ golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220906165534-d0df966e6959/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod 
h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -1026,15 +920,10 @@ golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.9.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.10.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.16.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/sys v0.22.0 h1:RI27ohtqKCnwULzJLqkv897zojh5/DwS/ENaMzUOaWI= -golang.org/x/sys v0.22.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.24.0 h1:Twjiwq9dn6R1fQcyiK+wQyHWfaz/BJB+YIpzU/Cv3Xg= golang.org/x/sys v0.24.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= @@ -1044,14 +933,9 @@ golang.org/x/term v0.3.0/go.mod h1:q750SLmJuPmVoN1blW3UFBPREJfb1KmY3vwxfr+nFDA= golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= golang.org/x/term v0.7.0/go.mod h1:P32HKFT3hSsZrRxla30E9HqToFYAQPCMs/zFMBUFqPY= golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo= -golang.org/x/term v0.10.0/go.mod h1:lpqdcUyK/oCiQxvxVrppt5ggO2KCZ5QblwqPnfZ6d5o= golang.org/x/term v0.11.0/go.mod h1:zC9APTIj3jG3FdV/Ons+XE1riIZXG4aZ4GTHiPZJPIU= golang.org/x/term v0.16.0/go.mod h1:yn7UURbUtPyrVJPGPq404EukNFxcm/foM+bV/bfcDsY= -golang.org/x/term v0.17.0/go.mod 
h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk= -golang.org/x/term v0.18.0/go.mod h1:ILwASektA3OnRv7amZ1xhE/KTR+u50pbXfZ03+6Nx58= golang.org/x/term v0.20.0/go.mod h1:8UkIAJTvZgivsXaD6/pH6U9ecQzZ45awqEOzuCvwpFY= -golang.org/x/term v0.21.0 h1:WVXCp+/EBEHOj53Rvu+7KiT/iElMrO8ACK16SMZ3jaA= -golang.org/x/term v0.21.0/go.mod h1:ooXLefLobQVslOqselCNF4SxFAaoS6KujMbsGzSDmX0= golang.org/x/term v0.23.0 h1:F6D4vR+EHoL9/sWAWgAR1H2DcHr4PareCbAaCo1RpuU= golang.org/x/term v0.23.0/go.mod h1:DgV24QBUrK6jhZXl+20l6UWznPlwAHm1Q1mGHtydmSk= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -1063,11 +947,8 @@ golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.5.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= -golang.org/x/text v0.11.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/text v0.12.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= -golang.org/x/text v0.16.0 h1:a94ExnEXNtEwYLGJSIUxnWoxoRz/ZcCsV63ROupILh4= -golang.org/x/text v0.16.0/go.mod h1:GhwF1Be+LQoKShO3cGOHzqOgRrGaYc9AvblQOmPVHnI= golang.org/x/text v0.17.0 h1:XtiM5bkSOt+ewxlOE/aE/AKEHibwj/6gvWMl9Rsh0Qc= golang.org/x/text v0.17.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= @@ -1089,14 +970,11 @@ golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtn golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod 
h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= golang.org/x/tools v0.4.0/go.mod h1:UE5sM2OK9E/d67R0ANs2xJizIymRP5gJU295PvKXxjQ= golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= -golang.org/x/tools v0.22.0 h1:gqSGLZqv+AI9lIQzniJ0nZDRG5GBPsSi+DRNHWNz6yA= -golang.org/x/tools v0.22.0/go.mod h1:aCwcsjqvq7Yqt6TNyX7QMU2enbQ/Gt0bo6krSeEri+c= golang.org/x/tools v0.24.0 h1:J1shsA93PJUEVaUSaay7UXAyE8aimq3GW0pjlolpa24= golang.org/x/tools v0.24.0/go.mod h1:YhNqVBIfWHdzvTLs0d8LCuMhkKUgSUKldakyV7W/WDQ= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -1146,8 +1024,6 @@ google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2 google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= -google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= -google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg= google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= @@ -1159,13 +1035,11 @@ gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= 
gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= -gopkg.in/op/go-logging.v1 v1.0.0-20160211212156-b2cb9fa56473/go.mod h1:N1eN2tsCx0Ydtgjl4cqmbRCsY4/+z4cYDeqwZTk6zog= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= gopkg.in/yaml.v1 v1.0.0-20140924161607-9f9df34309c0/go.mod h1:WDnlLJ4WF5VGsH/HVa3CI79GS0ol3YnhVnKP89i0kNg= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= @@ -1184,8 +1058,6 @@ honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWh honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= howett.net/plist v1.0.0 h1:7CrbWYbPPO/PyNy38b2EB/+gYbjCe2DXBxgtOOZbSQM= howett.net/plist v1.0.0/go.mod h1:lqaXoTrLY4hg8tnEzNru53gicrbv7rrk+2xJA/7hw9g= -lukechampine.com/blake3 v1.2.1 h1:YuqqRuaqsGV71BV/nm9xlI0MKUv4QC54jQnBChWbGnI= -lukechampine.com/blake3 v1.2.1/go.mod h1:0OFRp7fBtAylGVCO40o87sbupkyIGgbpv1+M1k1LM6k= lukechampine.com/blake3 v1.3.0 h1:sJ3XhFINmHSrYCgl958hscfIa3bw8x4DqMP3u1YvoYE= lukechampine.com/blake3 v1.3.0/go.mod h1:0OFRp7fBtAylGVCO40o87sbupkyIGgbpv1+M1k1LM6k= oras.land/oras-go/v2 v2.5.0 h1:o8Me9kLY74Vp5uw07QXPiitjsw7qNXi8Twd+19Zf02c= diff --git a/pkg/model/initializers.go b/pkg/model/initializers.go index 11980f03..d85da6c1 100644 --- 
a/pkg/model/initializers.go +++ b/pkg/model/initializers.go @@ -294,13 +294,15 @@ func (ml *ModelLoader) grpcModel(backend string, o *Options) func(string, string if uri, ok := o.externalBackends[backend]; ok { log.Debug().Msgf("Loading external backend: %s", uri) // check if uri is a file or a address - if _, err := os.Stat(uri); err == nil { + if fi, err := os.Stat(uri); err == nil { + log.Debug().Msgf("external backend is file: %+v", fi) serverAddress, err := getFreeAddress() if err != nil { return "", fmt.Errorf("failed allocating free ports: %s", err.Error()) } // Make sure the process is executable if err := ml.startProcess(uri, o.model, serverAddress); err != nil { + log.Error().Err(err).Str("path", uri).Msg("failed to launch ") return "", err } @@ -308,6 +310,7 @@ func (ml *ModelLoader) grpcModel(backend string, o *Options) func(string, string client = ModelAddress(serverAddress) } else { + log.Debug().Msg("external backend is uri") // address client = ModelAddress(uri) } diff --git a/pkg/model/process.go b/pkg/model/process.go index 7b7ecb97..6a4fd326 100644 --- a/pkg/model/process.go +++ b/pkg/model/process.go @@ -5,6 +5,7 @@ import ( "fmt" "os" "os/signal" + "path/filepath" "strconv" "strings" "syscall" @@ -79,11 +80,17 @@ func (ml *ModelLoader) startProcess(grpcProcess, id string, serverAddress string log.Debug().Msgf("GRPC Service for %s will be running at: '%s'", id, serverAddress) + workDir, err := filepath.Abs(filepath.Dir(grpcProcess)) + if err != nil { + return err + } + grpcControlProcess := process.New( process.WithTemporaryStateDir(), - process.WithName(grpcProcess), + process.WithName(filepath.Base(grpcProcess)), process.WithArgs(append(args, []string{"--addr", serverAddress}...)...), process.WithEnvironment(os.Environ()...), + process.WithWorkDir(workDir), ) if ml.wd != nil { From 84d6e5a9879313bfdc32de013617d7a27a03ef71 Mon Sep 17 00:00:00 2001 From: Ettore Di Giacinto Date: Sat, 24 Aug 2024 00:29:24 +0200 Subject: [PATCH 0006/1530] 
chore(model-gallery): add more quants for popular models (#3365) * models(gallery): add higher quants for some llama and hermes Signed-off-by: Ettore Di Giacinto * models(gallery): vllm: specify a reasonable max_tokens Signed-off-by: Ettore Di Giacinto --------- Signed-off-by: Ettore Di Giacinto --- gallery/hermes-vllm.yaml | 2 ++ gallery/index.yaml | 48 ++++++++++++++++++++++++++++++++++++++++ gallery/vllm.yaml | 3 +++ 3 files changed, 53 insertions(+) diff --git a/gallery/hermes-vllm.yaml b/gallery/hermes-vllm.yaml index 80277da0..e8ed96b7 100644 --- a/gallery/hermes-vllm.yaml +++ b/gallery/hermes-vllm.yaml @@ -3,6 +3,8 @@ name: "hermes-vllm" config_file: | backend: vllm + parameters: + max_tokens: 8192 context_size: 8192 stopwords: - "<|im_end|>" diff --git a/gallery/index.yaml b/gallery/index.yaml index f34a09c4..694df8d3 100644 --- a/gallery/index.yaml +++ b/gallery/index.yaml @@ -85,6 +85,24 @@ - filename: Meta-Llama-3.1-8B-Instruct.Q4_K_M.gguf sha256: c2f17f44af962660d1ad4cb1af91a731f219f3b326c2b14441f9df1f347f2815 uri: huggingface://MaziyarPanahi/Meta-Llama-3.1-8B-Instruct-GGUF/Meta-Llama-3.1-8B-Instruct.Q4_K_M.gguf +- !!merge <<: *llama31 + name: "meta-llama-3.1-8b-instruct:Q8_grammar-functioncall" + url: "github:mudler/LocalAI/gallery/llama3.1-instruct-grammar.yaml@master" + urls: + - https://huggingface.co/meta-llama/Meta-Llama-3.1-8B-Instruct + - https://huggingface.co/MaziyarPanahi/Meta-Llama-3.1-8B-Instruct-GGUF + description: | + This is the standard Llama 3.1 8B Instruct model with grammar and function call enabled. + + When grammars are enabled in LocalAI, the LLM is forced to output valid tools constrained by BNF grammars. This can be useful for ensuring that the model outputs are valid and can be used in a production environment. + For more information on how to use grammars in LocalAI, see https://localai.io/features/openai-functions/#advanced and https://localai.io/features/constrained_grammars/. 
+ overrides: + parameters: + model: Meta-Llama-3.1-8B-Instruct.Q8_0.gguf + files: + - filename: Meta-Llama-3.1-8B-Instruct.Q8_0.gguf + sha256: f8d608c983b83a1bf28229bc9beb4294c91f5d4cbfe2c1829566b4d7c4693eeb + uri: huggingface://MaziyarPanahi/Meta-Llama-3.1-8B-Instruct-GGUF/Meta-Llama-3.1-8B-Instruct.Q8_0.gguf - !!merge <<: *llama31 name: "meta-llama-3.1-8b-claude-imat" urls: @@ -4737,6 +4755,21 @@ - filename: Hermes-3-Llama-3.1-8B.Q4_K_M.gguf sha256: d4403ce5a6e930f4c2509456388c20d633a15ff08dd52ef3b142ff1810ec3553 uri: huggingface://NousResearch/Hermes-3-Llama-3.1-8B-GGUF/Hermes-3-Llama-3.1-8B.Q4_K_M.gguf +- !!merge <<: *hermes-2-pro-mistral + name: "hermes-3-llama-3.1-8b:Q8" + icon: https://cdn-uploads.huggingface.co/production/uploads/6317aade83d8d2fd903192d9/bMcZ3sNNQK8SRZpHXBmwM.jpeg + urls: + - https://huggingface.co/NousResearch/Hermes-3-Llama-3.1-8B + - https://huggingface.co/NousResearch/Hermes-3-Llama-3.1-8B-GGUF + description: | + Hermes 3 is a generalist language model developed by Nous Research. It is an advanced agentic model with improved roleplaying, reasoning, multi-turn conversation, long context coherence, and generalist assistant capabilities. The model is built on top of the Llama-3 architecture and has been fine-tuned to achieve superior performance in various tasks. It is designed to be a powerful and reliable tool for solving complex problems and assisting users in achieving their goals. Hermes 3 can be used for a wide range of applications, including research, education, and personal assistant tasks. It is available on the Hugging Face model hub for easy access and integration into existing workflows. 
+ overrides: + parameters: + model: Hermes-3-Llama-3.1-8B.Q8_0.gguf + files: + - filename: Hermes-3-Llama-3.1-8B.Q8_0.gguf + sha256: c77c263f78b2f56fbaddd3ef2af750fda6ebb4344a546aaa0bfdd546b1ca8d84 + uri: huggingface://NousResearch/Hermes-3-Llama-3.1-8B-GGUF/Hermes-3-Llama-3.1-8B.Q8_0.gguf - !!merge <<: *hermes-2-pro-mistral name: "hermes-3-llama-3.1-70b" icon: https://cdn-uploads.huggingface.co/production/uploads/6317aade83d8d2fd903192d9/vG6j5WxHX09yj32vgjJlI.jpeg @@ -4752,6 +4785,21 @@ - filename: Hermes-3-Llama-3.1-70B.Q4_K_M.gguf sha256: 955c2f42caade4278f3c9dbffa32bb74572652b20e49e5340e782de3585bbe3f uri: huggingface://NousResearch/Hermes-3-Llama-3.1-70B-GGUF/Hermes-3-Llama-3.1-70B.Q4_K_M.gguf +- !!merge <<: *hermes-2-pro-mistral + name: "hermes-3-llama-3.1-70b:Q5_K_M" + icon: https://cdn-uploads.huggingface.co/production/uploads/6317aade83d8d2fd903192d9/vG6j5WxHX09yj32vgjJlI.jpeg + urls: + - https://huggingface.co/NousResearch/Hermes-3-Llama-3.1-70B + - https://huggingface.co/NousResearch/Hermes-3-Llama-3.1-70B-GGUF + description: | + Hermes 3 is a generalist language model with many improvements over Hermes 2, including advanced agentic capabilities, much better roleplaying, reasoning, multi-turn conversation, long context coherence, and improvements across the board. It is designed to focus on aligning LLMs to the user, with powerful steering capabilities and control given to the end user. The model uses ChatML as the prompt format, opening up a much more structured system for engaging the LLM in multi-turn chat dialogue. It also supports function calling and structured output capabilities, generalist assistant capabilities, and improved code generation skills. 
+ overrides: + parameters: + model: Hermes-3-Llama-3.1-70B.Q5_K_M.gguf + files: + - filename: Hermes-3-Llama-3.1-70B.Q5_K_M.gguf + sha256: 10ae3e0441b14c4a6476436f3c14e8bcacc7928aa3e8ce978d053287289a7ebb + uri: huggingface://NousResearch/Hermes-3-Llama-3.1-70B-GGUF/Hermes-3-Llama-3.1-70B.Q5_K_M.gguf - &hermes-vllm url: "github:mudler/LocalAI/gallery/hermes-vllm.yaml@master" name: "hermes-3-llama-3.1-8b:vllm" diff --git a/gallery/vllm.yaml b/gallery/vllm.yaml index d36ea96d..5a2f16ce 100644 --- a/gallery/vllm.yaml +++ b/gallery/vllm.yaml @@ -2,6 +2,9 @@ name: "vllm" config_file: | + context_size: 8192 + parameters: + max_tokens: 8192 backend: vllm function: disable_no_action: true From 81ae92f017e3af1f3f766e0f3911dbc78a9742e5 Mon Sep 17 00:00:00 2001 From: Dave Date: Fri, 23 Aug 2024 20:20:28 -0400 Subject: [PATCH 0007/1530] feat: elevenlabs `sound-generation` api (#3355) * initial version of elevenlabs compatible soundgeneration api and cli command Signed-off-by: Dave Lee * minor cleanup Signed-off-by: Dave Lee * restore TTS, add test Signed-off-by: Dave Lee * remove stray s Signed-off-by: Dave Lee * fix Signed-off-by: Dave Lee --------- Signed-off-by: Dave Lee Signed-off-by: Ettore Di Giacinto Co-authored-by: Ettore Di Giacinto --- backend/backend.proto | 12 ++ .../python/transformers-musicgen/backend.py | 60 +++++++++- backend/python/transformers-musicgen/test.py | 21 +++- core/backend/llm.go | 2 +- core/backend/soundgeneration.go | 74 ++++++++++++ core/backend/tts.go | 30 ++--- core/cli/cli.go | 17 +-- core/cli/soundgeneration.go | 110 ++++++++++++++++++ .../endpoints/elevenlabs/soundgeneration.go | 65 +++++++++++ core/http/routes/elevenlabs.go | 2 + core/schema/elevenlabs.go | 8 ++ .../Sound Generation/musicgen.bru | 23 ++++ go.sum | 3 + pkg/grpc/backend.go | 1 + pkg/grpc/base/base.go | 4 + pkg/grpc/client.go | 20 ++++ pkg/grpc/embed.go | 4 + pkg/grpc/interface.go | 1 + pkg/grpc/server.go | 14 ++- pkg/utils/path.go | 16 +++ 20 files changed, 450 insertions(+), 
37 deletions(-) create mode 100644 core/backend/soundgeneration.go create mode 100644 core/cli/soundgeneration.go create mode 100644 core/http/endpoints/elevenlabs/soundgeneration.go create mode 100644 examples/bruno/LocalAI Test Requests/Sound Generation/musicgen.bru diff --git a/backend/backend.proto b/backend/backend.proto index 0d3d5f7f..4a8f31a9 100644 --- a/backend/backend.proto +++ b/backend/backend.proto @@ -16,6 +16,7 @@ service Backend { rpc GenerateImage(GenerateImageRequest) returns (Result) {} rpc AudioTranscription(TranscriptRequest) returns (TranscriptResult) {} rpc TTS(TTSRequest) returns (Result) {} + rpc SoundGeneration(SoundGenerationRequest) returns (Result) {} rpc TokenizeString(PredictOptions) returns (TokenizationResponse) {} rpc Status(HealthMessage) returns (StatusResponse) {} @@ -270,6 +271,17 @@ message TTSRequest { optional string language = 5; } +message SoundGenerationRequest { + string text = 1; + string model = 2; + string dst = 3; + optional float duration = 4; + optional float temperature = 5; + optional bool sample = 6; + optional string src = 7; + optional int32 src_divisor = 8; +} + message TokenizationResponse { int32 length = 1; repeated int32 tokens = 2; diff --git a/backend/python/transformers-musicgen/backend.py b/backend/python/transformers-musicgen/backend.py index d41d9a5c..b9f1facf 100644 --- a/backend/python/transformers-musicgen/backend.py +++ b/backend/python/transformers-musicgen/backend.py @@ -15,7 +15,7 @@ import backend_pb2_grpc import grpc -from scipy.io.wavfile import write as write_wav +from scipy.io import wavfile from transformers import AutoProcessor, MusicgenForConditionalGeneration _ONE_DAY_IN_SECONDS = 60 * 60 * 24 @@ -63,6 +63,61 @@ class BackendServicer(backend_pb2_grpc.BackendServicer): return backend_pb2.Result(message="Model loaded successfully", success=True) + def SoundGeneration(self, request, context): + model_name = request.model + if model_name == "": + return backend_pb2.Result(success=False, 
message="request.model is required") + try: + self.processor = AutoProcessor.from_pretrained(model_name) + self.model = MusicgenForConditionalGeneration.from_pretrained(model_name) + inputs = None + if request.text == "": + inputs = self.model.get_unconditional_inputs(num_samples=1) + elif request.HasField('src'): + # TODO SECURITY CODE GOES HERE LOL + # WHO KNOWS IF THIS WORKS??? + sample_rate, wsamples = wavfile.read('path_to_your_file.wav') + + if request.HasField('src_divisor'): + wsamples = wsamples[: len(wsamples) // request.src_divisor] + + inputs = self.processor( + audio=wsamples, + sampling_rate=sample_rate, + text=[request.text], + padding=True, + return_tensors="pt", + ) + else: + inputs = self.processor( + text=[request.text], + padding=True, + return_tensors="pt", + ) + + tokens = 256 + if request.HasField('duration'): + tokens = int(request.duration * 51.2) # 256 tokens = 5 seconds, therefore 51.2 tokens is one second + guidance = 3.0 + if request.HasField('temperature'): + guidance = request.temperature + dosample = True + if request.HasField('sample'): + dosample = request.sample + audio_values = self.model.generate(**inputs, do_sample=dosample, guidance_scale=guidance, max_new_tokens=tokens) + print("[transformers-musicgen] SoundGeneration generated!", file=sys.stderr) + sampling_rate = self.model.config.audio_encoder.sampling_rate + wavfile.write(request.dst, rate=sampling_rate, data=audio_values[0, 0].numpy()) + print("[transformers-musicgen] SoundGeneration saved to", request.dst, file=sys.stderr) + print("[transformers-musicgen] SoundGeneration for", file=sys.stderr) + print("[transformers-musicgen] SoundGeneration requested tokens", tokens, file=sys.stderr) + print(request, file=sys.stderr) + except Exception as err: + return backend_pb2.Result(success=False, message=f"Unexpected {err=}, {type(err)=}") + return backend_pb2.Result(success=True) + + +# The TTS endpoint is older, and provides fewer features, but exists for compatibility reasons 
def TTS(self, request, context): model_name = request.model if model_name == "": @@ -75,8 +130,7 @@ class BackendServicer(backend_pb2_grpc.BackendServicer): padding=True, return_tensors="pt", ) - tokens = 256 - # TODO get tokens from request? + tokens = 512 # No good place to set the "length" in TTS, so use 10s as a sane default audio_values = self.model.generate(**inputs, max_new_tokens=tokens) print("[transformers-musicgen] TTS generated!", file=sys.stderr) sampling_rate = self.model.config.audio_encoder.sampling_rate diff --git a/backend/python/transformers-musicgen/test.py b/backend/python/transformers-musicgen/test.py index 777b399a..295de65e 100644 --- a/backend/python/transformers-musicgen/test.py +++ b/backend/python/transformers-musicgen/test.py @@ -63,7 +63,7 @@ class TestBackendServicer(unittest.TestCase): def test_tts(self): """ - This method tests if the embeddings are generated successfully + This method tests if TTS is generated successfully """ try: self.setUp() @@ -77,5 +77,24 @@ class TestBackendServicer(unittest.TestCase): except Exception as err: print(err) self.fail("TTS service failed") + finally: + self.tearDown() + + def test_sound_generation(self): + """ + This method tests if SoundGeneration is generated successfully + """ + try: + self.setUp() + with grpc.insecure_channel("localhost:50051") as channel: + stub = backend_pb2_grpc.BackendStub(channel) + response = stub.LoadModel(backend_pb2.ModelOptions(Model="facebook/musicgen-small")) + self.assertTrue(response.success) + sg_request = backend_pb2.SoundGenerationRequest(text="80s TV news production music hit for tonight's biggest story") + sg_response = stub.SoundGeneration(sg_request) + self.assertIsNotNone(sg_response) + except Exception as err: + print(err) + self.fail("SoundGeneration service failed") finally: self.tearDown() \ No newline at end of file diff --git a/core/backend/llm.go b/core/backend/llm.go index 9268fbbc..72c4ad9f 100644 --- a/core/backend/llm.go +++ 
b/core/backend/llm.go @@ -87,7 +87,7 @@ func ModelInference(ctx context.Context, s string, messages []schema.Message, im case string: protoMessages[i].Content = ct default: - return nil, fmt.Errorf("Unsupported type for schema.Message.Content for inference: %T", ct) + return nil, fmt.Errorf("unsupported type for schema.Message.Content for inference: %T", ct) } } } diff --git a/core/backend/soundgeneration.go b/core/backend/soundgeneration.go new file mode 100644 index 00000000..abd5221b --- /dev/null +++ b/core/backend/soundgeneration.go @@ -0,0 +1,74 @@ +package backend + +import ( + "context" + "fmt" + "os" + "path/filepath" + + "github.com/mudler/LocalAI/core/config" + "github.com/mudler/LocalAI/pkg/grpc/proto" + "github.com/mudler/LocalAI/pkg/model" + "github.com/mudler/LocalAI/pkg/utils" +) + +func SoundGeneration( + backend string, + modelFile string, + text string, + duration *float32, + temperature *float32, + doSample *bool, + sourceFile *string, + sourceDivisor *int32, + loader *model.ModelLoader, + appConfig *config.ApplicationConfig, + backendConfig config.BackendConfig, +) (string, *proto.Result, error) { + if backend == "" { + return "", nil, fmt.Errorf("backend is a required parameter") + } + + grpcOpts := gRPCModelOpts(backendConfig) + opts := modelOpts(config.BackendConfig{}, appConfig, []model.Option{ + model.WithBackendString(backend), + model.WithModel(modelFile), + model.WithContext(appConfig.Context), + model.WithAssetDir(appConfig.AssetsDestination), + model.WithLoadGRPCLoadModelOpts(grpcOpts), + }) + + soundGenModel, err := loader.BackendLoader(opts...) 
+ if err != nil { + return "", nil, err + } + + if soundGenModel == nil { + return "", nil, fmt.Errorf("could not load sound generation model") + } + + if err := os.MkdirAll(appConfig.AudioDir, 0750); err != nil { + return "", nil, fmt.Errorf("failed creating audio directory: %s", err) + } + + fileName := utils.GenerateUniqueFileName(appConfig.AudioDir, "sound_generation", ".wav") + filePath := filepath.Join(appConfig.AudioDir, fileName) + + res, err := soundGenModel.SoundGeneration(context.Background(), &proto.SoundGenerationRequest{ + Text: text, + Model: modelFile, + Dst: filePath, + Sample: doSample, + Duration: duration, + Temperature: temperature, + Src: sourceFile, + SrcDivisor: sourceDivisor, + }) + + // return RPC error if any + if !res.Success { + return "", nil, fmt.Errorf(res.Message) + } + + return filePath, res, err +} diff --git a/core/backend/tts.go b/core/backend/tts.go index ced73e13..13a851ba 100644 --- a/core/backend/tts.go +++ b/core/backend/tts.go @@ -9,31 +9,15 @@ import ( "github.com/mudler/LocalAI/core/config" "github.com/mudler/LocalAI/pkg/grpc/proto" - model "github.com/mudler/LocalAI/pkg/model" + "github.com/mudler/LocalAI/pkg/model" "github.com/mudler/LocalAI/pkg/utils" ) -func generateUniqueFileName(dir, baseName, ext string) string { - counter := 1 - fileName := baseName + ext - - for { - filePath := filepath.Join(dir, fileName) - _, err := os.Stat(filePath) - if os.IsNotExist(err) { - return fileName - } - - counter++ - fileName = fmt.Sprintf("%s_%d%s", baseName, counter, ext) - } -} - func ModelTTS( backend, text, modelFile, - voice , + voice, language string, loader *model.ModelLoader, appConfig *config.ApplicationConfig, @@ -66,7 +50,7 @@ func ModelTTS( return "", nil, fmt.Errorf("failed creating audio directory: %s", err) } - fileName := generateUniqueFileName(appConfig.AudioDir, "tts", ".wav") + fileName := utils.GenerateUniqueFileName(appConfig.AudioDir, "tts", ".wav") filePath := filepath.Join(appConfig.AudioDir, fileName) // 
If the model file is not empty, we pass it joined with the model path @@ -88,10 +72,10 @@ func ModelTTS( } res, err := ttsModel.TTS(context.Background(), &proto.TTSRequest{ - Text: text, - Model: modelPath, - Voice: voice, - Dst: filePath, + Text: text, + Model: modelPath, + Voice: voice, + Dst: filePath, Language: &language, }) diff --git a/core/cli/cli.go b/core/cli/cli.go index 2073778d..aed75d8a 100644 --- a/core/cli/cli.go +++ b/core/cli/cli.go @@ -8,12 +8,13 @@ import ( var CLI struct { cliContext.Context `embed:""` - Run RunCMD `cmd:"" help:"Run LocalAI, this the default command if no other command is specified. Run 'local-ai run --help' for more information" default:"withargs"` - Federated FederatedCLI `cmd:"" help:"Run LocalAI in federated mode"` - Models ModelsCMD `cmd:"" help:"Manage LocalAI models and definitions"` - TTS TTSCMD `cmd:"" help:"Convert text to speech"` - Transcript TranscriptCMD `cmd:"" help:"Convert audio to text"` - Worker worker.Worker `cmd:"" help:"Run workers to distribute workload (llama.cpp-only)"` - Util UtilCMD `cmd:"" help:"Utility commands"` - Explorer ExplorerCMD `cmd:"" help:"Run p2p explorer"` + Run RunCMD `cmd:"" help:"Run LocalAI, this the default command if no other command is specified. 
Run 'local-ai run --help' for more information" default:"withargs"` + Federated FederatedCLI `cmd:"" help:"Run LocalAI in federated mode"` + Models ModelsCMD `cmd:"" help:"Manage LocalAI models and definitions"` + TTS TTSCMD `cmd:"" help:"Convert text to speech"` + SoundGeneration SoundGenerationCMD `cmd:"" help:"Generates audio files from text or audio"` + Transcript TranscriptCMD `cmd:"" help:"Convert audio to text"` + Worker worker.Worker `cmd:"" help:"Run workers to distribute workload (llama.cpp-only)"` + Util UtilCMD `cmd:"" help:"Utility commands"` + Explorer ExplorerCMD `cmd:"" help:"Run p2p explorer"` } diff --git a/core/cli/soundgeneration.go b/core/cli/soundgeneration.go new file mode 100644 index 00000000..5711b199 --- /dev/null +++ b/core/cli/soundgeneration.go @@ -0,0 +1,110 @@ +package cli + +import ( + "context" + "fmt" + "os" + "path/filepath" + "strconv" + "strings" + + "github.com/mudler/LocalAI/core/backend" + cliContext "github.com/mudler/LocalAI/core/cli/context" + "github.com/mudler/LocalAI/core/config" + "github.com/mudler/LocalAI/pkg/model" + "github.com/rs/zerolog/log" +) + +type SoundGenerationCMD struct { + Text []string `arg:""` + + Backend string `short:"b" required:"" help:"Backend to run the SoundGeneration model"` + Model string `short:"m" required:"" help:"Model name to run the SoundGeneration"` + Duration string `short:"d" help:"If specified, the length of audio to generate in seconds"` + Temperature string `short:"t" help:"If specified, the temperature of the generation"` + InputFile string `short:"i" help:"If specified, the input file to condition generation upon"` + InputFileSampleDivisor string `short:"f" help:"If InputFile and this divisor is specified, the first portion of the sample file will be used"` + DoSample bool `short:"s" default:"true" help:"Enables sampling from the model. Better quality at the cost of speed. 
Defaults to enabled."` + OutputFile string `short:"o" type:"path" help:"The path to write the output wav file"` + ModelsPath string `env:"LOCALAI_MODELS_PATH,MODELS_PATH" type:"path" default:"${basepath}/models" help:"Path containing models used for inferencing" group:"storage"` + BackendAssetsPath string `env:"LOCALAI_BACKEND_ASSETS_PATH,BACKEND_ASSETS_PATH" type:"path" default:"/tmp/localai/backend_data" help:"Path used to extract libraries that are required by some of the backends in runtime" group:"storage"` + ExternalGRPCBackends []string `env:"LOCALAI_EXTERNAL_GRPC_BACKENDS,EXTERNAL_GRPC_BACKENDS" help:"A list of external grpc backends" group:"backends"` +} + +func parseToFloat32Ptr(input string) *float32 { + f, err := strconv.ParseFloat(input, 32) + if err != nil { + return nil + } + f2 := float32(f) + return &f2 +} + +func parseToInt32Ptr(input string) *int32 { + i, err := strconv.ParseInt(input, 10, 32) + if err != nil { + return nil + } + i2 := int32(i) + return &i2 +} + +func (t *SoundGenerationCMD) Run(ctx *cliContext.Context) error { + outputFile := t.OutputFile + outputDir := t.BackendAssetsPath + if outputFile != "" { + outputDir = filepath.Dir(outputFile) + } + + text := strings.Join(t.Text, " ") + + externalBackends := make(map[string]string) + // split ":" to get backend name and the uri + for _, v := range t.ExternalGRPCBackends { + backend := v[:strings.IndexByte(v, ':')] + uri := v[strings.IndexByte(v, ':')+1:] + externalBackends[backend] = uri + fmt.Printf("TMP externalBackends[%q]=%q\n\n", backend, uri) + } + + opts := &config.ApplicationConfig{ + ModelPath: t.ModelsPath, + Context: context.Background(), + AudioDir: outputDir, + AssetsDestination: t.BackendAssetsPath, + ExternalGRPCBackends: externalBackends, + } + ml := model.NewModelLoader(opts.ModelPath) + + defer func() { + err := ml.StopAllGRPC() + if err != nil { + log.Error().Err(err).Msg("unable to stop all grpc processes") + } + }() + + options := config.BackendConfig{} + 
options.SetDefaults() + + var inputFile *string + if t.InputFile != "" { + inputFile = &t.InputFile + } + + filePath, _, err := backend.SoundGeneration(t.Backend, t.Model, text, + parseToFloat32Ptr(t.Duration), parseToFloat32Ptr(t.Temperature), &t.DoSample, + inputFile, parseToInt32Ptr(t.InputFileSampleDivisor), ml, opts, options) + + if err != nil { + return err + } + if outputFile != "" { + if err := os.Rename(filePath, outputFile); err != nil { + return err + } + fmt.Printf("Generate file %s\n", outputFile) + } else { + fmt.Printf("Generate file %s\n", filePath) + } + return nil +} diff --git a/core/http/endpoints/elevenlabs/soundgeneration.go b/core/http/endpoints/elevenlabs/soundgeneration.go new file mode 100644 index 00000000..619544d8 --- /dev/null +++ b/core/http/endpoints/elevenlabs/soundgeneration.go @@ -0,0 +1,65 @@ +package elevenlabs + +import ( + "github.com/gofiber/fiber/v2" + "github.com/mudler/LocalAI/core/backend" + "github.com/mudler/LocalAI/core/config" + fiberContext "github.com/mudler/LocalAI/core/http/ctx" + "github.com/mudler/LocalAI/core/schema" + "github.com/mudler/LocalAI/pkg/model" + "github.com/rs/zerolog/log" +) + +// SoundGenerationEndpoint is the ElevenLabs SoundGeneration endpoint https://elevenlabs.io/docs/api-reference/sound-generation +// @Summary Generates audio from the input text. 
+// @Param request body schema.ElevenLabsSoundGenerationRequest true "query params" +// @Success 200 {string} binary "Response" +// @Router /v1/sound-generation [post] +func SoundGenerationEndpoint(cl *config.BackendConfigLoader, ml *model.ModelLoader, appConfig *config.ApplicationConfig) func(c *fiber.Ctx) error { + return func(c *fiber.Ctx) error { + input := new(schema.ElevenLabsSoundGenerationRequest) + // Get input data from the request body + if err := c.BodyParser(input); err != nil { + return err + } + + modelFile, err := fiberContext.ModelFromContext(c, cl, ml, input.ModelID, false) + if err != nil { + modelFile = input.ModelID + log.Warn().Str("ModelID", input.ModelID).Msg("Model not found in context") + } + + cfg, err := cl.LoadBackendConfigFileByName(modelFile, appConfig.ModelPath, + config.LoadOptionDebug(appConfig.Debug), + config.LoadOptionThreads(appConfig.Threads), + config.LoadOptionContextSize(appConfig.ContextSize), + config.LoadOptionF16(appConfig.F16), + ) + if err != nil { + modelFile = input.ModelID + log.Warn().Str("Request ModelID", input.ModelID).Err(err).Msg("error during LoadBackendConfigFileByName, using request ModelID") + } else { + if input.ModelID != "" { + modelFile = input.ModelID + } else { + modelFile = cfg.Model + } + } + log.Debug().Str("modelFile", "modelFile").Str("backend", cfg.Backend).Msg("Sound Generation Request about to be sent to backend") + + if input.Duration != nil { + log.Debug().Float32("duration", *input.Duration).Msg("duration set") + } + if input.Temperature != nil { + log.Debug().Float32("temperature", *input.Temperature).Msg("temperature set") + } + + // TODO: Support uploading files? 
+ filePath, _, err := backend.SoundGeneration(cfg.Backend, modelFile, input.Text, input.Duration, input.Temperature, input.DoSample, nil, nil, ml, appConfig, *cfg) + if err != nil { + return err + } + return c.Download(filePath) + + } +} diff --git a/core/http/routes/elevenlabs.go b/core/http/routes/elevenlabs.go index 4f9e666f..b20dec75 100644 --- a/core/http/routes/elevenlabs.go +++ b/core/http/routes/elevenlabs.go @@ -16,4 +16,6 @@ func RegisterElevenLabsRoutes(app *fiber.App, // Elevenlabs app.Post("/v1/text-to-speech/:voice-id", auth, elevenlabs.TTSEndpoint(cl, ml, appConfig)) + app.Post("/v1/sound-generation", auth, elevenlabs.SoundGenerationEndpoint(cl, ml, appConfig)) + } diff --git a/core/schema/elevenlabs.go b/core/schema/elevenlabs.go index 8bd6be3b..119e0a58 100644 --- a/core/schema/elevenlabs.go +++ b/core/schema/elevenlabs.go @@ -4,3 +4,11 @@ type ElevenLabsTTSRequest struct { Text string `json:"text" yaml:"text"` ModelID string `json:"model_id" yaml:"model_id"` } + +type ElevenLabsSoundGenerationRequest struct { + Text string `json:"text" yaml:"text"` + ModelID string `json:"model_id" yaml:"model_id"` + Duration *float32 `json:"duration_seconds,omitempty" yaml:"duration_seconds,omitempty"` + Temperature *float32 `json:"prompt_influence,omitempty" yaml:"prompt_influence,omitempty"` + DoSample *bool `json:"do_sample,omitempty" yaml:"do_sample,omitempty"` +} diff --git a/examples/bruno/LocalAI Test Requests/Sound Generation/musicgen.bru b/examples/bruno/LocalAI Test Requests/Sound Generation/musicgen.bru new file mode 100644 index 00000000..471756f5 --- /dev/null +++ b/examples/bruno/LocalAI Test Requests/Sound Generation/musicgen.bru @@ -0,0 +1,23 @@ +meta { + name: musicgen + type: http + seq: 1 +} + +post { + url: {{PROTOCOL}}{{HOST}}:{{PORT}}/v1/sound-generation + body: json + auth: none +} + +headers { + Content-Type: application/json +} + +body:json { + { + "model_id": "facebook/musicgen-small", + "text": "Exciting 80s Newscast Interstitial", + 
"duration_seconds": 8 + } +} diff --git a/go.sum b/go.sum index e09af5ce..85800fd6 100644 --- a/go.sum +++ b/go.sum @@ -509,6 +509,9 @@ github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU= github.com/onsi/ginkgo/v2 v2.20.0 h1:PE84V2mHqoT1sglvHc8ZdQtPcwmvvt29WLEEO3xmdZw= github.com/onsi/ginkgo/v2 v2.20.0/go.mod h1:lG9ey2Z29hR41WMVthyJBGUBcBhGOtoPF2VFMvBXFCI= +github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= +github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= +github.com/onsi/gomega v1.16.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY= github.com/onsi/gomega v1.34.1 h1:EUMJIKUjM8sKjYbtxQI9A4z2o+rruxnzNvpknOXie6k= github.com/onsi/gomega v1.34.1/go.mod h1:kU1QgUvBDLXBJq618Xvm2LUX6rSAfRaFRTcdOeDLwwY= github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= diff --git a/pkg/grpc/backend.go b/pkg/grpc/backend.go index 596a7589..5abc34ab 100644 --- a/pkg/grpc/backend.go +++ b/pkg/grpc/backend.go @@ -41,6 +41,7 @@ type Backend interface { PredictStream(ctx context.Context, in *pb.PredictOptions, f func(s []byte), opts ...grpc.CallOption) error GenerateImage(ctx context.Context, in *pb.GenerateImageRequest, opts ...grpc.CallOption) (*pb.Result, error) TTS(ctx context.Context, in *pb.TTSRequest, opts ...grpc.CallOption) (*pb.Result, error) + SoundGeneration(ctx context.Context, in *pb.SoundGenerationRequest, opts ...grpc.CallOption) (*pb.Result, error) AudioTranscription(ctx context.Context, in *pb.TranscriptRequest, opts ...grpc.CallOption) (*schema.TranscriptionResult, error) TokenizeString(ctx context.Context, in *pb.PredictOptions, opts ...grpc.CallOption) (*pb.TokenizationResponse, error) Status(ctx context.Context) (*pb.StatusResponse, error) diff --git a/pkg/grpc/base/base.go b/pkg/grpc/base/base.go index 515022ec..21dd1578 100644 --- 
a/pkg/grpc/base/base.go +++ b/pkg/grpc/base/base.go @@ -61,6 +61,10 @@ func (llm *Base) TTS(*pb.TTSRequest) error { return fmt.Errorf("unimplemented") } +func (llm *Base) SoundGeneration(*pb.SoundGenerationRequest) error { + return fmt.Errorf("unimplemented") +} + func (llm *Base) TokenizeString(opts *pb.PredictOptions) (pb.TokenizationResponse, error) { return pb.TokenizationResponse{}, fmt.Errorf("unimplemented") } diff --git a/pkg/grpc/client.go b/pkg/grpc/client.go index cfae5875..827275cf 100644 --- a/pkg/grpc/client.go +++ b/pkg/grpc/client.go @@ -210,6 +210,26 @@ func (c *Client) TTS(ctx context.Context, in *pb.TTSRequest, opts ...grpc.CallOp return client.TTS(ctx, in, opts...) } +func (c *Client) SoundGeneration(ctx context.Context, in *pb.SoundGenerationRequest, opts ...grpc.CallOption) (*pb.Result, error) { + if !c.parallel { + c.opMutex.Lock() + defer c.opMutex.Unlock() + } + c.setBusy(true) + defer c.setBusy(false) + if c.wd != nil { + c.wd.Mark(c.address) + defer c.wd.UnMark(c.address) + } + conn, err := grpc.Dial(c.address, grpc.WithTransportCredentials(insecure.NewCredentials())) + if err != nil { + return nil, err + } + defer conn.Close() + client := pb.NewBackendClient(conn) + return client.SoundGeneration(ctx, in, opts...) +} + func (c *Client) AudioTranscription(ctx context.Context, in *pb.TranscriptRequest, opts ...grpc.CallOption) (*schema.TranscriptionResult, error) { if !c.parallel { c.opMutex.Lock() diff --git a/pkg/grpc/embed.go b/pkg/grpc/embed.go index 2b776b39..67d83e27 100644 --- a/pkg/grpc/embed.go +++ b/pkg/grpc/embed.go @@ -53,6 +53,10 @@ func (e *embedBackend) TTS(ctx context.Context, in *pb.TTSRequest, opts ...grpc. 
return e.s.TTS(ctx, in) } +func (e *embedBackend) SoundGeneration(ctx context.Context, in *pb.SoundGenerationRequest, opts ...grpc.CallOption) (*pb.Result, error) { + return e.s.SoundGeneration(ctx, in) +} + func (e *embedBackend) AudioTranscription(ctx context.Context, in *pb.TranscriptRequest, opts ...grpc.CallOption) (*schema.TranscriptionResult, error) { r, err := e.s.AudioTranscription(ctx, in) if err != nil { diff --git a/pkg/grpc/interface.go b/pkg/grpc/interface.go index 313c8ff5..731dcd5b 100644 --- a/pkg/grpc/interface.go +++ b/pkg/grpc/interface.go @@ -17,6 +17,7 @@ type LLM interface { GenerateImage(*pb.GenerateImageRequest) error AudioTranscription(*pb.TranscriptRequest) (schema.TranscriptionResult, error) TTS(*pb.TTSRequest) error + SoundGeneration(*pb.SoundGenerationRequest) error TokenizeString(*pb.PredictOptions) (pb.TokenizationResponse, error) Status() (pb.StatusResponse, error) diff --git a/pkg/grpc/server.go b/pkg/grpc/server.go index 784aac7f..0e602a42 100644 --- a/pkg/grpc/server.go +++ b/pkg/grpc/server.go @@ -84,7 +84,19 @@ func (s *server) TTS(ctx context.Context, in *pb.TTSRequest) (*pb.Result, error) if err != nil { return &pb.Result{Message: fmt.Sprintf("Error generating audio: %s", err.Error()), Success: false}, err } - return &pb.Result{Message: "Audio generated", Success: true}, nil + return &pb.Result{Message: "TTS audio generated", Success: true}, nil +} + +func (s *server) SoundGeneration(ctx context.Context, in *pb.SoundGenerationRequest) (*pb.Result, error) { + if s.llm.Locking() { + s.llm.Lock() + defer s.llm.Unlock() + } + err := s.llm.SoundGeneration(in) + if err != nil { + return &pb.Result{Message: fmt.Sprintf("Error generating audio: %s", err.Error()), Success: false}, err + } + return &pb.Result{Message: "Sound Generation audio generated", Success: true}, nil } func (s *server) AudioTranscription(ctx context.Context, in *pb.TranscriptRequest) (*pb.TranscriptResult, error) { diff --git a/pkg/utils/path.go 
b/pkg/utils/path.go index c1d3e86d..1ae11d12 100644 --- a/pkg/utils/path.go +++ b/pkg/utils/path.go @@ -38,3 +38,19 @@ func SanitizeFileName(fileName string) string { safeName := strings.ReplaceAll(baseName, "..", "") return safeName } + +func GenerateUniqueFileName(dir, baseName, ext string) string { + counter := 1 + fileName := baseName + ext + + for { + filePath := filepath.Join(dir, fileName) + _, err := os.Stat(filePath) + if os.IsNotExist(err) { + return fileName + } + + counter++ + fileName = fmt.Sprintf("%s_%d%s", baseName, counter, ext) + } +} From 0762aa532710b86c326d6e08a84d6a68b339a702 Mon Sep 17 00:00:00 2001 From: Ettore Di Giacinto Date: Sat, 24 Aug 2024 09:58:49 +0200 Subject: [PATCH 0008/1530] Update GPU-acceleration.md Signed-off-by: Ettore Di Giacinto --- docs/content/docs/features/GPU-acceleration.md | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/docs/content/docs/features/GPU-acceleration.md b/docs/content/docs/features/GPU-acceleration.md index ae146ca6..c4160738 100644 --- a/docs/content/docs/features/GPU-acceleration.md +++ b/docs/content/docs/features/GPU-acceleration.md @@ -133,6 +133,10 @@ Due to the nature of ROCm it is best to run all implementations in containers as Ongoing verification testing of ROCm compatability with integrated backends. Please note the following list of verified backends and devices. +LocalAI hipblas images are built against the following targets: gfx900,gfx906,gfx908,gfx940,gfx941,gfx942,gfx90a,gfx1030,gfx1031,gfx1100,gfx1101 + +If your device is not one of these you must specify the corresponding `GPU_TARGETS` and specify `REBUILD=true`. Otherwise you don't need to specify these in the commands below. 
+ ### Verified The devices in the following list have been tested with `hipblas` images running `ROCm 6.0.0` From ce827139bb35bad5bbbd8d3cd1104fd6aac96c5d Mon Sep 17 00:00:00 2001 From: Ettore Di Giacinto Date: Sat, 24 Aug 2024 10:30:24 +0200 Subject: [PATCH 0009/1530] fix(p2p): correctly allow to pass extra args to llama.cpp (#3368) Signed-off-by: Ettore Di Giacinto --- core/cli/worker/worker_p2p.go | 17 +++++++++-------- 1 file changed, 9 insertions(+), 8 deletions(-) diff --git a/core/cli/worker/worker_p2p.go b/core/cli/worker/worker_p2p.go index a65d3381..adfd2819 100644 --- a/core/cli/worker/worker_p2p.go +++ b/core/cli/worker/worker_p2p.go @@ -8,6 +8,7 @@ import ( "fmt" "os" "os/exec" + "strings" "time" cliContext "github.com/mudler/LocalAI/core/cli/context" @@ -20,12 +21,12 @@ import ( type P2P struct { WorkerFlags `embed:""` - Token string `env:"LOCALAI_TOKEN,LOCALAI_P2P_TOKEN,TOKEN" help:"P2P token to use"` - NoRunner bool `env:"LOCALAI_NO_RUNNER,NO_RUNNER" help:"Do not start the llama-cpp-rpc-server"` - RunnerAddress string `env:"LOCALAI_RUNNER_ADDRESS,RUNNER_ADDRESS" help:"Address of the llama-cpp-rpc-server"` - RunnerPort string `env:"LOCALAI_RUNNER_PORT,RUNNER_PORT" help:"Port of the llama-cpp-rpc-server"` - ExtraLLamaCPPArgs []string `env:"LOCALAI_EXTRA_LLAMA_CPP_ARGS,EXTRA_LLAMA_CPP_ARGS" help:"Extra arguments to pass to llama-cpp-rpc-server"` - Peer2PeerNetworkID string `env:"LOCALAI_P2P_NETWORK_ID,P2P_NETWORK_ID" help:"Network ID for P2P mode, can be set arbitrarly by the user for grouping a set of instances" group:"p2p"` + Token string `env:"LOCALAI_TOKEN,LOCALAI_P2P_TOKEN,TOKEN" help:"P2P token to use"` + NoRunner bool `env:"LOCALAI_NO_RUNNER,NO_RUNNER" help:"Do not start the llama-cpp-rpc-server"` + RunnerAddress string `env:"LOCALAI_RUNNER_ADDRESS,RUNNER_ADDRESS" help:"Address of the llama-cpp-rpc-server"` + RunnerPort string `env:"LOCALAI_RUNNER_PORT,RUNNER_PORT" help:"Port of the llama-cpp-rpc-server"` + ExtraLLamaCPPArgs string 
`name:"llama-cpp-args" env:"LOCALAI_EXTRA_LLAMA_CPP_ARGS,EXTRA_LLAMA_CPP_ARGS" help:"Extra arguments to pass to llama-cpp-rpc-server"` + Peer2PeerNetworkID string `env:"LOCALAI_P2P_NETWORK_ID,P2P_NETWORK_ID" help:"Network ID for P2P mode, can be set arbitrarly by the user for grouping a set of instances" group:"p2p"` } func (r *P2P) Run(ctx *cliContext.Context) error { @@ -76,8 +77,8 @@ func (r *P2P) Run(ctx *cliContext.Context) error { "util", "llama-cpp-rpc-server", ) - - args := append([]string{"--host", address, "--port", fmt.Sprint(port)}, r.ExtraLLamaCPPArgs...) + extraArgs := strings.Split(r.ExtraLLamaCPPArgs, " ") + args := append([]string{"--host", address, "--port", fmt.Sprint(port)}, extraArgs...) args, grpcProcess = library.LoadLDSO(r.BackendAssetsPath, args, grpcProcess) cmd := exec.Command( From de1fbdca7173e2924801286484cee1e9eb6e2060 Mon Sep 17 00:00:00 2001 From: grant-wilson Date: Sat, 24 Aug 2024 17:01:34 -0400 Subject: [PATCH 0010/1530] Update quickstart.md (#3373) fix typo. Signed-off-by: grant-wilson --- docs/content/docs/getting-started/quickstart.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/content/docs/getting-started/quickstart.md b/docs/content/docs/getting-started/quickstart.md index 5defa7fc..9ccc0faa 100644 --- a/docs/content/docs/getting-started/quickstart.md +++ b/docs/content/docs/getting-started/quickstart.md @@ -12,7 +12,7 @@ icon = "rocket_launch" **Security considerations** -If you are exposing LocalAI remotely, make sure you protect the API endpoints adeguately with a mechanism which allows to protect from the incoming traffic or alternatively, run LocalAI with `API_KEY` to gate the access with an API key. The API key guarantees a total access to the features (there is no role separation), and it is to be considered as likely as an admin role. 
+If you are exposing LocalAI remotely, make sure you protect the API endpoints adequately with a mechanism which allows to protect from the incoming traffic or alternatively, run LocalAI with `API_KEY` to gate the access with an API key. The API key guarantees a total access to the features (there is no role separation), and it is to be considered as likely as an admin role. To access the WebUI with an API_KEY, browser extensions such as [Requestly](https://requestly.com/) can be used (see also https://github.com/mudler/LocalAI/issues/2227#issuecomment-2093333752). See also [API flags]({{% relref "docs/advanced/advanced-usage#api-flags" %}}) for the flags / options available when starting LocalAI. From 75ef6ccf1e21fd66908788db37075086fc530885 Mon Sep 17 00:00:00 2001 From: "LocalAI [bot]" <139863280+localai-bot@users.noreply.github.com> Date: Sat, 24 Aug 2024 23:53:18 +0200 Subject: [PATCH 0011/1530] feat(swagger): update swagger (#3370) Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com> Co-authored-by: mudler <2420543+mudler@users.noreply.github.com> --- swagger/docs.go | 44 ++++++++++++++++++++++++++++++++++++++++++++ swagger/swagger.json | 44 ++++++++++++++++++++++++++++++++++++++++++++ swagger/swagger.yaml | 28 ++++++++++++++++++++++++++++ 3 files changed, 116 insertions(+) diff --git a/swagger/docs.go b/swagger/docs.go index 44ae10ad..ced239c4 100644 --- a/swagger/docs.go +++ b/swagger/docs.go @@ -656,6 +656,30 @@ const docTemplate = `{ } } }, + "/v1/sound-generation": { + "post": { + "summary": "Generates audio from the input text.", + "parameters": [ + { + "description": "query params", + "name": "request", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/schema.ElevenLabsSoundGenerationRequest" + } + } + ], + "responses": { + "200": { + "description": "Response", + "schema": { + "type": "string" + } + } + } + } + }, "/v1/text-to-speech/{voice-id}": { "post": { "summary": "Generates audio from 
the input text.", @@ -1161,6 +1185,26 @@ const docTemplate = `{ } } }, + "schema.ElevenLabsSoundGenerationRequest": { + "type": "object", + "properties": { + "do_sample": { + "type": "boolean" + }, + "duration_seconds": { + "type": "number" + }, + "model_id": { + "type": "string" + }, + "prompt_influence": { + "type": "number" + }, + "text": { + "type": "string" + } + } + }, "schema.File": { "type": "object", "properties": { diff --git a/swagger/swagger.json b/swagger/swagger.json index 6edfebbd..c538b539 100644 --- a/swagger/swagger.json +++ b/swagger/swagger.json @@ -649,6 +649,30 @@ } } }, + "/v1/sound-generation": { + "post": { + "summary": "Generates audio from the input text.", + "parameters": [ + { + "description": "query params", + "name": "request", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/schema.ElevenLabsSoundGenerationRequest" + } + } + ], + "responses": { + "200": { + "description": "Response", + "schema": { + "type": "string" + } + } + } + } + }, "/v1/text-to-speech/{voice-id}": { "post": { "summary": "Generates audio from the input text.", @@ -1154,6 +1178,26 @@ } } }, + "schema.ElevenLabsSoundGenerationRequest": { + "type": "object", + "properties": { + "do_sample": { + "type": "boolean" + }, + "duration_seconds": { + "type": "number" + }, + "model_id": { + "type": "string" + }, + "prompt_influence": { + "type": "number" + }, + "text": { + "type": "string" + } + } + }, "schema.File": { "type": "object", "properties": { diff --git a/swagger/swagger.yaml b/swagger/swagger.yaml index c953b0af..389543fa 100644 --- a/swagger/swagger.yaml +++ b/swagger/swagger.yaml @@ -322,6 +322,19 @@ definitions: object: type: string type: object + schema.ElevenLabsSoundGenerationRequest: + properties: + do_sample: + type: boolean + duration_seconds: + type: number + model_id: + type: string + prompt_influence: + type: number + text: + type: string + type: object schema.File: properties: bytes: @@ -1066,6 +1079,21 @@ paths: schema: 
$ref: '#/definitions/schema.JINARerankResponse' summary: Reranks a list of phrases by relevance to a given text query. + /v1/sound-generation: + post: + parameters: + - description: query params + in: body + name: request + required: true + schema: + $ref: '#/definitions/schema.ElevenLabsSoundGenerationRequest' + responses: + "200": + description: Response + schema: + type: string + summary: Generates audio from the input text. /v1/text-to-speech/{voice-id}: post: parameters: From 99b57b321bef2b9a4830729d93155ca758681c8f Mon Sep 17 00:00:00 2001 From: Dave Date: Sat, 24 Aug 2024 18:42:05 -0400 Subject: [PATCH 0012/1530] fix: devcontainer utils.sh ssh copy improvements (#3372) fix utils.sh - use HOME variable, permissions and logging Signed-off-by: Dave Lee --- .devcontainer-scripts/utils.sh | 13 ++++++++----- 1 file changed, 8 insertions(+), 5 deletions(-) diff --git a/.devcontainer-scripts/utils.sh b/.devcontainer-scripts/utils.sh index 7018c745..98ac063c 100644 --- a/.devcontainer-scripts/utils.sh +++ b/.devcontainer-scripts/utils.sh @@ -32,19 +32,22 @@ config_remote() { } # Setup special .ssh files -# +# Prints out lines of text to make things pretty # Param 1: bash array, filenames relative to the customization directory that should be copied to ~/.ssh setup_ssh() { - mkdir -p ~/.ssh + echo "starting ~/.ssh directory setup..." + mkdir -p "${HOME}/.ssh" + chmod 0700 "${HOME}/.ssh" + echo "-----" local files=("$@") for file in "${files[@]}" ; do local cfile="/devcontainer-customization/${file}" - local hfile="~/.ssh/${file}" + local hfile="${HOME}/.ssh/${file}" if [ ! -f "${hfile}" ]; then - echo "copying ${file}" + echo "copying \"${file}\"" cp "${cfile}" "${hfile}" chmod 600 "${hfile}" fi done - ls ~/.ssh + echo "~/.ssh directory setup complete!" 
} From 771a052480fcb6cb3a365a4dc0d2715e36c2e819 Mon Sep 17 00:00:00 2001 From: Ettore Di Giacinto Date: Sun, 25 Aug 2024 09:02:54 +0200 Subject: [PATCH 0013/1530] models(gallery): add phi-3.5 (#3376) Signed-off-by: Ettore Di Giacinto --- gallery/index.yaml | 16 +++++++++++++++- 1 file changed, 15 insertions(+), 1 deletion(-) diff --git a/gallery/index.yaml b/gallery/index.yaml index 694df8d3..d46da159 100644 --- a/gallery/index.yaml +++ b/gallery/index.yaml @@ -4573,7 +4573,7 @@ - filename: phillama-3.8b-v0.1.Q4_K_M.gguf sha256: da537d352b7aae54bbad0d2cff3e3a1b0e1dc1e1d25bec3aae1d05cf4faee7a2 uri: huggingface://RichardErkhov/raincandy-u_-_phillama-3.8b-v0.1-gguf/phillama-3.8b-v0.1.Q4_K_M.gguf -- !!merge <<: *llama3 +- !!merge <<: *phi-3 name: "calme-2.3-phi3-4b" icon: https://huggingface.co/MaziyarPanahi/calme-2.1-phi3-4b/resolve/main/phi-3-instruct.webp urls: @@ -4590,6 +4590,20 @@ - filename: Phi-3-mini-4k-instruct-v0.3.Q4_K_M.gguf sha256: 3a23e1052369c080afb925882bd814cbea5ec859894655a7434c3d49e43a6127 uri: huggingface://MaziyarPanahi/calme-2.3-phi3-4b-GGUF/Phi-3-mini-4k-instruct-v0.3.Q4_K_M.gguf +- !!merge <<: *phi-3 + name: "phi-3.5-mini-instruct" + urls: + - https://huggingface.co/microsoft/Phi-3.5-mini-instruct + - https://huggingface.co/MaziyarPanahi/Phi-3.5-mini-instruct-GGUF + description: | + Phi-3.5-mini is a lightweight, state-of-the-art open model built upon datasets used for Phi-3 - synthetic data and filtered publicly available websites - with a focus on very high-quality, reasoning dense data. The model belongs to the Phi-3 model family and supports 128K token context length. The model underwent a rigorous enhancement process, incorporating both supervised fine-tuning, proximal policy optimization, and direct preference optimization to ensure precise instruction adherence and robust safety measures. 
+ overrides: + parameters: + model: Phi-3.5-mini-instruct.Q4_K_M.gguf + files: + - filename: Phi-3.5-mini-instruct.Q4_K_M.gguf + sha256: 3f68916e850b107d8641d18bcd5548f0d66beef9e0a9077fe84ef28943eb7e88 + uri: huggingface://MaziyarPanahi/Phi-3.5-mini-instruct-GGUF/Phi-3.5-mini-instruct.Q4_K_M.gguf - &hermes-2-pro-mistral ### START Hermes url: "github:mudler/LocalAI/gallery/hermes-2-pro-mistral.yaml@master" From 7f069544257c8a8b39b744e949ba51cb40bb4b5b Mon Sep 17 00:00:00 2001 From: Ettore Di Giacinto Date: Sun, 25 Aug 2024 14:36:09 +0200 Subject: [PATCH 0014/1530] fix(model-loading): keep track of open GRPC Clients (#3377) Due to a previous refactor we moved the client constructor tight to the model address, however that was just a string which we would use to build the client each time. With this change we make the loader to return a *Model which carries a constructor for the client and stores the client on the first connection. Signed-off-by: Ettore Di Giacinto --- core/services/backend_monitor.go | 2 +- pkg/grpc/backend.go | 4 +- pkg/grpc/client.go | 76 ++++++----- pkg/model/initializers.go | 55 ++++---- pkg/model/loader.go | 125 ++++-------------- pkg/model/model.go | 29 ++++ pkg/model/process.go | 2 +- pkg/model/template.go | 52 ++++++++ .../{loader_test.go => template_test.go} | 0 pkg/model/watchdog.go | 2 +- 10 files changed, 176 insertions(+), 171 deletions(-) create mode 100644 pkg/model/model.go create mode 100644 pkg/model/template.go rename pkg/model/{loader_test.go => template_test.go} (100%) diff --git a/core/services/backend_monitor.go b/core/services/backend_monitor.go index 39588604..88fefa09 100644 --- a/core/services/backend_monitor.go +++ b/core/services/backend_monitor.go @@ -107,7 +107,7 @@ func (bms BackendMonitorService) CheckAndSample(modelName string) (*proto.Status return nil, err } modelAddr := bms.modelLoader.CheckIsLoaded(backendId) - if modelAddr == "" { + if modelAddr == nil { return nil, fmt.Errorf("backend %s is not currently 
loaded", backendId) } diff --git a/pkg/grpc/backend.go b/pkg/grpc/backend.go index 5abc34ab..3821678c 100644 --- a/pkg/grpc/backend.go +++ b/pkg/grpc/backend.go @@ -18,10 +18,10 @@ func NewClient(address string, parallel bool, wd WatchDog, enableWatchDog bool) if bc, ok := embeds[address]; ok { return bc } - return NewGrpcClient(address, parallel, wd, enableWatchDog) + return buildClient(address, parallel, wd, enableWatchDog) } -func NewGrpcClient(address string, parallel bool, wd WatchDog, enableWatchDog bool) Backend { +func buildClient(address string, parallel bool, wd WatchDog, enableWatchDog bool) Backend { if !enableWatchDog { wd = nil } diff --git a/pkg/grpc/client.go b/pkg/grpc/client.go index 827275cf..b654e9c9 100644 --- a/pkg/grpc/client.go +++ b/pkg/grpc/client.go @@ -39,6 +39,18 @@ func (c *Client) setBusy(v bool) { c.Unlock() } +func (c *Client) wdMark() { + if c.wd != nil { + c.wd.Mark(c.address) + } +} + +func (c *Client) wdUnMark() { + if c.wd != nil { + c.wd.UnMark(c.address) + } +} + func (c *Client) HealthCheck(ctx context.Context) (bool, error) { if !c.parallel { c.opMutex.Lock() @@ -76,10 +88,8 @@ func (c *Client) Embeddings(ctx context.Context, in *pb.PredictOptions, opts ... 
} c.setBusy(true) defer c.setBusy(false) - if c.wd != nil { - c.wd.Mark(c.address) - defer c.wd.UnMark(c.address) - } + c.wdMark() + defer c.wdUnMark() conn, err := grpc.Dial(c.address, grpc.WithTransportCredentials(insecure.NewCredentials())) if err != nil { return nil, err @@ -97,10 +107,8 @@ func (c *Client) Predict(ctx context.Context, in *pb.PredictOptions, opts ...grp } c.setBusy(true) defer c.setBusy(false) - if c.wd != nil { - c.wd.Mark(c.address) - defer c.wd.UnMark(c.address) - } + c.wdMark() + defer c.wdUnMark() conn, err := grpc.Dial(c.address, grpc.WithTransportCredentials(insecure.NewCredentials())) if err != nil { return nil, err @@ -118,10 +126,8 @@ func (c *Client) LoadModel(ctx context.Context, in *pb.ModelOptions, opts ...grp } c.setBusy(true) defer c.setBusy(false) - if c.wd != nil { - c.wd.Mark(c.address) - defer c.wd.UnMark(c.address) - } + c.wdMark() + defer c.wdUnMark() conn, err := grpc.Dial(c.address, grpc.WithTransportCredentials(insecure.NewCredentials())) if err != nil { return nil, err @@ -138,10 +144,8 @@ func (c *Client) PredictStream(ctx context.Context, in *pb.PredictOptions, f fun } c.setBusy(true) defer c.setBusy(false) - if c.wd != nil { - c.wd.Mark(c.address) - defer c.wd.UnMark(c.address) - } + c.wdMark() + defer c.wdUnMark() conn, err := grpc.Dial(c.address, grpc.WithTransportCredentials(insecure.NewCredentials())) if err != nil { return err @@ -177,10 +181,8 @@ func (c *Client) GenerateImage(ctx context.Context, in *pb.GenerateImageRequest, } c.setBusy(true) defer c.setBusy(false) - if c.wd != nil { - c.wd.Mark(c.address) - defer c.wd.UnMark(c.address) - } + c.wdMark() + defer c.wdUnMark() conn, err := grpc.Dial(c.address, grpc.WithTransportCredentials(insecure.NewCredentials())) if err != nil { return nil, err @@ -197,10 +199,8 @@ func (c *Client) TTS(ctx context.Context, in *pb.TTSRequest, opts ...grpc.CallOp } c.setBusy(true) defer c.setBusy(false) - if c.wd != nil { - c.wd.Mark(c.address) - defer c.wd.UnMark(c.address) - 
} + c.wdMark() + defer c.wdUnMark() conn, err := grpc.Dial(c.address, grpc.WithTransportCredentials(insecure.NewCredentials())) if err != nil { return nil, err @@ -217,10 +217,8 @@ func (c *Client) SoundGeneration(ctx context.Context, in *pb.SoundGenerationRequ } c.setBusy(true) defer c.setBusy(false) - if c.wd != nil { - c.wd.Mark(c.address) - defer c.wd.UnMark(c.address) - } + c.wdMark() + defer c.wdUnMark() conn, err := grpc.Dial(c.address, grpc.WithTransportCredentials(insecure.NewCredentials())) if err != nil { return nil, err @@ -237,10 +235,8 @@ func (c *Client) AudioTranscription(ctx context.Context, in *pb.TranscriptReques } c.setBusy(true) defer c.setBusy(false) - if c.wd != nil { - c.wd.Mark(c.address) - defer c.wd.UnMark(c.address) - } + c.wdMark() + defer c.wdUnMark() conn, err := grpc.Dial(c.address, grpc.WithTransportCredentials(insecure.NewCredentials())) if err != nil { return nil, err @@ -277,10 +273,8 @@ func (c *Client) TokenizeString(ctx context.Context, in *pb.PredictOptions, opts } c.setBusy(true) defer c.setBusy(false) - if c.wd != nil { - c.wd.Mark(c.address) - defer c.wd.UnMark(c.address) - } + c.wdMark() + defer c.wdUnMark() conn, err := grpc.Dial(c.address, grpc.WithTransportCredentials(insecure.NewCredentials())) if err != nil { return nil, err @@ -319,6 +313,8 @@ func (c *Client) StoresSet(ctx context.Context, in *pb.StoresSetOptions, opts .. 
} c.setBusy(true) defer c.setBusy(false) + c.wdMark() + defer c.wdUnMark() conn, err := grpc.Dial(c.address, grpc.WithTransportCredentials(insecure.NewCredentials())) if err != nil { return nil, err @@ -333,6 +329,8 @@ func (c *Client) StoresDelete(ctx context.Context, in *pb.StoresDeleteOptions, o c.opMutex.Lock() defer c.opMutex.Unlock() } + c.wdMark() + defer c.wdUnMark() c.setBusy(true) defer c.setBusy(false) conn, err := grpc.Dial(c.address, grpc.WithTransportCredentials(insecure.NewCredentials())) @@ -351,6 +349,8 @@ func (c *Client) StoresGet(ctx context.Context, in *pb.StoresGetOptions, opts .. } c.setBusy(true) defer c.setBusy(false) + c.wdMark() + defer c.wdUnMark() conn, err := grpc.Dial(c.address, grpc.WithTransportCredentials(insecure.NewCredentials())) if err != nil { return nil, err @@ -367,6 +367,8 @@ func (c *Client) StoresFind(ctx context.Context, in *pb.StoresFindOptions, opts } c.setBusy(true) defer c.setBusy(false) + c.wdMark() + defer c.wdUnMark() conn, err := grpc.Dial(c.address, grpc.WithTransportCredentials(insecure.NewCredentials())) if err != nil { return nil, err @@ -383,6 +385,8 @@ func (c *Client) Rerank(ctx context.Context, in *pb.RerankRequest, opts ...grpc. 
} c.setBusy(true) defer c.setBusy(false) + c.wdMark() + defer c.wdUnMark() conn, err := grpc.Dial(c.address, grpc.WithTransportCredentials(insecure.NewCredentials())) if err != nil { return nil, err diff --git a/pkg/model/initializers.go b/pkg/model/initializers.go index d85da6c1..de0662e6 100644 --- a/pkg/model/initializers.go +++ b/pkg/model/initializers.go @@ -80,6 +80,9 @@ ENTRY: if e.IsDir() { continue } + if strings.HasSuffix(e.Name(), ".log") { + continue + } // Skip the llama.cpp variants if we are autoDetecting // But we always load the fallback variant if it exists @@ -265,12 +268,12 @@ func selectGRPCProcess(backend, assetDir string, f16 bool) string { // starts the grpcModelProcess for the backend, and returns a grpc client // It also loads the model -func (ml *ModelLoader) grpcModel(backend string, o *Options) func(string, string) (ModelAddress, error) { - return func(modelName, modelFile string) (ModelAddress, error) { +func (ml *ModelLoader) grpcModel(backend string, o *Options) func(string, string) (*Model, error) { + return func(modelName, modelFile string) (*Model, error) { log.Debug().Msgf("Loading Model %s with gRPC (file: %s) (backend: %s): %+v", modelName, modelFile, backend, *o) - var client ModelAddress + var client *Model getFreeAddress := func() (string, error) { port, err := freeport.GetFreePort() @@ -298,26 +301,26 @@ func (ml *ModelLoader) grpcModel(backend string, o *Options) func(string, string log.Debug().Msgf("external backend is file: %+v", fi) serverAddress, err := getFreeAddress() if err != nil { - return "", fmt.Errorf("failed allocating free ports: %s", err.Error()) + return nil, fmt.Errorf("failed allocating free ports: %s", err.Error()) } // Make sure the process is executable if err := ml.startProcess(uri, o.model, serverAddress); err != nil { log.Error().Err(err).Str("path", uri).Msg("failed to launch ") - return "", err + return nil, err } log.Debug().Msgf("GRPC Service Started") - client = ModelAddress(serverAddress) + 
client = NewModel(serverAddress) } else { log.Debug().Msg("external backend is uri") // address - client = ModelAddress(uri) + client = NewModel(uri) } } else { grpcProcess := backendPath(o.assetDir, backend) if err := utils.VerifyPath(grpcProcess, o.assetDir); err != nil { - return "", fmt.Errorf("grpc process not found in assetdir: %s", err.Error()) + return nil, fmt.Errorf("grpc process not found in assetdir: %s", err.Error()) } if autoDetect { @@ -329,12 +332,12 @@ func (ml *ModelLoader) grpcModel(backend string, o *Options) func(string, string // Check if the file exists if _, err := os.Stat(grpcProcess); os.IsNotExist(err) { - return "", fmt.Errorf("grpc process not found: %s. some backends(stablediffusion, tts) require LocalAI compiled with GO_TAGS", grpcProcess) + return nil, fmt.Errorf("grpc process not found: %s. some backends(stablediffusion, tts) require LocalAI compiled with GO_TAGS", grpcProcess) } serverAddress, err := getFreeAddress() if err != nil { - return "", fmt.Errorf("failed allocating free ports: %s", err.Error()) + return nil, fmt.Errorf("failed allocating free ports: %s", err.Error()) } args := []string{} @@ -344,12 +347,12 @@ func (ml *ModelLoader) grpcModel(backend string, o *Options) func(string, string // Make sure the process is executable in any circumstance if err := ml.startProcess(grpcProcess, o.model, serverAddress, args...); err != nil { - return "", err + return nil, err } log.Debug().Msgf("GRPC Service Started") - client = ModelAddress(serverAddress) + client = NewModel(serverAddress) } // Wait for the service to start up @@ -369,7 +372,7 @@ func (ml *ModelLoader) grpcModel(backend string, o *Options) func(string, string if !ready { log.Debug().Msgf("GRPC Service NOT ready") - return "", fmt.Errorf("grpc service not ready") + return nil, fmt.Errorf("grpc service not ready") } options := *o.gRPCOptions @@ -380,27 +383,16 @@ func (ml *ModelLoader) grpcModel(backend string, o *Options) func(string, string res, err := 
client.GRPC(o.parallelRequests, ml.wd).LoadModel(o.context, &options) if err != nil { - return "", fmt.Errorf("could not load model: %w", err) + return nil, fmt.Errorf("could not load model: %w", err) } if !res.Success { - return "", fmt.Errorf("could not load model (no success): %s", res.Message) + return nil, fmt.Errorf("could not load model (no success): %s", res.Message) } return client, nil } } -func (ml *ModelLoader) resolveAddress(addr ModelAddress, parallel bool) (grpc.Backend, error) { - if parallel { - return addr.GRPC(parallel, ml.wd), nil - } - - if _, ok := ml.grpcClients[string(addr)]; !ok { - ml.grpcClients[string(addr)] = addr.GRPC(parallel, ml.wd) - } - return ml.grpcClients[string(addr)], nil -} - func (ml *ModelLoader) BackendLoader(opts ...Option) (client grpc.Backend, err error) { o := NewOptions(opts...) @@ -425,7 +417,6 @@ func (ml *ModelLoader) BackendLoader(opts ...Option) (client grpc.Backend, err e log.Error().Err(err).Str("keptModel", o.model).Msg("error while shutting down all backends except for the keptModel") return nil, err } - } var backendToConsume string @@ -438,26 +429,28 @@ func (ml *ModelLoader) BackendLoader(opts ...Option) (client grpc.Backend, err e backendToConsume = backend } - addr, err := ml.LoadModel(o.model, ml.grpcModel(backendToConsume, o)) + model, err := ml.LoadModel(o.model, ml.grpcModel(backendToConsume, o)) if err != nil { return nil, err } - return ml.resolveAddress(addr, o.parallelRequests) + return model.GRPC(o.parallelRequests, ml.wd), nil } func (ml *ModelLoader) GreedyLoader(opts ...Option) (grpc.Backend, error) { o := NewOptions(opts...) 
ml.mu.Lock() + // Return earlier if we have a model already loaded // (avoid looping through all the backends) - if m := ml.CheckIsLoaded(o.model); m != "" { + if m := ml.CheckIsLoaded(o.model); m != nil { log.Debug().Msgf("Model '%s' already loaded", o.model) ml.mu.Unlock() - return ml.resolveAddress(m, o.parallelRequests) + return m.GRPC(o.parallelRequests, ml.wd), nil } + // If we can have only one backend active, kill all the others (except external backends) if o.singleActiveBackend { log.Debug().Msgf("Stopping all backends except '%s'", o.model) diff --git a/pkg/model/loader.go b/pkg/model/loader.go index b2570c71..c1ed01dc 100644 --- a/pkg/model/loader.go +++ b/pkg/model/loader.go @@ -10,67 +10,28 @@ import ( "github.com/mudler/LocalAI/pkg/templates" - "github.com/mudler/LocalAI/pkg/functions" - "github.com/mudler/LocalAI/pkg/grpc" "github.com/mudler/LocalAI/pkg/utils" process "github.com/mudler/go-processmanager" "github.com/rs/zerolog/log" ) -// Rather than pass an interface{} to the prompt template: -// These are the definitions of all possible variables LocalAI will currently populate for use in a prompt template file -// Please note: Not all of these are populated on every endpoint - your template should either be tested for each endpoint you map it to, or tolerant of zero values. -type PromptTemplateData struct { - SystemPrompt string - SuppressSystemPrompt bool // used by chat specifically to indicate that SystemPrompt above should be _ignored_ - Input string - Instruction string - Functions []functions.Function - MessageIndex int -} - -type ChatMessageTemplateData struct { - SystemPrompt string - Role string - RoleName string - FunctionName string - Content string - MessageIndex int - Function bool - FunctionCall interface{} - LastMessage bool -} - // new idea: what if we declare a struct of these here, and use a loop to check? // TODO: Split ModelLoader and TemplateLoader? Just to keep things more organized. 
Left together to share a mutex until I look into that. Would split if we seperate directories for .bin/.yaml and .tmpl type ModelLoader struct { - ModelPath string - mu sync.Mutex - // TODO: this needs generics - grpcClients map[string]grpc.Backend - models map[string]ModelAddress + ModelPath string + mu sync.Mutex + models map[string]*Model grpcProcesses map[string]*process.Process templates *templates.TemplateCache wd *WatchDog } -type ModelAddress string - -func (m ModelAddress) GRPC(parallel bool, wd *WatchDog) grpc.Backend { - enableWD := false - if wd != nil { - enableWD = true - } - return grpc.NewClient(string(m), parallel, wd, enableWD) -} - func NewModelLoader(modelPath string) *ModelLoader { nml := &ModelLoader{ ModelPath: modelPath, - grpcClients: make(map[string]grpc.Backend), - models: make(map[string]ModelAddress), + models: make(map[string]*Model), templates: templates.NewTemplateCache(modelPath), grpcProcesses: make(map[string]*process.Process), } @@ -141,12 +102,12 @@ FILE: return models, nil } -func (ml *ModelLoader) LoadModel(modelName string, loader func(string, string) (ModelAddress, error)) (ModelAddress, error) { +func (ml *ModelLoader) LoadModel(modelName string, loader func(string, string) (*Model, error)) (*Model, error) { ml.mu.Lock() defer ml.mu.Unlock() // Check if we already have a loaded model - if model := ml.CheckIsLoaded(modelName); model != "" { + if model := ml.CheckIsLoaded(modelName); model != nil { return model, nil } @@ -156,17 +117,9 @@ func (ml *ModelLoader) LoadModel(modelName string, loader func(string, string) ( model, err := loader(modelName, modelFile) if err != nil { - return "", err + return nil, err } - // TODO: Add a helper method to iterate all prompt templates associated with a config if and only if it's YAML? 
- // Minor perf loss here until this is fixed, but we initialize on first request - - // // If there is a prompt template, load it - // if err := ml.loadTemplateIfExists(modelName); err != nil { - // return nil, err - // } - ml.models[modelName] = model return model, nil } @@ -184,55 +137,29 @@ func (ml *ModelLoader) stopModel(modelName string) error { return fmt.Errorf("model %s not found", modelName) } return nil - //return ml.deleteProcess(modelName) } -func (ml *ModelLoader) CheckIsLoaded(s string) ModelAddress { - var client grpc.Backend - if m, ok := ml.models[s]; ok { - log.Debug().Msgf("Model already loaded in memory: %s", s) - if c, ok := ml.grpcClients[s]; ok { - client = c - } else { - client = m.GRPC(false, ml.wd) - } - alive, err := client.HealthCheck(context.Background()) - if !alive { - log.Warn().Msgf("GRPC Model not responding: %s", err.Error()) - log.Warn().Msgf("Deleting the process in order to recreate it") - if !ml.grpcProcesses[s].IsAlive() { - log.Debug().Msgf("GRPC Process is not responding: %s", s) - // stop and delete the process, this forces to re-load the model and re-create again the service - err := ml.deleteProcess(s) - if err != nil { - log.Error().Err(err).Str("process", s).Msg("error stopping process") - } - return "" +func (ml *ModelLoader) CheckIsLoaded(s string) *Model { + m, ok := ml.models[s] + if !ok { + return nil + } + + log.Debug().Msgf("Model already loaded in memory: %s", s) + alive, err := m.GRPC(false, ml.wd).HealthCheck(context.Background()) + if !alive { + log.Warn().Msgf("GRPC Model not responding: %s", err.Error()) + log.Warn().Msgf("Deleting the process in order to recreate it") + if !ml.grpcProcesses[s].IsAlive() { + log.Debug().Msgf("GRPC Process is not responding: %s", s) + // stop and delete the process, this forces to re-load the model and re-create again the service + err := ml.deleteProcess(s) + if err != nil { + log.Error().Err(err).Str("process", s).Msg("error stopping process") } + return nil } - - 
return m } - return "" -} - -const ( - ChatPromptTemplate templates.TemplateType = iota - ChatMessageTemplate - CompletionPromptTemplate - EditPromptTemplate - FunctionsPromptTemplate -) - -func (ml *ModelLoader) EvaluateTemplateForPrompt(templateType templates.TemplateType, templateName string, in PromptTemplateData) (string, error) { - // TODO: should this check be improved? - if templateType == ChatMessageTemplate { - return "", fmt.Errorf("invalid templateType: ChatMessage") - } - return ml.templates.EvaluateTemplate(templateType, templateName, in) -} - -func (ml *ModelLoader) EvaluateTemplateForChatMessage(templateName string, messageData ChatMessageTemplateData) (string, error) { - return ml.templates.EvaluateTemplate(ChatMessageTemplate, templateName, messageData) + return m } diff --git a/pkg/model/model.go b/pkg/model/model.go new file mode 100644 index 00000000..26ddb8cc --- /dev/null +++ b/pkg/model/model.go @@ -0,0 +1,29 @@ +package model + +import grpc "github.com/mudler/LocalAI/pkg/grpc" + +type Model struct { + address string + client grpc.Backend +} + +func NewModel(address string) *Model { + return &Model{ + address: address, + } +} + +func (m *Model) GRPC(parallel bool, wd *WatchDog) grpc.Backend { + if m.client != nil { + return m.client + } + + enableWD := false + if wd != nil { + enableWD = true + } + + client := grpc.NewClient(m.address, parallel, wd, enableWD) + m.client = client + return client +} diff --git a/pkg/model/process.go b/pkg/model/process.go index 6a4fd326..5b751de8 100644 --- a/pkg/model/process.go +++ b/pkg/model/process.go @@ -33,7 +33,7 @@ func (ml *ModelLoader) StopAllExcept(s string) error { func (ml *ModelLoader) deleteProcess(s string) error { if _, exists := ml.grpcProcesses[s]; exists { if err := ml.grpcProcesses[s].Stop(); err != nil { - return err + log.Error().Err(err).Msgf("(deleteProcess) error while deleting grpc process %s", s) } } delete(ml.grpcProcesses, s) diff --git a/pkg/model/template.go 
b/pkg/model/template.go new file mode 100644 index 00000000..3dc850cf --- /dev/null +++ b/pkg/model/template.go @@ -0,0 +1,52 @@ +package model + +import ( + "fmt" + + "github.com/mudler/LocalAI/pkg/functions" + "github.com/mudler/LocalAI/pkg/templates" +) + +// Rather than pass an interface{} to the prompt template: +// These are the definitions of all possible variables LocalAI will currently populate for use in a prompt template file +// Please note: Not all of these are populated on every endpoint - your template should either be tested for each endpoint you map it to, or tolerant of zero values. +type PromptTemplateData struct { + SystemPrompt string + SuppressSystemPrompt bool // used by chat specifically to indicate that SystemPrompt above should be _ignored_ + Input string + Instruction string + Functions []functions.Function + MessageIndex int +} + +type ChatMessageTemplateData struct { + SystemPrompt string + Role string + RoleName string + FunctionName string + Content string + MessageIndex int + Function bool + FunctionCall interface{} + LastMessage bool +} + +const ( + ChatPromptTemplate templates.TemplateType = iota + ChatMessageTemplate + CompletionPromptTemplate + EditPromptTemplate + FunctionsPromptTemplate +) + +func (ml *ModelLoader) EvaluateTemplateForPrompt(templateType templates.TemplateType, templateName string, in PromptTemplateData) (string, error) { + // TODO: should this check be improved? 
+ if templateType == ChatMessageTemplate { + return "", fmt.Errorf("invalid templateType: ChatMessage") + } + return ml.templates.EvaluateTemplate(templateType, templateName, in) +} + +func (ml *ModelLoader) EvaluateTemplateForChatMessage(templateName string, messageData ChatMessageTemplateData) (string, error) { + return ml.templates.EvaluateTemplate(ChatMessageTemplate, templateName, messageData) +} diff --git a/pkg/model/loader_test.go b/pkg/model/template_test.go similarity index 100% rename from pkg/model/loader_test.go rename to pkg/model/template_test.go diff --git a/pkg/model/watchdog.go b/pkg/model/watchdog.go index b5381832..5702dda5 100644 --- a/pkg/model/watchdog.go +++ b/pkg/model/watchdog.go @@ -8,6 +8,7 @@ import ( "github.com/rs/zerolog/log" ) +// WatchDog tracks all the requests from GRPC clients. // All GRPC Clients created by ModelLoader should have an associated injected // watchdog that will keep track of the state of each backend (busy or not) // and for how much time it has been busy. 
@@ -15,7 +16,6 @@ import ( // force a reload of the model // The watchdog runs as a separate go routine, // and the GRPC client talks to it via a channel to send status updates - type WatchDog struct { sync.Mutex timetable map[string]time.Time From 5d892f86eaca87510b7e27fd496fed904778df31 Mon Sep 17 00:00:00 2001 From: Ettore Di Giacinto Date: Mon, 26 Aug 2024 14:47:36 +0200 Subject: [PATCH 0015/1530] chore(cuda): reduce binary size (#3379) fix(cuda): reduce binary size Signed-off-by: Ettore Di Giacinto --- Dockerfile | 9 ++++++++- Makefile | 2 +- 2 files changed, 9 insertions(+), 2 deletions(-) diff --git a/Dockerfile b/Dockerfile index 9d651760..14e037e6 100644 --- a/Dockerfile +++ b/Dockerfile @@ -286,7 +286,14 @@ COPY --from=grpc /opt/grpc /usr/local WORKDIR /build ## Build the binary -RUN make build +## If it's CUDA, we want to skip some of the llama-compat backends to save space +## We only leave the most CPU-optimized variant and the fallback for the cublas build +## (both will use CUDA for the actual computation) +RUN if [ "${BUILD_TYPE}" = "cublas" ]; then \ + SKIP_GRPC_BACKEND="backend-assets/grpc/llama-cpp-avx backend-assets/grpc/llama-cpp-avx2" make build; \ + else \ + make build; \ + fi RUN if [ ! 
-d "/build/sources/go-piper/piper-phonemize/pi/lib/" ]; then \ mkdir -p /build/sources/go-piper/piper-phonemize/pi/lib/ \ diff --git a/Makefile b/Makefile index 2ecbaea8..ca8077af 100644 --- a/Makefile +++ b/Makefile @@ -8,7 +8,7 @@ DETECT_LIBS?=true # llama.cpp versions GOLLAMA_REPO?=https://github.com/go-skynet/go-llama.cpp GOLLAMA_VERSION?=2b57a8ae43e4699d3dc5d1496a1ccd42922993be -CPPLLAMA_VERSION?=3ba780e2a8f0ffe13f571b27f0bbf2ca5a199efc +CPPLLAMA_VERSION?=e11bd856d538e44d24d8cad4b0381fba0984d162 # go-rwkv version RWKV_REPO?=https://github.com/donomii/go-rwkv.cpp From 11eaf9c0a705e42c50def0e4a45448c427c7d33a Mon Sep 17 00:00:00 2001 From: Ettore Di Giacinto Date: Mon, 26 Aug 2024 17:39:54 +0200 Subject: [PATCH 0016/1530] models(gallery): add calme-2.1-phi3.5-4b-i1 (#3383) Signed-off-by: Ettore Di Giacinto --- gallery/index.yaml | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/gallery/index.yaml b/gallery/index.yaml index d46da159..ff350b3b 100644 --- a/gallery/index.yaml +++ b/gallery/index.yaml @@ -4604,6 +4604,21 @@ - filename: Phi-3.5-mini-instruct.Q4_K_M.gguf sha256: 3f68916e850b107d8641d18bcd5548f0d66beef9e0a9077fe84ef28943eb7e88 uri: huggingface://MaziyarPanahi/Phi-3.5-mini-instruct-GGUF/Phi-3.5-mini-instruct.Q4_K_M.gguf +- !!merge <<: *phi-3 + name: "calme-2.1-phi3.5-4b-i1" + icon: https://huggingface.co/MaziyarPanahi/calme-2.1-phi3.5-4b/resolve/main/calme-2.webp + urls: + - https://huggingface.co/MaziyarPanahi/calme-2.1-phi3.5-4b + - https://huggingface.co/mradermacher/calme-2.1-phi3.5-4b-i1-GGUF + description: | + This model is a fine-tuned version of the microsoft/Phi-3.5-mini-instruct, pushing the boundaries of natural language understanding and generation even further. My goal was to create a versatile and robust model that excels across a wide range of benchmarks and real-world applications. 
+ overrides: + parameters: + model: calme-2.1-phi3.5-4b.i1-Q4_K_M.gguf + files: + - filename: calme-2.1-phi3.5-4b.i1-Q4_K_M.gguf + sha256: 989eccacd52b6d9ebf2c06c35c363da19aadb125659a10df299b7130bc293e77 + uri: huggingface://mradermacher/calme-2.1-phi3.5-4b-i1-GGUF/calme-2.1-phi3.5-4b.i1-Q4_K_M.gguf - &hermes-2-pro-mistral ### START Hermes url: "github:mudler/LocalAI/gallery/hermes-2-pro-mistral.yaml@master" From b38fd8780b9f9057ae708bd219cefc30a2cc8e37 Mon Sep 17 00:00:00 2001 From: Ettore Di Giacinto Date: Mon, 26 Aug 2024 17:53:47 +0200 Subject: [PATCH 0017/1530] models(gallery): add magnum-v3-34b (#3384) Signed-off-by: Ettore Di Giacinto --- gallery/index.yaml | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) diff --git a/gallery/index.yaml b/gallery/index.yaml index ff350b3b..d66b3236 100644 --- a/gallery/index.yaml +++ b/gallery/index.yaml @@ -3160,6 +3160,23 @@ - filename: Master-Yi-9B_Q4_K_M.gguf sha256: 57e2afcf9f24d7138a3b8e2b547336d7edc13621a5e8090bc196d7de360b2b45 uri: huggingface://qnguyen3/Master-Yi-9B-GGUF/Master-Yi-9B_Q4_K_M.gguf +- !!merge <<: *yi-chat + name: "magnum-v3-34b" + icon: https://cdn-uploads.huggingface.co/production/uploads/658a46cbfb9c2bdfae75b3a6/9yEmnTDG9bcC_bxwuDU6G.png + urls: + - https://huggingface.co/anthracite-org/magnum-v3-34b + - https://huggingface.co/bartowski/magnum-v3-34b-GGUF + description: | + This is the 9th in a series of models designed to replicate the prose quality of the Claude 3 models, specifically Sonnet and Opus. + + This model is fine-tuned on top of Yi-1.5-34 B-32 K. 
+ overrides: + parameters: + model: magnum-v3-34b-Q4_K_M.gguf + files: + - filename: magnum-v3-34b-Q4_K_M.gguf + sha256: f902956c0731581f1ff189e547e6e5aad86b77af5f4dc7e4fc26bcda5c1f7cc3 + uri: huggingface://bartowski/magnum-v3-34b-GGUF/magnum-v3-34b-Q4_K_M.gguf - &vicuna-chat ## LLama2 and derivatives ### Start Fimbulvetr From 18dddc1ae0b54627030917384ddd0374f1b4da05 Mon Sep 17 00:00:00 2001 From: Ettore Di Giacinto Date: Mon, 26 Aug 2024 20:19:27 +0200 Subject: [PATCH 0018/1530] chore(deps): update edgevpn (#3385) Signed-off-by: Ettore Di Giacinto --- go.mod | 31 ++++++++++++++++++------ go.sum | 76 ++++++++++++++++++++++++++++++++++++++++------------------ 2 files changed, 77 insertions(+), 30 deletions(-) diff --git a/go.mod b/go.mod index e9255a1e..71545d82 100644 --- a/go.mod +++ b/go.mod @@ -9,8 +9,10 @@ require ( github.com/M0Rf30/go-tiny-dream v0.0.0-20240425104733-c04fa463ace9 github.com/Masterminds/sprig/v3 v3.2.3 github.com/alecthomas/kong v0.9.0 + github.com/census-instrumentation/opencensus-proto v0.4.1 github.com/charmbracelet/glamour v0.7.0 github.com/chasefleming/elem-go v0.26.0 + github.com/cncf/xds/go v0.0.0-20240423153145-555b57ec207b github.com/containerd/containerd v1.7.19 github.com/donomii/go-rwkv.cpp v0.0.0-20240228065144-661e7ae26d44 github.com/elliotchance/orderedmap/v2 v2.2.0 @@ -23,8 +25,10 @@ require ( github.com/gofiber/swagger v1.0.0 github.com/gofiber/template/html/v2 v2.1.2 github.com/gofrs/flock v0.12.1 + github.com/golang/protobuf v1.5.4 github.com/google/go-containerregistry v0.19.2 github.com/google/uuid v1.6.0 + github.com/grpc-ecosystem/grpc-gateway v1.5.0 github.com/hpcloud/tail v1.0.0 github.com/ipfs/go-log v1.0.5 github.com/jaypipes/ghw v0.12.0 @@ -33,8 +37,8 @@ require ( github.com/libp2p/go-libp2p v0.36.2 github.com/mholt/archiver/v3 v3.5.1 github.com/microcosm-cc/bluemonday v1.0.26 + github.com/mudler/edgevpn v0.27.4 github.com/mudler/go-processmanager v0.0.0-20240820160718-8b802d3ecf82 - github.com/mudler/edgevpn 
v0.27.3 github.com/mudler/go-stable-diffusion v0.0.0-20240429204715-4a3cd6aeae6f github.com/onsi/ginkgo/v2 v2.20.0 github.com/onsi/gomega v1.34.1 @@ -56,6 +60,7 @@ require ( go.opentelemetry.io/otel/exporters/prometheus v0.50.0 go.opentelemetry.io/otel/metric v1.28.0 go.opentelemetry.io/otel/sdk/metric v1.28.0 + google.golang.org/api v0.180.0 google.golang.org/grpc v1.65.0 google.golang.org/protobuf v1.34.2 gopkg.in/yaml.v2 v2.4.0 @@ -64,8 +69,17 @@ require ( ) require ( + cel.dev/expr v0.15.0 // indirect + cloud.google.com/go/auth v0.4.1 // indirect + cloud.google.com/go/auth/oauth2adapt v0.2.2 // indirect + cloud.google.com/go/compute/metadata v0.3.0 // indirect + github.com/envoyproxy/protoc-gen-validate v1.0.4 // indirect + github.com/felixge/httpsnoop v1.0.4 // indirect github.com/go-task/slim-sprig/v3 v3.0.0 // indirect github.com/go-viper/mapstructure/v2 v2.0.0 // indirect + github.com/google/s2a-go v0.1.7 // indirect + github.com/googleapis/enterprise-certificate-proxy v0.3.2 // indirect + github.com/googleapis/gax-go/v2 v2.12.4 // indirect github.com/moby/docker-image-spec v1.3.1 // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect github.com/pion/datachannel v1.5.8 // indirect @@ -86,7 +100,10 @@ require ( github.com/pion/webrtc/v3 v3.3.0 // indirect github.com/shirou/gopsutil/v4 v4.24.7 // indirect github.com/wlynxg/anet v0.0.4 // indirect + go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.52.0 // indirect go.uber.org/mock v0.4.0 // indirect + golang.org/x/oauth2 v0.21.0 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20240617180043-68d350f18fd4 // indirect ) require ( @@ -160,11 +177,11 @@ require ( github.com/huandu/xstrings v1.3.3 // indirect github.com/huin/goupnp v1.3.0 // indirect github.com/imdario/mergo v0.3.16 // indirect - github.com/ipfs/boxo v0.10.0 // indirect + github.com/ipfs/boxo v0.21.0 // indirect github.com/ipfs/go-cid v0.4.1 // indirect github.com/ipfs/go-datastore 
v0.6.0 // indirect github.com/ipfs/go-log/v2 v2.5.1 // indirect - github.com/ipld/go-ipld-prime v0.20.0 // indirect + github.com/ipld/go-ipld-prime v0.21.0 // indirect github.com/jackpal/go-nat-pmp v1.0.2 // indirect github.com/jaypipes/pcidb v1.0.0 // indirect github.com/jbenet/go-temp-err-catcher v0.1.0 // indirect @@ -177,11 +194,11 @@ require ( github.com/libp2p/go-cidranger v1.1.0 // indirect github.com/libp2p/go-flow-metrics v0.1.0 // indirect github.com/libp2p/go-libp2p-asn-util v0.4.1 // indirect - github.com/libp2p/go-libp2p-kad-dht v0.25.2 // indirect + github.com/libp2p/go-libp2p-kad-dht v0.26.1 // indirect github.com/libp2p/go-libp2p-kbucket v0.6.3 // indirect github.com/libp2p/go-libp2p-pubsub v0.12.0 // indirect github.com/libp2p/go-libp2p-record v0.2.0 // indirect - github.com/libp2p/go-libp2p-routing-helpers v0.7.2 // indirect + github.com/libp2p/go-libp2p-routing-helpers v0.7.4 // indirect github.com/libp2p/go-msgio v0.3.0 // indirect github.com/libp2p/go-nat v0.2.0 // indirect github.com/libp2p/go-netroute v0.2.1 // indirect @@ -288,8 +305,8 @@ require ( golang.zx2c4.com/wintun v0.0.0-20211104114900-415007cec224 // indirect golang.zx2c4.com/wireguard v0.0.0-20220703234212-c31a7b1ab478 // indirect golang.zx2c4.com/wireguard/windows v0.5.3 // indirect - gonum.org/v1/gonum v0.13.0 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20240528184218-531527333157 // indirect + gonum.org/v1/gonum v0.15.0 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20240617180043-68d350f18fd4 // indirect gopkg.in/fsnotify.v1 v1.4.7 // indirect gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 // indirect howett.net/plist v1.0.0 // indirect diff --git a/go.sum b/go.sum index 85800fd6..7de4fdfa 100644 --- a/go.sum +++ b/go.sum @@ -1,7 +1,15 @@ +cel.dev/expr v0.15.0 h1:O1jzfJCQBfL5BFoYktaxwIhuttaQPsVWerH9/EEKx0w= +cel.dev/expr v0.15.0/go.mod h1:TRSuuV7DlVCE/uwv5QbAiW/v8l5O8C4eEPHeu7gf7Sg= cloud.google.com/go v0.26.0/go.mod 
h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.31.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.37.0/go.mod h1:TS1dMSSfndXH133OKGwekG838Om/cQT0BUHV3HcBgoo= +cloud.google.com/go/auth v0.4.1 h1:Z7YNIhlWRtrnKlZke7z3GMqzvuYzdc2z98F9D1NV5Hg= +cloud.google.com/go/auth v0.4.1/go.mod h1:QVBuVEKpCn4Zp58hzRGvL0tjRGU0YqdRTdCHM1IHnro= +cloud.google.com/go/auth/oauth2adapt v0.2.2 h1:+TTV8aXpjeChS9M+aTtN/TjdQnzJvmzKFt//oWu7HX4= +cloud.google.com/go/auth/oauth2adapt v0.2.2/go.mod h1:wcYjgpZI9+Yu7LyYBg4pqSiaRkfEK3GQcpb7C/uyF1Q= +cloud.google.com/go/compute/metadata v0.3.0 h1:Tz+eQXMEqDIKRsmY3cHTL6FVaynIjX2QxYC4trgAKZc= +cloud.google.com/go/compute/metadata v0.3.0/go.mod h1:zFmK7XCadkQkj6TtorcaGlCW1hT1fIilQDwofLpJ20k= dario.cat/mergo v1.0.0 h1:AGCNq9Evsj31mOgNPcLyXc+4PNABt905YmuqPYYpBWk= dario.cat/mergo v1.0.0/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk= dmitri.shuralyov.com/app/changes v0.0.0-20180602232624-0a106ad413e3/go.mod h1:Yl+fi1br7+Rr3LqpNJf1/uxUdtRUV+Tnj0o93V2B9MU= @@ -61,6 +69,8 @@ github.com/c-robinson/iplib v1.0.8/go.mod h1:i3LuuFL1hRT5gFpBRnEydzw8R6yhGkF4szN github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8= github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/census-instrumentation/opencensus-proto v0.4.1 h1:iKLQ0xPNFxR/2hzXZMrBo8f1j86j5WHzznCCQxV/b8g= +github.com/census-instrumentation/opencensus-proto v0.4.1/go.mod h1:4T9NM4+4Vw91VeyqjLS6ao50K5bOcLKN6Q42XnYaRYw= github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/charmbracelet/glamour v0.7.0 h1:2BtKGZ4iVJCDfMF229EzbeR1QRKLWztO9dMtjmqZSng= 
@@ -70,6 +80,8 @@ github.com/chasefleming/elem-go v0.26.0/go.mod h1:hz73qILBIKnTgOujnSMtEj20/epI+f github.com/cilium/ebpf v0.2.0/go.mod h1:To2CFviqOWL/M0gIMsvSMlqe7em/l1ALkX1PyjrX2Qs= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= +github.com/cncf/xds/go v0.0.0-20240423153145-555b57ec207b h1:ga8SEFjZ60pxLcmhnThWgvH2wg8376yUJmPhEH4H3kw= +github.com/cncf/xds/go v0.0.0-20240423153145-555b57ec207b/go.mod h1:W+zGtBO5Y1IgJhy4+A9GOqVhqLpfZi+vwmdNXUehLA8= github.com/containerd/cgroups v0.0.0-20201119153540-4cbc285b3327/go.mod h1:ZJeTFisyysqgcCdecO57Dj79RfL0LNeGiFUqLYQRYLE= github.com/containerd/cgroups v1.1.0 h1:v8rEWFl6EoqHB+swVNjVoCJE8o3jX7e8nqBGPLaDFBM= github.com/containerd/cgroups v1.1.0/go.mod h1:6ppBcbh/NOOUU+dMKrykgaBnK9lCIBxHqJDGwsa1mIw= @@ -135,13 +147,17 @@ github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymF github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/envoyproxy/protoc-gen-validate v1.0.4 h1:gVPz/FMfvh57HdSJQyvBtF00j8JU4zdyUgIUNhlgg0A= +github.com/envoyproxy/protoc-gen-validate v1.0.4/go.mod h1:qys6tmnRsYrQqIhm2bvKZH4Blx/1gTIZ2UKVY1M+Yew= +github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= +github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc= github.com/flynn/noise v1.1.0 h1:KjPQoQCEFdZDiP03phOvGi11+SVVhBG2wOWAorLsstg= github.com/flynn/noise v1.1.0/go.mod 
h1:xbMo+0i6+IGbYdJhF31t2eR1BIU0CYc12+BNAKwUTag= github.com/francoispqt/gojay v1.2.13 h1:d2m3sFjloqoIUQU3TsHBgj6qg/BVGlTBeHDUmyJnXKk= github.com/francoispqt/gojay v1.2.13/go.mod h1:ehT5mTG4ua4581f1++1WLG0vPdaA9HaiDsoyrBGkyDY= -github.com/frankban/quicktest v1.14.4 h1:g2rn0vABPOOXmZUj+vbmUp0lPoXEMuhTpIluN0XL9UY= -github.com/frankban/quicktest v1.14.4/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= +github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8= +github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= @@ -224,6 +240,8 @@ github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:W github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= +github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= github.com/golang/snappy v0.0.2/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM= github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= @@ -251,6 +269,8 @@ github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OI github.com/google/pprof v0.0.0-20240727154555-813a5fbdbec8 h1:FKHo8hFI3A+7w0aUQuYXQ+6EN5stWmeY/AZqtM8xk9k= github.com/google/pprof v0.0.0-20240727154555-813a5fbdbec8/go.mod h1:K1liHPHnj73Fdn/EKuT8nrFqBihUSKXoLYU0BuatOYo= github.com/google/renameio 
v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= +github.com/google/s2a-go v0.1.7 h1:60BLSyTrOV4/haCDW4zb1guZItoSq8foHCXrAnjBo/o= +github.com/google/s2a-go v0.1.7/go.mod h1:50CgR4k1jNlWBu4UfS4AcfhVe1r6pdZPygJ3R8F0Qdw= github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 h1:El6M4kTTCOh6aBiKaUGG7oYTSPP8MxqL4YI3kZKwcP4= github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ= github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= @@ -258,8 +278,12 @@ github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+ github.com/google/uuid v1.3.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/googleapis/enterprise-certificate-proxy v0.3.2 h1:Vie5ybvEvT75RniqhfFxPRy3Bf7vr3h0cechB90XaQs= +github.com/googleapis/enterprise-certificate-proxy v0.3.2/go.mod h1:VLSiSSBs/ksPL8kq3OBOQ6WRI2QnaFynd1DCjZ62+V0= github.com/googleapis/gax-go v2.0.0+incompatible/go.mod h1:SFVmujtThgffbyetf+mdk2eWhX2bMyUtNHzFKcPA9HY= github.com/googleapis/gax-go/v2 v2.0.3/go.mod h1:LLvjysVCY1JZeum8Z6l8qUty8fiNwE08qbEPm1M08qg= +github.com/googleapis/gax-go/v2 v2.12.4 h1:9gWcmF85Wvq4ryPFvGFaOgPIs1AQX0d0bcbGw4Z96qg= +github.com/googleapis/gax-go/v2 v2.12.4/go.mod h1:KYEYLorsnIGDi/rPC8b5TdlB9kbKoFubselGIoBMCwI= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= github.com/gopherjs/gopherjs v0.0.0-20190430165422-3e4dfb77656c h1:7lF+Vz0LqiRidnzC1Oq86fpX1q/iEv2KJdrCtttYjT4= github.com/gopherjs/gopherjs v0.0.0-20190430165422-3e4dfb77656c/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= @@ -268,6 +292,7 @@ github.com/gorilla/css v1.0.1/go.mod h1:BvnYkspnSzMmwRK+b8/xgNPLiIuNZr6vbZBTPQ2A github.com/gorilla/websocket v1.5.3 
h1:saDtZ6Pbx/0u+bgYQ3q96pZgCzfhKXGPqt7kZ72aNNg= github.com/gorilla/websocket v1.5.3/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= +github.com/grpc-ecosystem/grpc-gateway v1.5.0 h1:WcmKMm43DR7RdtlkEXQJyo5ws8iTp98CyhCCbOHMvNI= github.com/grpc-ecosystem/grpc-gateway v1.5.0/go.mod h1:RSKVYQBd5MCa4OVpNdGskqpgL2+G+NZTnrVHpWWfpdw= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I= @@ -291,23 +316,23 @@ github.com/huin/goupnp v1.3.0/go.mod h1:gnGPsThkYa7bFi/KWmEysQRf48l2dvR5bxr2OFck github.com/imdario/mergo v0.3.11/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= github.com/imdario/mergo v0.3.16 h1:wwQJbIsHYGMUyLSPrEq1CT16AhnhNJQ51+4fdHUnCl4= github.com/imdario/mergo v0.3.16/go.mod h1:WBLT9ZmE3lPoWsEzCh9LPo3TiwVN+ZKEjmz+hD27ysY= -github.com/ipfs/boxo v0.10.0 h1:tdDAxq8jrsbRkYoF+5Rcqyeb91hgWe2hp7iLu7ORZLY= -github.com/ipfs/boxo v0.10.0/go.mod h1:Fg+BnfxZ0RPzR0nOodzdIq3A7KgoWAOWsEIImrIQdBM= +github.com/ipfs/boxo v0.21.0 h1:XpGXb+TQQ0IUdYaeAxGzWjSs6ow/Lce148A/2IbRDVE= +github.com/ipfs/boxo v0.21.0/go.mod h1:NmweAYeY1USOaJJxouy7DLr/Y5M8UBSsCI2KRivO+TY= github.com/ipfs/go-cid v0.4.1 h1:A/T3qGvxi4kpKWWcPC/PgbvDA2bjVLO7n4UeVwnbs/s= github.com/ipfs/go-cid v0.4.1/go.mod h1:uQHwDeX4c6CtyrFwdqyhpNcxVewur1M7l7fNU7LKwZk= github.com/ipfs/go-datastore v0.6.0 h1:JKyz+Gvz1QEZw0LsX1IBn+JFCJQH4SJVFtM4uWU0Myk= github.com/ipfs/go-datastore v0.6.0/go.mod h1:rt5M3nNbSO/8q1t4LNkLyUwRs8HupMeN/8O4Vn9YAT8= github.com/ipfs/go-detect-race v0.0.1 h1:qX/xay2W3E4Q1U7d9lNs1sU9nvguX0a7319XbyQ6cOk= github.com/ipfs/go-detect-race v0.0.1/go.mod h1:8BNT7shDZPo99Q74BpGMK+4D8Mn4j46UU0LZ723meps= -github.com/ipfs/go-ipfs-util v0.0.2 h1:59Sswnk1MFaiq+VcaknX7aYEyGyGDAA73ilhEK2POp8= -github.com/ipfs/go-ipfs-util v0.0.2/go.mod 
h1:CbPtkWJzjLdEcezDns2XYaehFVNXG9zrdrtMecczcsQ= +github.com/ipfs/go-ipfs-util v0.0.3 h1:2RFdGez6bu2ZlZdI+rWfIdbQb1KudQp3VGwPtdNCmE0= +github.com/ipfs/go-ipfs-util v0.0.3/go.mod h1:LHzG1a0Ig4G+iZ26UUOMjHd+lfM84LZCrn17xAKWBvs= github.com/ipfs/go-log v1.0.5 h1:2dOuUCB1Z7uoczMWgAyDck5JLb72zHzrMnGnCNNbvY8= github.com/ipfs/go-log v1.0.5/go.mod h1:j0b8ZoR+7+R99LD9jZ6+AJsrzkPbSXbZfGakb5JPtIo= github.com/ipfs/go-log/v2 v2.1.3/go.mod h1:/8d0SH3Su5Ooc31QlL1WysJhvyOTDCjcCZ9Axpmri6g= github.com/ipfs/go-log/v2 v2.5.1 h1:1XdUzF7048prq4aBjDQQ4SL5RxftpRGdXhNRwKSAlcY= github.com/ipfs/go-log/v2 v2.5.1/go.mod h1:prSpmC1Gpllc9UYWxDiZDreBYw7zp4Iqp1kOLU9U5UI= -github.com/ipld/go-ipld-prime v0.20.0 h1:Ud3VwE9ClxpO2LkCYP7vWPc0Fo+dYdYzgxUJZ3uRG4g= -github.com/ipld/go-ipld-prime v0.20.0/go.mod h1:PzqZ/ZR981eKbgdr3y2DJYeD/8bgMawdGVlJDE8kK+M= +github.com/ipld/go-ipld-prime v0.21.0 h1:n4JmcpOlPDIxBcY037SVfpd1G+Sj1nKZah0m6QH9C2E= +github.com/ipld/go-ipld-prime v0.21.0/go.mod h1:3RLqy//ERg/y5oShXXdx5YIp50cFGOanyMctpPjsvxQ= github.com/jackpal/go-nat-pmp v1.0.2 h1:KzKSgb7qkJvOUTqYl9/Hg/me3pWgBmERKrTGD7BdWus= github.com/jackpal/go-nat-pmp v1.0.2/go.mod h1:QPH045xvCAeXUZOxsnwmrtiCoxIr9eob+4orBN1SBKc= github.com/jaypipes/ghw v0.12.0 h1:xU2/MDJfWmBhJnujHY9qwXQLs3DBsf0/Xa9vECY0Tho= @@ -367,16 +392,16 @@ github.com/libp2p/go-libp2p v0.36.2 h1:BbqRkDaGC3/5xfaJakLV/BrpjlAuYqSB0lRvtzL3B github.com/libp2p/go-libp2p v0.36.2/go.mod h1:XO3joasRE4Eup8yCTTP/+kX+g92mOgRaadk46LmPhHY= github.com/libp2p/go-libp2p-asn-util v0.4.1 h1:xqL7++IKD9TBFMgnLPZR6/6iYhawHKHl950SO9L6n94= github.com/libp2p/go-libp2p-asn-util v0.4.1/go.mod h1:d/NI6XZ9qxw67b4e+NgpQexCIiFYJjErASrYW4PFDN8= -github.com/libp2p/go-libp2p-kad-dht v0.25.2 h1:FOIk9gHoe4YRWXTu8SY9Z1d0RILol0TrtApsMDPjAVQ= -github.com/libp2p/go-libp2p-kad-dht v0.25.2/go.mod h1:6za56ncRHYXX4Nc2vn8z7CZK0P4QiMcrn77acKLM2Oo= +github.com/libp2p/go-libp2p-kad-dht v0.26.1 h1:AazV3LCImYVkDUGAHx5lIEgZ9iUI2QQKH5GMRQU8uEA= +github.com/libp2p/go-libp2p-kad-dht v0.26.1/go.mod 
h1:mqRUGJ/+7ziQ3XknU2kKHfsbbgb9xL65DXjPOJwmZF8= github.com/libp2p/go-libp2p-kbucket v0.6.3 h1:p507271wWzpy2f1XxPzCQG9NiN6R6lHL9GiSErbQQo0= github.com/libp2p/go-libp2p-kbucket v0.6.3/go.mod h1:RCseT7AH6eJWxxk2ol03xtP9pEHetYSPXOaJnOiD8i0= github.com/libp2p/go-libp2p-pubsub v0.12.0 h1:PENNZjSfk8KYxANRlpipdS7+BfLmOl3L2E/6vSNjbdI= github.com/libp2p/go-libp2p-pubsub v0.12.0/go.mod h1:Oi0zw9aw8/Y5GC99zt+Ef2gYAl+0nZlwdJonDyOz/sE= github.com/libp2p/go-libp2p-record v0.2.0 h1:oiNUOCWno2BFuxt3my4i1frNrt7PerzB3queqa1NkQ0= github.com/libp2p/go-libp2p-record v0.2.0/go.mod h1:I+3zMkvvg5m2OcSdoL0KPljyJyvNDFGKX7QdlpYUcwk= -github.com/libp2p/go-libp2p-routing-helpers v0.7.2 h1:xJMFyhQ3Iuqnk9Q2dYE1eUTzsah7NLw3Qs2zjUV78T0= -github.com/libp2p/go-libp2p-routing-helpers v0.7.2/go.mod h1:cN4mJAD/7zfPKXBcs9ze31JGYAZgzdABEm+q/hkswb8= +github.com/libp2p/go-libp2p-routing-helpers v0.7.4 h1:6LqS1Bzn5CfDJ4tzvP9uwh42IB7TJLNFJA6dEeGBv84= +github.com/libp2p/go-libp2p-routing-helpers v0.7.4/go.mod h1:we5WDj9tbolBXOuF1hGOkR+r7Uh1408tQbAKaT5n1LE= github.com/libp2p/go-libp2p-testing v0.12.0 h1:EPvBb4kKMWO29qP4mZGyhVzUyR25dvfUIK5WDu6iPUA= github.com/libp2p/go-libp2p-testing v0.12.0/go.mod h1:KcGDRXyN7sQCllucn1cOOS+Dmm7ujhfEyXQL5lvkcPg= github.com/libp2p/go-msgio v0.3.0 h1:mf3Z8B1xcFN314sWX+2vOTShIE0Mmn2TXn3YCUQGNj0= @@ -455,10 +480,8 @@ github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3Rllmb github.com/mr-tron/base58 v1.1.2/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc= github.com/mr-tron/base58 v1.2.0 h1:T/HDJBh4ZCPbU39/+c3rRvE0uKBQlU27+QI8LJ4t64o= github.com/mr-tron/base58 v1.2.0/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc= -github.com/mudler/edgevpn v0.27.2 h1:FsQ95jPCDJP9LzKJYCHx70z08DGXK5yrHMzH9Qok3nE= -github.com/mudler/edgevpn v0.27.2/go.mod h1:PK7rl0QQQTdlpie9rlaS7DguH500ogqproQli/QwrxU= -github.com/mudler/edgevpn v0.27.3 h1:9g6M7Q+2GdwDN12KmjhYJDi69cttvDW7luBmZioD2ZM= -github.com/mudler/edgevpn v0.27.3/go.mod 
h1:PK7rl0QQQTdlpie9rlaS7DguH500ogqproQli/QwrxU= +github.com/mudler/edgevpn v0.27.4 h1:T/irkShcnU6h9OZqtvWXpNo+3gQVPUMBEoUutxJ3YUg= +github.com/mudler/edgevpn v0.27.4/go.mod h1:NFs/RpDHCaltPFnZmOLCiUmVpTQloER1LbAtptOzqrw= github.com/mudler/go-piper v0.0.0-20240315144837-9d0100873a7d h1:8udOFrDf/I83JL0/u22j6U6Q9z9LoSdby2a/DWdd0/s= github.com/mudler/go-piper v0.0.0-20240315144837-9d0100873a7d/go.mod h1:O7SwdSWMilAWhBZMK9N9Y/oBDyMMzshE3ju8Xkexwig= github.com/mudler/go-processmanager v0.0.0-20240820160718-8b802d3ecf82 h1:FVT07EI8njvsD4tC2Hw8Xhactp5AWhsQWD4oTeQuSAU= @@ -509,9 +532,6 @@ github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU= github.com/onsi/ginkgo/v2 v2.20.0 h1:PE84V2mHqoT1sglvHc8ZdQtPcwmvvt29WLEEO3xmdZw= github.com/onsi/ginkgo/v2 v2.20.0/go.mod h1:lG9ey2Z29hR41WMVthyJBGUBcBhGOtoPF2VFMvBXFCI= -github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= -github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= -github.com/onsi/gomega v1.16.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY= github.com/onsi/gomega v1.34.1 h1:EUMJIKUjM8sKjYbtxQI9A4z2o+rruxnzNvpknOXie6k= github.com/onsi/gomega v1.34.1/go.mod h1:kU1QgUvBDLXBJq618Xvm2LUX6rSAfRaFRTcdOeDLwwY= github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= @@ -772,6 +792,8 @@ github.com/yusufpapurcu/wmi v1.2.4/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQ go.opencensus.io v0.18.0/go.mod h1:vKdFvxhtzZ9onBp9VKHK8z/sRpBMnKAsufL7wlDrCOA= go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.52.0 h1:9l89oX4ba9kHbBol3Xin3leYJ+252h0zszDtBwyKe2A= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.52.0/go.mod 
h1:XLZfZboOJWHNKUv7eH0inh0E9VV6eWDFB/9yJyTLPp0= go.opentelemetry.io/otel v1.28.0 h1:/SqNcYk+idO0CxKEUOtKQClMK/MimZihKYMruSMViUo= go.opentelemetry.io/otel v1.28.0/go.mod h1:q68ijF8Fc8CnMHKyzqL6akLO46ePnjkgfIMIjUIX9z4= go.opentelemetry.io/otel/exporters/prometheus v0.50.0 h1:2Ewsda6hejmbhGFyUvWZjUThC98Cf8Zy6g0zkIimOng= @@ -873,6 +895,8 @@ golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAG golang.org/x/oauth2 v0.0.0-20181017192945-9dcd33a902f4/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20181203162652-d668ce993890/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.21.0 h1:tsimM75w1tF/uws5rbeHzIWxEqElMehnc+iW793zsZs= +golang.org/x/oauth2 v0.21.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= golang.org/x/perf v0.0.0-20180704124530-6e6d33e29852/go.mod h1:JLpeXjPJfIyPr5TlbXLkXWLhP8nz10XfvxElABhCtcw= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -990,11 +1014,13 @@ golang.zx2c4.com/wireguard v0.0.0-20220703234212-c31a7b1ab478 h1:vDy//hdR+GnROE3 golang.zx2c4.com/wireguard v0.0.0-20220703234212-c31a7b1ab478/go.mod h1:bVQfyl2sCM/QIIGHpWbFGfHPuDvqnCNkT6MQLTCjO/U= golang.zx2c4.com/wireguard/windows v0.5.3 h1:On6j2Rpn3OEMXqBq00QEDC7bWSZrPIHKIus8eIuExIE= golang.zx2c4.com/wireguard/windows v0.5.3/go.mod h1:9TEe8TJmtwyQebdFwAkEWOPr3prrtqm+REGFifP60hI= -gonum.org/v1/gonum v0.13.0 h1:a0T3bh+7fhRyqeNbiC3qVHYmkiQgit3wnNan/2c0HMM= -gonum.org/v1/gonum v0.13.0/go.mod h1:/WPYRckkfWrhWefxyYTfrTtQR0KH4iyHNuzxqXAKyAU= +gonum.org/v1/gonum v0.15.0 h1:2lYxjRbTYyxkJxlhC+LvJIx3SsANPdRybu1tGj9/OrQ= +gonum.org/v1/gonum v0.15.0/go.mod h1:xzZVBJBtS+Mz4q0Yl2LJTk+OxOg4jiXZ7qBoM0uISGo= google.golang.org/api 
v0.0.0-20180910000450-7ca32eb868bf/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0= google.golang.org/api v0.0.0-20181030000543-1d582fd0359e/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0= google.golang.org/api v0.1.0/go.mod h1:UGEZY7KEX120AnNLIHFMKIo4obdJhkp2tPbaPlQx13Y= +google.golang.org/api v0.180.0 h1:M2D87Yo0rGBPWpo1orwfCLehUUL6E7/TYe5gvMQWDh4= +google.golang.org/api v0.180.0/go.mod h1:51AiyoEg1MJPSZ9zvklA8VnRILPXxn1iVen9v25XHAE= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.3.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= @@ -1006,8 +1032,12 @@ google.golang.org/genproto v0.0.0-20181202183823-bd91e49a0898/go.mod h1:7Ep/1NZk google.golang.org/genproto v0.0.0-20190306203927-b5d61aea6440/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240528184218-531527333157 h1:Zy9XzmMEflZ/MAaA7vNcoebnRAld7FsPW1EeBB7V0m8= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240528184218-531527333157/go.mod h1:EfXuqaE1J41VCDicxHzUDm+8rk+7ZdXzHV0IhO/I6s0= +google.golang.org/genproto v0.0.0-20240401170217-c3f982113cda h1:wu/KJm9KJwpfHWhkkZGohVC6KRrc1oJNr4jwtQMOQXw= +google.golang.org/genproto v0.0.0-20240401170217-c3f982113cda/go.mod h1:g2LLCvCeCSir/JJSWosk19BR4NVxGqHUC6rxIRsd7Aw= +google.golang.org/genproto/googleapis/api v0.0.0-20240617180043-68d350f18fd4 h1:MuYw1wJzT+ZkybKfaOXKp5hJiZDn2iHaXRw0mRYdHSc= +google.golang.org/genproto/googleapis/api v0.0.0-20240617180043-68d350f18fd4/go.mod h1:px9SlOOZBg1wM1zdnr8jEL4CNGUBZ+ZKYtNPApNQc4c= +google.golang.org/genproto/googleapis/rpc 
v0.0.0-20240617180043-68d350f18fd4 h1:Di6ANFilr+S60a4S61ZM00vLdw0IrQOSMS2/6mrnOU0= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240617180043-68d350f18fd4/go.mod h1:Ue6ibwXGpU+dqIcODieyLOcgj7z8+IcskoNIgZxtrFY= google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.16.0/go.mod h1:0JHn/cJsOMiMfNA9+DeHDlAU7KAAB5GDlYFpa9MZMio= google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= From 47fe31aa53b4b7d222986a785d816d0c4b8d3f7b Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 26 Aug 2024 20:57:23 +0000 Subject: [PATCH 0019/1530] chore(deps): Bump openai from 1.41.1 to 1.42.0 in /examples/functions (#3390) Bumps [openai](https://github.com/openai/openai-python) from 1.41.1 to 1.42.0. - [Release notes](https://github.com/openai/openai-python/releases) - [Changelog](https://github.com/openai/openai-python/blob/main/CHANGELOG.md) - [Commits](https://github.com/openai/openai-python/compare/v1.41.1...v1.42.0) --- updated-dependencies: - dependency-name: openai dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- examples/functions/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/functions/requirements.txt b/examples/functions/requirements.txt index 78ffdab7..5713a163 100644 --- a/examples/functions/requirements.txt +++ b/examples/functions/requirements.txt @@ -1,2 +1,2 @@ langchain==0.2.14 -openai==1.41.1 +openai==1.42.0 From bbfa5075f6792df62bb09448b49adcab7d9f5ce6 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 26 Aug 2024 21:44:22 +0000 Subject: [PATCH 0020/1530] chore(deps): Bump docs/themes/hugo-theme-relearn from `82a5e98` to `3a0ae52` (#3391) chore(deps): Bump docs/themes/hugo-theme-relearn Bumps [docs/themes/hugo-theme-relearn](https://github.com/McShelby/hugo-theme-relearn) from `82a5e98` to `3a0ae52`. - [Release notes](https://github.com/McShelby/hugo-theme-relearn/releases) - [Commits](https://github.com/McShelby/hugo-theme-relearn/compare/82a5e9876c67f4c86b9e37e825e27c951ce18d54...3a0ae52e610bbf99e6a4182a1d7889954e9b7d26) --- updated-dependencies: - dependency-name: docs/themes/hugo-theme-relearn dependency-type: direct:production ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- docs/themes/hugo-theme-relearn | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/themes/hugo-theme-relearn b/docs/themes/hugo-theme-relearn index 82a5e987..3a0ae52e 160000 --- a/docs/themes/hugo-theme-relearn +++ b/docs/themes/hugo-theme-relearn @@ -1 +1 @@ -Subproject commit 82a5e9876c67f4c86b9e37e825e27c951ce18d54 +Subproject commit 3a0ae52e610bbf99e6a4182a1d7889954e9b7d26 From 311954f41bbd8f0f37e0eff20671bb4ef91524e1 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 27 Aug 2024 00:11:11 +0000 Subject: [PATCH 0021/1530] chore(deps): Bump idna from 3.7 to 3.8 in /examples/langchain/langchainpy-localai-example (#3399) chore(deps): Bump idna Bumps [idna](https://github.com/kjd/idna) from 3.7 to 3.8. - [Release notes](https://github.com/kjd/idna/releases) - [Changelog](https://github.com/kjd/idna/blob/master/HISTORY.rst) - [Commits](https://github.com/kjd/idna/compare/v3.7...v3.8) --- updated-dependencies: - dependency-name: idna dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- examples/langchain/langchainpy-localai-example/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/langchain/langchainpy-localai-example/requirements.txt b/examples/langchain/langchainpy-localai-example/requirements.txt index 59330758..6afcda30 100644 --- a/examples/langchain/langchainpy-localai-example/requirements.txt +++ b/examples/langchain/langchainpy-localai-example/requirements.txt @@ -9,7 +9,7 @@ dataclasses-json==0.6.7 debugpy==1.8.2 frozenlist==1.4.1 greenlet==3.0.3 -idna==3.7 +idna==3.8 langchain==0.2.14 langchain-community==0.2.12 marshmallow==3.21.3 From fc640be59181c6acbaee80d7f1f5e2118d080c4f Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 27 Aug 2024 00:51:50 +0000 Subject: [PATCH 0022/1530] chore(deps): Bump llama-index from 0.10.65 to 0.11.1 in /examples/chainlit (#3404) chore(deps): Bump llama-index in /examples/chainlit Bumps [llama-index](https://github.com/run-llama/llama_index) from 0.10.65 to 0.11.1. - [Release notes](https://github.com/run-llama/llama_index/releases) - [Changelog](https://github.com/run-llama/llama_index/blob/main/CHANGELOG.md) - [Commits](https://github.com/run-llama/llama_index/compare/v0.10.65...v0.11.1) --- updated-dependencies: - dependency-name: llama-index dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- examples/chainlit/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/chainlit/requirements.txt b/examples/chainlit/requirements.txt index 9e8b3b31..b5ea6cf4 100644 --- a/examples/chainlit/requirements.txt +++ b/examples/chainlit/requirements.txt @@ -1,4 +1,4 @@ -llama_index==0.10.65 +llama_index==0.11.1 requests==2.32.3 weaviate_client==4.6.7 transformers From 11ed1cebb37c401ff2bdd959d0901f1c30ebe35a Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 27 Aug 2024 02:11:18 +0000 Subject: [PATCH 0023/1530] chore(deps): Bump llama-index from 0.10.67.post1 to 0.11.1 in /examples/langchain-chroma (#3406) chore(deps): Bump llama-index in /examples/langchain-chroma Bumps [llama-index](https://github.com/run-llama/llama_index) from 0.10.67.post1 to 0.11.1. - [Release notes](https://github.com/run-llama/llama_index/releases) - [Changelog](https://github.com/run-llama/llama_index/blob/main/CHANGELOG.md) - [Commits](https://github.com/run-llama/llama_index/compare/v0.10.67.post1...v0.11.1) --- updated-dependencies: - dependency-name: llama-index dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- examples/langchain-chroma/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/langchain-chroma/requirements.txt b/examples/langchain-chroma/requirements.txt index 171ab009..890321a3 100644 --- a/examples/langchain-chroma/requirements.txt +++ b/examples/langchain-chroma/requirements.txt @@ -1,4 +1,4 @@ langchain==0.2.14 openai==1.40.5 chromadb==0.5.5 -llama-index==0.10.67.post1 \ No newline at end of file +llama-index==0.11.1 \ No newline at end of file From d5c0ad8a1b1cd9b58f53a22abe4379e2a834db2b Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 27 Aug 2024 03:58:19 +0000 Subject: [PATCH 0024/1530] chore(deps): Bump marshmallow from 3.21.3 to 3.22.0 in /examples/langchain/langchainpy-localai-example (#3400) chore(deps): Bump marshmallow Bumps [marshmallow](https://github.com/marshmallow-code/marshmallow) from 3.21.3 to 3.22.0. - [Changelog](https://github.com/marshmallow-code/marshmallow/blob/dev/CHANGELOG.rst) - [Commits](https://github.com/marshmallow-code/marshmallow/compare/3.21.3...3.22.0) --- updated-dependencies: - dependency-name: marshmallow dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- examples/langchain/langchainpy-localai-example/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/langchain/langchainpy-localai-example/requirements.txt b/examples/langchain/langchainpy-localai-example/requirements.txt index 6afcda30..4404913a 100644 --- a/examples/langchain/langchainpy-localai-example/requirements.txt +++ b/examples/langchain/langchainpy-localai-example/requirements.txt @@ -12,7 +12,7 @@ greenlet==3.0.3 idna==3.8 langchain==0.2.14 langchain-community==0.2.12 -marshmallow==3.21.3 +marshmallow==3.22.0 marshmallow-enum==1.5.1 multidict==6.0.5 mypy-extensions==1.0.0 From 00ad01fd79025afd703d5ea01576bda6c4113579 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 27 Aug 2024 04:39:18 +0000 Subject: [PATCH 0025/1530] chore(deps): Bump openai from 1.40.5 to 1.42.0 in /examples/langchain-chroma (#3405) chore(deps): Bump openai in /examples/langchain-chroma Bumps [openai](https://github.com/openai/openai-python) from 1.40.5 to 1.42.0. - [Release notes](https://github.com/openai/openai-python/releases) - [Changelog](https://github.com/openai/openai-python/blob/main/CHANGELOG.md) - [Commits](https://github.com/openai/openai-python/compare/v1.40.5...v1.42.0) --- updated-dependencies: - dependency-name: openai dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- examples/langchain-chroma/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/langchain-chroma/requirements.txt b/examples/langchain-chroma/requirements.txt index 890321a3..4e80315b 100644 --- a/examples/langchain-chroma/requirements.txt +++ b/examples/langchain-chroma/requirements.txt @@ -1,4 +1,4 @@ langchain==0.2.14 -openai==1.40.5 +openai==1.42.0 chromadb==0.5.5 llama-index==0.11.1 \ No newline at end of file From a0252127a2abb11f6698a2184f916b63cf26c9be Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 27 Aug 2024 05:34:28 +0000 Subject: [PATCH 0026/1530] chore(deps): Bump openai from 1.41.1 to 1.42.0 in /examples/langchain/langchainpy-localai-example (#3401) chore(deps): Bump openai Bumps [openai](https://github.com/openai/openai-python) from 1.41.1 to 1.42.0. - [Release notes](https://github.com/openai/openai-python/releases) - [Changelog](https://github.com/openai/openai-python/blob/main/CHANGELOG.md) - [Commits](https://github.com/openai/openai-python/compare/v1.41.1...v1.42.0) --- updated-dependencies: - dependency-name: openai dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- examples/langchain/langchainpy-localai-example/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/langchain/langchainpy-localai-example/requirements.txt b/examples/langchain/langchainpy-localai-example/requirements.txt index 4404913a..223a4ea4 100644 --- a/examples/langchain/langchainpy-localai-example/requirements.txt +++ b/examples/langchain/langchainpy-localai-example/requirements.txt @@ -18,7 +18,7 @@ multidict==6.0.5 mypy-extensions==1.0.0 numexpr==2.10.1 numpy==2.1.0 -openai==1.41.1 +openai==1.42.0 openapi-schema-pydantic==1.2.4 packaging>=23.2 pydantic==2.8.2 From 6d0ede813f35462f8b1a87902f82ff2d5b6d8432 Mon Sep 17 00:00:00 2001 From: "LocalAI [bot]" <139863280+localai-bot@users.noreply.github.com> Date: Tue, 27 Aug 2024 10:44:08 +0200 Subject: [PATCH 0027/1530] chore: :arrow_up: Update ggerganov/llama.cpp to `7d787ed96c32be18603c158ab0276992cf0dc346` (#3409) :arrow_up: Update ggerganov/llama.cpp Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com> Co-authored-by: mudler <2420543+mudler@users.noreply.github.com> --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index ca8077af..2ab31246 100644 --- a/Makefile +++ b/Makefile @@ -8,7 +8,7 @@ DETECT_LIBS?=true # llama.cpp versions GOLLAMA_REPO?=https://github.com/go-skynet/go-llama.cpp GOLLAMA_VERSION?=2b57a8ae43e4699d3dc5d1496a1ccd42922993be -CPPLLAMA_VERSION?=e11bd856d538e44d24d8cad4b0381fba0984d162 +CPPLLAMA_VERSION?=7d787ed96c32be18603c158ab0276992cf0dc346 # go-rwkv version RWKV_REPO?=https://github.com/donomii/go-rwkv.cpp From cac472d4a12d4eceec7c9b1d657f988929d50c02 Mon Sep 17 00:00:00 2001 From: Ettore Di Giacinto Date: Tue, 27 Aug 2024 10:48:55 +0200 Subject: [PATCH 0028/1530] chore(deps): update edgevpn to v0.28 (#3412) Signed-off-by: Ettore Di Giacinto --- 
go.mod | 9 ++++++++- go.sum | 16 ++++++++++++++++ 2 files changed, 24 insertions(+), 1 deletion(-) diff --git a/go.mod b/go.mod index 71545d82..e621a8c3 100644 --- a/go.mod +++ b/go.mod @@ -37,7 +37,7 @@ require ( github.com/libp2p/go-libp2p v0.36.2 github.com/mholt/archiver/v3 v3.5.1 github.com/microcosm-cc/bluemonday v1.0.26 - github.com/mudler/edgevpn v0.27.4 + github.com/mudler/edgevpn v0.28.0 github.com/mudler/go-processmanager v0.0.0-20240820160718-8b802d3ecf82 github.com/mudler/go-stable-diffusion v0.0.0-20240429204715-4a3cd6aeae6f github.com/onsi/ginkgo/v2 v2.20.0 @@ -73,6 +73,7 @@ require ( cloud.google.com/go/auth v0.4.1 // indirect cloud.google.com/go/auth/oauth2adapt v0.2.2 // indirect cloud.google.com/go/compute/metadata v0.3.0 // indirect + github.com/cpuguy83/go-md2man/v2 v2.0.4 // indirect github.com/envoyproxy/protoc-gen-validate v1.0.4 // indirect github.com/felixge/httpsnoop v1.0.4 // indirect github.com/go-task/slim-sprig/v3 v3.0.0 // indirect @@ -80,6 +81,8 @@ require ( github.com/google/s2a-go v0.1.7 // indirect github.com/googleapis/enterprise-certificate-proxy v0.3.2 // indirect github.com/googleapis/gax-go/v2 v2.12.4 // indirect + github.com/labstack/echo/v4 v4.12.0 // indirect + github.com/labstack/gommon v0.4.2 // indirect github.com/moby/docker-image-spec v1.3.1 // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect github.com/pion/datachannel v1.5.8 // indirect @@ -98,8 +101,12 @@ require ( github.com/pion/transport/v2 v2.2.10 // indirect github.com/pion/turn/v2 v2.1.6 // indirect github.com/pion/webrtc/v3 v3.3.0 // indirect + github.com/russross/blackfriday/v2 v2.1.0 // indirect github.com/shirou/gopsutil/v4 v4.24.7 // indirect + github.com/urfave/cli/v2 v2.27.4 // indirect + github.com/valyala/fasttemplate v1.2.2 // indirect github.com/wlynxg/anet v0.0.4 // indirect + github.com/xrash/smetrics v0.0.0-20240521201337-686a1a2994c1 // indirect go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp 
v0.52.0 // indirect go.uber.org/mock v0.4.0 // indirect golang.org/x/oauth2 v0.21.0 // indirect diff --git a/go.sum b/go.sum index 7de4fdfa..045eba6d 100644 --- a/go.sum +++ b/go.sum @@ -102,6 +102,8 @@ github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSV github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= +github.com/cpuguy83/go-md2man/v2 v2.0.4 h1:wfIWP927BUkWJb2NmU/kNDYIBTh/ziUX91+lVfRxZq4= +github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/creachadair/mds v0.7.0 h1:7QoYqiPl18C0h7CLq9z9/qUH5Vr62V9677yJZHGLoQM= github.com/creachadair/mds v0.7.0/go.mod h1:4vrFYUzTXMJpMBU+OA292I6IUxKWCCfZkgXg+/kBZMo= github.com/creachadair/otp v0.4.2 h1:ngNMaD6Tzd7UUNRFyed7ykZFn/Wr5sSs5ffqZWm9pu8= @@ -380,6 +382,10 @@ github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= +github.com/labstack/echo/v4 v4.12.0 h1:IKpw49IMryVB2p1a4dzwlhP1O2Tf2E0Ir/450lH+kI0= +github.com/labstack/echo/v4 v4.12.0/go.mod h1:UP9Cr2DJXbOK3Kr9ONYzNowSh7HP0aG0ShAyycHSJvM= +github.com/labstack/gommon v0.4.2 h1:F8qTUNXgG1+6WQmqoUWnz8WiEU60mXVVw0P4ht1WRA0= +github.com/labstack/gommon v0.4.2/go.mod h1:QlUFxVM+SNXhDL/Z7YhocGIBYOiwB0mXm1+1bAPHPyU= github.com/lib/pq v0.0.0-20180327071824-d34b9ff171c2 h1:hRGSmZu7j271trc9sneMrpOW7GN5ngLm8YUZIPzf394= github.com/lib/pq v0.0.0-20180327071824-d34b9ff171c2/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= github.com/libp2p/go-buffer-pool v0.1.0 
h1:oK4mSFcQz7cTQIfqbe4MIj9gLW+mnanjyFtc6cdF0Y8= @@ -482,6 +488,8 @@ github.com/mr-tron/base58 v1.2.0 h1:T/HDJBh4ZCPbU39/+c3rRvE0uKBQlU27+QI8LJ4t64o= github.com/mr-tron/base58 v1.2.0/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc= github.com/mudler/edgevpn v0.27.4 h1:T/irkShcnU6h9OZqtvWXpNo+3gQVPUMBEoUutxJ3YUg= github.com/mudler/edgevpn v0.27.4/go.mod h1:NFs/RpDHCaltPFnZmOLCiUmVpTQloER1LbAtptOzqrw= +github.com/mudler/edgevpn v0.28.0 h1:oF/Msx3zPNajy3uYLPRT5M7H3Z+sCMU0wAA8TkB11PI= +github.com/mudler/edgevpn v0.28.0/go.mod h1:/xk8vnXUcGajPPMW5rZhPt1aD0b95LeOj2xGbRbDS8A= github.com/mudler/go-piper v0.0.0-20240315144837-9d0100873a7d h1:8udOFrDf/I83JL0/u22j6U6Q9z9LoSdby2a/DWdd0/s= github.com/mudler/go-piper v0.0.0-20240315144837-9d0100873a7d/go.mod h1:O7SwdSWMilAWhBZMK9N9Y/oBDyMMzshE3ju8Xkexwig= github.com/mudler/go-processmanager v0.0.0-20240820160718-8b802d3ecf82 h1:FVT07EI8njvsD4tC2Hw8Xhactp5AWhsQWD4oTeQuSAU= @@ -650,6 +658,7 @@ github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR github.com/russross/blackfriday v1.6.0 h1:KqfZb0pUVN2lYqZUYRddxF4OR8ZMURnJIG5Y3VRLtww= github.com/russross/blackfriday v1.6.0/go.mod h1:ti0ldHuxg49ri4ksnFxlkCfN+hvslNlmVHqNRXXJNAY= github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/sashabaranov/go-openai v1.26.2 h1:cVlQa3gn3eYqNXRW03pPlpy6zLG52EU4g0FrWXc0EFI= github.com/sashabaranov/go-openai v1.26.2/go.mod h1:lj5b/K+zjTSFxVLijLSTDZuP7adOgerWeFyZLUhAKRg= @@ -746,11 +755,16 @@ github.com/ulikunitz/xz v0.5.9 h1:RsKRIA2MO8x56wkkcd3LbtcE/uMszhb6DpRf+3uwa3I= github.com/ulikunitz/xz v0.5.9/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14= github.com/urfave/cli v1.22.2/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= github.com/urfave/cli v1.22.10/go.mod 
h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= +github.com/urfave/cli v1.22.12 h1:igJgVw1JdKH+trcLWLeLwZjU9fEfPesQ+9/e4MQ44S8= github.com/urfave/cli v1.22.12/go.mod h1:sSBEIC79qR6OvcmsD4U3KABeOTxDqQtdDnaFuUN30b8= +github.com/urfave/cli/v2 v2.27.4 h1:o1owoI+02Eb+K107p27wEX9Bb8eqIoZCfLXloLUSWJ8= +github.com/urfave/cli/v2 v2.27.4/go.mod h1:m4QzxcD2qpra4z7WhzEGn74WZLViBnMpb1ToCAKdGRQ= github.com/valyala/bytebufferpool v1.0.0 h1:GqA5TC/0021Y/b9FG4Oi9Mr3q7XYx6KllzawFIhcdPw= github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc= github.com/valyala/fasthttp v1.55.0 h1:Zkefzgt6a7+bVKHnu/YaYSOPfNYNisSVBo/unVCf8k8= github.com/valyala/fasthttp v1.55.0/go.mod h1:NkY9JtkrpPKmgwV3HTaS2HWaJss9RSIsRVfcxxoHiOM= +github.com/valyala/fasttemplate v1.2.2 h1:lxLXG0uE3Qnshl9QyaK6XJxMXlQZELvChBOCmQD0Loo= +github.com/valyala/fasttemplate v1.2.2/go.mod h1:KHLXt3tVN2HBp8eijSv/kGJopbvo7S+qRAEEKiv+SiQ= github.com/valyala/tcplisten v1.0.0 h1:rBHj/Xf+E1tRGZyWIWwJDiRY0zc1Js+CV5DqwacVSA8= github.com/valyala/tcplisten v1.0.0/go.mod h1:T0xQ8SeCZGxckz9qRXTfG43PvQ/mcWh7FwZEA7Ioqkc= github.com/vbatts/tar-split v0.11.3 h1:hLFqsOLQ1SsppQNTMpkpPXClLDfC2A3Zgy9OUU+RVck= @@ -778,6 +792,8 @@ github.com/xeipuuv/gojsonschema v1.2.0 h1:LhYJRs+L4fBtjZUfuSZIKGeVu0QRy8e5Xi7D17 github.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQluxsYJ78Id3Y= github.com/xi2/xz v0.0.0-20171230120015-48954b6210f8 h1:nIPpBwaJSVYIxUFsDv3M8ofmx9yWTog9BfvIu0q41lo= github.com/xi2/xz v0.0.0-20171230120015-48954b6210f8/go.mod h1:HUYIGzjTL3rfEspMxjDjgmT5uz5wzYJKVo23qUhYTos= +github.com/xrash/smetrics v0.0.0-20240521201337-686a1a2994c1 h1:gEOO8jv9F4OT7lGCjxCBTO/36wtF6j2nSip77qHd4x4= +github.com/xrash/smetrics v0.0.0-20240521201337-686a1a2994c1/go.mod h1:Ohn+xnUBiLI6FVj/9LpzZWtj1/D6lUovWYBkxHVV3aM= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= 
github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= From 8369614b6ee2994a650da62d53db0409c4faaf4f Mon Sep 17 00:00:00 2001 From: Ettore Di Giacinto Date: Tue, 27 Aug 2024 13:03:16 +0200 Subject: [PATCH 0029/1530] chore(deps): update edgevpn to v0.28.2 --- go.mod | 8 ++++---- go.sum | 9 +++++++++ 2 files changed, 13 insertions(+), 4 deletions(-) diff --git a/go.mod b/go.mod index e621a8c3..4d3f3e31 100644 --- a/go.mod +++ b/go.mod @@ -37,10 +37,10 @@ require ( github.com/libp2p/go-libp2p v0.36.2 github.com/mholt/archiver/v3 v3.5.1 github.com/microcosm-cc/bluemonday v1.0.26 - github.com/mudler/edgevpn v0.28.0 + github.com/mudler/edgevpn v0.28.2 github.com/mudler/go-processmanager v0.0.0-20240820160718-8b802d3ecf82 github.com/mudler/go-stable-diffusion v0.0.0-20240429204715-4a3cd6aeae6f - github.com/onsi/ginkgo/v2 v2.20.0 + github.com/onsi/ginkgo/v2 v2.20.1 github.com/onsi/gomega v1.34.1 github.com/ory/dockertest/v3 v3.10.0 github.com/otiai10/openaigo v1.7.0 @@ -283,8 +283,8 @@ require ( github.com/valyala/bytebufferpool v1.0.0 // indirect github.com/valyala/tcplisten v1.0.0 // indirect github.com/vbatts/tar-split v0.11.3 // indirect - github.com/vishvananda/netlink v1.2.1-beta.2 // indirect - github.com/vishvananda/netns v0.0.0-20211101163701-50045581ed74 // indirect + github.com/vishvananda/netlink v1.3.0 // indirect + github.com/vishvananda/netns v0.0.4 // indirect github.com/whyrusleeping/go-keyspace v0.0.0-20160322163242-5b898ac5add1 // indirect github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb // indirect github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect diff --git a/go.sum b/go.sum index 045eba6d..39bbb51e 100644 --- a/go.sum +++ b/go.sum @@ -490,6 +490,8 @@ github.com/mudler/edgevpn v0.27.4 h1:T/irkShcnU6h9OZqtvWXpNo+3gQVPUMBEoUutxJ3YUg github.com/mudler/edgevpn v0.27.4/go.mod h1:NFs/RpDHCaltPFnZmOLCiUmVpTQloER1LbAtptOzqrw= github.com/mudler/edgevpn v0.28.0 
h1:oF/Msx3zPNajy3uYLPRT5M7H3Z+sCMU0wAA8TkB11PI= github.com/mudler/edgevpn v0.28.0/go.mod h1:/xk8vnXUcGajPPMW5rZhPt1aD0b95LeOj2xGbRbDS8A= +github.com/mudler/edgevpn v0.28.2 h1:wxLrH9b3NNQDgMb0Uy4gmqbGh6Ad5jdbf21GrU32xVU= +github.com/mudler/edgevpn v0.28.2/go.mod h1:HWcdIwj5zBgOD04Hn3I+J5E5Yb3kK1CwwWaEe6/QERo= github.com/mudler/go-piper v0.0.0-20240315144837-9d0100873a7d h1:8udOFrDf/I83JL0/u22j6U6Q9z9LoSdby2a/DWdd0/s= github.com/mudler/go-piper v0.0.0-20240315144837-9d0100873a7d/go.mod h1:O7SwdSWMilAWhBZMK9N9Y/oBDyMMzshE3ju8Xkexwig= github.com/mudler/go-processmanager v0.0.0-20240820160718-8b802d3ecf82 h1:FVT07EI8njvsD4tC2Hw8Xhactp5AWhsQWD4oTeQuSAU= @@ -540,6 +542,8 @@ github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU= github.com/onsi/ginkgo/v2 v2.20.0 h1:PE84V2mHqoT1sglvHc8ZdQtPcwmvvt29WLEEO3xmdZw= github.com/onsi/ginkgo/v2 v2.20.0/go.mod h1:lG9ey2Z29hR41WMVthyJBGUBcBhGOtoPF2VFMvBXFCI= +github.com/onsi/ginkgo/v2 v2.20.1 h1:YlVIbqct+ZmnEph770q9Q7NVAz4wwIiVNahee6JyUzo= +github.com/onsi/ginkgo/v2 v2.20.1/go.mod h1:lG9ey2Z29hR41WMVthyJBGUBcBhGOtoPF2VFMvBXFCI= github.com/onsi/gomega v1.34.1 h1:EUMJIKUjM8sKjYbtxQI9A4z2o+rruxnzNvpknOXie6k= github.com/onsi/gomega v1.34.1/go.mod h1:kU1QgUvBDLXBJq618Xvm2LUX6rSAfRaFRTcdOeDLwwY= github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= @@ -773,9 +777,13 @@ github.com/viant/assertly v0.4.8/go.mod h1:aGifi++jvCrUaklKEKT0BU95igDNaqkvz+49u github.com/viant/toolbox v0.24.0/go.mod h1:OxMCG57V0PXuIP2HNQrtJf2CjqdmbrOx5EkMILuUhzM= github.com/vishvananda/netlink v1.2.1-beta.2 h1:Llsql0lnQEbHj0I1OuKyp8otXp0r3q0mPkuhwHfStVs= github.com/vishvananda/netlink v1.2.1-beta.2/go.mod h1:twkDnbuQxJYemMlGd4JFIcuhgX83tXhKS2B/PRMpOho= +github.com/vishvananda/netlink v1.3.0 h1:X7l42GfcV4S6E4vHTsw48qbrV+9PVojNfIhZcwQdrZk= +github.com/vishvananda/netlink v1.3.0/go.mod 
h1:i6NetklAujEcC6fK0JPjT8qSwWyO0HLn4UKG+hGqeJs= github.com/vishvananda/netns v0.0.0-20200728191858-db3c7e526aae/go.mod h1:DD4vA1DwXk04H54A1oHXtwZmA0grkVMdPxx/VGLCah0= github.com/vishvananda/netns v0.0.0-20211101163701-50045581ed74 h1:gga7acRE695APm9hlsSMoOoE65U4/TcqNj90mc69Rlg= github.com/vishvananda/netns v0.0.0-20211101163701-50045581ed74/go.mod h1:DD4vA1DwXk04H54A1oHXtwZmA0grkVMdPxx/VGLCah0= +github.com/vishvananda/netns v0.0.4 h1:Oeaw1EM2JMxD51g9uhtC0D7erkIjgmj8+JZc26m1YX8= +github.com/vishvananda/netns v0.0.4/go.mod h1:SpkAiCQRtJ6TvvxPnOSyH3BMl6unz3xZlaprSwhNNJM= github.com/warpfork/go-wish v0.0.0-20220906213052-39a1cc7a02d0 h1:GDDkbFiaK8jsSDJfjId/PEGEShv6ugrt4kYsC5UIDaQ= github.com/warpfork/go-wish v0.0.0-20220906213052-39a1cc7a02d0/go.mod h1:x6AKhvSSexNrVSrViXSHUEbICjmGXhtgABaHIySUSGw= github.com/whyrusleeping/go-keyspace v0.0.0-20160322163242-5b898ac5add1 h1:EKhdznlJHPMoKr0XTrX+IlJs1LH3lyx2nfr1dOlZ79k= @@ -963,6 +971,7 @@ golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.9.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.10.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.16.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= From 6a6094a58d2a0eeb6ec8bd5b23d6882914e7dd7a Mon Sep 17 00:00:00 2001 From: Ettore Di Giacinto Date: Tue, 27 Aug 2024 17:29:32 +0200 Subject: [PATCH 0030/1530] chore(deps): update edgevpn to v0.28.3 --- go.mod | 2 +- go.sum | 2 ++ 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/go.mod b/go.mod index 4d3f3e31..57202ad2 100644 --- a/go.mod +++ b/go.mod @@ -37,7 +37,7 @@ require ( 
github.com/libp2p/go-libp2p v0.36.2 github.com/mholt/archiver/v3 v3.5.1 github.com/microcosm-cc/bluemonday v1.0.26 - github.com/mudler/edgevpn v0.28.2 + github.com/mudler/edgevpn v0.28.3 github.com/mudler/go-processmanager v0.0.0-20240820160718-8b802d3ecf82 github.com/mudler/go-stable-diffusion v0.0.0-20240429204715-4a3cd6aeae6f github.com/onsi/ginkgo/v2 v2.20.1 diff --git a/go.sum b/go.sum index 39bbb51e..ab64b84a 100644 --- a/go.sum +++ b/go.sum @@ -492,6 +492,8 @@ github.com/mudler/edgevpn v0.28.0 h1:oF/Msx3zPNajy3uYLPRT5M7H3Z+sCMU0wAA8TkB11PI github.com/mudler/edgevpn v0.28.0/go.mod h1:/xk8vnXUcGajPPMW5rZhPt1aD0b95LeOj2xGbRbDS8A= github.com/mudler/edgevpn v0.28.2 h1:wxLrH9b3NNQDgMb0Uy4gmqbGh6Ad5jdbf21GrU32xVU= github.com/mudler/edgevpn v0.28.2/go.mod h1:HWcdIwj5zBgOD04Hn3I+J5E5Yb3kK1CwwWaEe6/QERo= +github.com/mudler/edgevpn v0.28.3 h1:yIuoMExwKHy/mNMBXIsm6FUFbnB9ELIxw9KXrK9KHDk= +github.com/mudler/edgevpn v0.28.3/go.mod h1:HWcdIwj5zBgOD04Hn3I+J5E5Yb3kK1CwwWaEe6/QERo= github.com/mudler/go-piper v0.0.0-20240315144837-9d0100873a7d h1:8udOFrDf/I83JL0/u22j6U6Q9z9LoSdby2a/DWdd0/s= github.com/mudler/go-piper v0.0.0-20240315144837-9d0100873a7d/go.mod h1:O7SwdSWMilAWhBZMK9N9Y/oBDyMMzshE3ju8Xkexwig= github.com/mudler/go-processmanager v0.0.0-20240820160718-8b802d3ecf82 h1:FVT07EI8njvsD4tC2Hw8Xhactp5AWhsQWD4oTeQuSAU= From da3bc8077d0d4257f8f880c1232cba98e255319b Mon Sep 17 00:00:00 2001 From: Ettore Di Giacinto Date: Tue, 27 Aug 2024 17:35:16 +0200 Subject: [PATCH 0031/1530] fix(tts): check error before inspecting result (#3415) Otherwise we panic when a res is nil Signed-off-by: Ettore Di Giacinto --- core/backend/tts.go | 3 +++ 1 file changed, 3 insertions(+) diff --git a/core/backend/tts.go b/core/backend/tts.go index 13a851ba..258882ae 100644 --- a/core/backend/tts.go +++ b/core/backend/tts.go @@ -78,6 +78,9 @@ func ModelTTS( Dst: filePath, Language: &language, }) + if err != nil { + return "", nil, err + } // return RPC error if any if !res.Success { From 
bc684c259c5afb73e64bc6c145e2aa261562a468 Mon Sep 17 00:00:00 2001 From: "LocalAI [bot]" <139863280+localai-bot@users.noreply.github.com> Date: Tue, 27 Aug 2024 23:41:49 +0200 Subject: [PATCH 0032/1530] chore: :arrow_up: Update ggerganov/llama.cpp to `20f1789dfb4e535d64ba2f523c64929e7891f428` (#3417) :arrow_up: Update ggerganov/llama.cpp Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com> Co-authored-by: mudler <2420543+mudler@users.noreply.github.com> --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index 2ab31246..6fbe59ee 100644 --- a/Makefile +++ b/Makefile @@ -8,7 +8,7 @@ DETECT_LIBS?=true # llama.cpp versions GOLLAMA_REPO?=https://github.com/go-skynet/go-llama.cpp GOLLAMA_VERSION?=2b57a8ae43e4699d3dc5d1496a1ccd42922993be -CPPLLAMA_VERSION?=7d787ed96c32be18603c158ab0276992cf0dc346 +CPPLLAMA_VERSION?=20f1789dfb4e535d64ba2f523c64929e7891f428 # go-rwkv version RWKV_REPO?=https://github.com/donomii/go-rwkv.cpp From b5b01ea6358afe1e605cc322b6ddae3a6b8c5651 Mon Sep 17 00:00:00 2001 From: Ettore Di Giacinto Date: Wed, 28 Aug 2024 09:42:06 +0200 Subject: [PATCH 0033/1530] models(gallery): add phi-3.5-vision (#3421) Signed-off-by: Ettore Di Giacinto --- gallery/index.yaml | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git a/gallery/index.yaml b/gallery/index.yaml index d66b3236..eb486a31 100644 --- a/gallery/index.yaml +++ b/gallery/index.yaml @@ -4636,6 +4636,24 @@ - filename: calme-2.1-phi3.5-4b.i1-Q4_K_M.gguf sha256: 989eccacd52b6d9ebf2c06c35c363da19aadb125659a10df299b7130bc293e77 uri: huggingface://mradermacher/calme-2.1-phi3.5-4b-i1-GGUF/calme-2.1-phi3.5-4b.i1-Q4_K_M.gguf +- !!merge <<: *phi-3 + name: "phi-3.5-vision-instruct" + urls: + - https://huggingface.co/microsoft/Phi-3.5-vision-instruct + - https://huggingface.co/abetlen/Phi-3.5-vision-instruct-gguf + description: | + Phi-3.5-vision is a lightweight, state-of-the-art open multimodal model built upon 
datasets which include - synthetic data and filtered publicly available websites - with a focus on very high-quality, reasoning dense data both on text and vision. The model belongs to the Phi-3 model family, and the multimodal version comes with 128K context length (in tokens) it can support. The model underwent a rigorous enhancement process, incorporating both supervised fine-tuning and direct preference optimization to ensure precise instruction adherence and robust safety measures. + overrides: + mmproj: Phi-3.5-3.8B-vision-instruct-mmproj-F16.gguf + parameters: + model: Phi-3.5-3.8B-vision-instruct-Q8_0.gguf + files: + - filename: Phi-3.5-3.8B-vision-instruct-Q8_0.gguf + sha256: ad0a1ee23ea9d88e932b493a4c077dea95c0f52a0f57a604509504c6ebc3df12 + uri: huggingface://abetlen/Phi-3.5-vision-instruct-gguf/Phi-3.5-3.8B-vision-instruct-Q8_0.gguf + - filename: Phi-3.5-3.8B-vision-instruct-mmproj-F16.gguf + sha256: ab8449cc7527c21d7082a6ca8266f67a71b459019f67e814ae1683700e61f3f9 + uri: huggingface://abetlen/Phi-3.5-vision-instruct-gguf/Phi-3.5-3.8B-vision-instruct-mmproj-F16.gguf - &hermes-2-pro-mistral ### START Hermes url: "github:mudler/LocalAI/gallery/hermes-2-pro-mistral.yaml@master" From e1d0d940736b68ca628f9075afd25c05a457fd45 Mon Sep 17 00:00:00 2001 From: Ettore Di Giacinto Date: Wed, 28 Aug 2024 10:30:14 +0200 Subject: [PATCH 0034/1530] Revert "models(gallery): add phi-3.5-vision" (#3422) Revert "models(gallery): add phi-3.5-vision (#3421)" This reverts commit b5b01ea6358afe1e605cc322b6ddae3a6b8c5651. 
--- gallery/index.yaml | 18 ------------------ 1 file changed, 18 deletions(-) diff --git a/gallery/index.yaml b/gallery/index.yaml index eb486a31..d66b3236 100644 --- a/gallery/index.yaml +++ b/gallery/index.yaml @@ -4636,24 +4636,6 @@ - filename: calme-2.1-phi3.5-4b.i1-Q4_K_M.gguf sha256: 989eccacd52b6d9ebf2c06c35c363da19aadb125659a10df299b7130bc293e77 uri: huggingface://mradermacher/calme-2.1-phi3.5-4b-i1-GGUF/calme-2.1-phi3.5-4b.i1-Q4_K_M.gguf -- !!merge <<: *phi-3 - name: "phi-3.5-vision-instruct" - urls: - - https://huggingface.co/microsoft/Phi-3.5-vision-instruct - - https://huggingface.co/abetlen/Phi-3.5-vision-instruct-gguf - description: | - Phi-3.5-vision is a lightweight, state-of-the-art open multimodal model built upon datasets which include - synthetic data and filtered publicly available websites - with a focus on very high-quality, reasoning dense data both on text and vision. The model belongs to the Phi-3 model family, and the multimodal version comes with 128K context length (in tokens) it can support. The model underwent a rigorous enhancement process, incorporating both supervised fine-tuning and direct preference optimization to ensure precise instruction adherence and robust safety measures. 
- overrides: - mmproj: Phi-3.5-3.8B-vision-instruct-mmproj-F16.gguf - parameters: - model: Phi-3.5-3.8B-vision-instruct-Q8_0.gguf - files: - - filename: Phi-3.5-3.8B-vision-instruct-Q8_0.gguf - sha256: ad0a1ee23ea9d88e932b493a4c077dea95c0f52a0f57a604509504c6ebc3df12 - uri: huggingface://abetlen/Phi-3.5-vision-instruct-gguf/Phi-3.5-3.8B-vision-instruct-Q8_0.gguf - - filename: Phi-3.5-3.8B-vision-instruct-mmproj-F16.gguf - sha256: ab8449cc7527c21d7082a6ca8266f67a71b459019f67e814ae1683700e61f3f9 - uri: huggingface://abetlen/Phi-3.5-vision-instruct-gguf/Phi-3.5-3.8B-vision-instruct-mmproj-F16.gguf - &hermes-2-pro-mistral ### START Hermes url: "github:mudler/LocalAI/gallery/hermes-2-pro-mistral.yaml@master" From d2da2f16728df099ac017c907114501162f6326f Mon Sep 17 00:00:00 2001 From: Ettore Di Giacinto Date: Wed, 28 Aug 2024 10:38:18 +0200 Subject: [PATCH 0035/1530] chore(docs): add links to demo and explorer Signed-off-by: Ettore Di Giacinto --- README.md | 2 +- docs/content/docs/overview.md | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index ce3289f9..eb39b075 100644 --- a/README.md +++ b/README.md @@ -40,7 +40,7 @@ > :bulb: Get help - [❓FAQ](https://localai.io/faq/) [💭Discussions](https://github.com/go-skynet/LocalAI/discussions) [:speech_balloon: Discord](https://discord.gg/uJAeKSAGDy) [:book: Documentation website](https://localai.io/) > -> [💻 Quickstart](https://localai.io/basics/getting_started/) [📣 News](https://localai.io/basics/news/) [ 🛫 Examples ](https://github.com/go-skynet/LocalAI/tree/master/examples/) [ 🖼️ Models ](https://localai.io/models/) [ 🚀 Roadmap ](https://github.com/mudler/LocalAI/issues?q=is%3Aissue+is%3Aopen+label%3Aroadmap) +> [💻 Quickstart](https://localai.io/basics/getting_started/) [📣 News](https://localai.io/basics/news/) [ 🛫 Examples ](https://github.com/go-skynet/LocalAI/tree/master/examples/) [ 🖼️ Models ](https://localai.io/models/) [ 🚀 Roadmap 
](https://github.com/mudler/LocalAI/issues?q=is%3Aissue+is%3Aopen+label%3Aroadmap) [ 🥽 Demo ](https://demo.localai.io) [ 🌍 Explorer ](https://explorer.localai.io) [![tests](https://github.com/go-skynet/LocalAI/actions/workflows/test.yml/badge.svg)](https://github.com/go-skynet/LocalAI/actions/workflows/test.yml)[![Build and Release](https://github.com/go-skynet/LocalAI/actions/workflows/release.yaml/badge.svg)](https://github.com/go-skynet/LocalAI/actions/workflows/release.yaml)[![build container images](https://github.com/go-skynet/LocalAI/actions/workflows/image.yml/badge.svg)](https://github.com/go-skynet/LocalAI/actions/workflows/image.yml)[![Bump dependencies](https://github.com/go-skynet/LocalAI/actions/workflows/bump_deps.yaml/badge.svg)](https://github.com/go-skynet/LocalAI/actions/workflows/bump_deps.yaml)[![Artifact Hub](https://img.shields.io/endpoint?url=https://artifacthub.io/badge/repository/localai)](https://artifacthub.io/packages/search?repo=localai) diff --git a/docs/content/docs/overview.md b/docs/content/docs/overview.md index f90b1ded..8bd28db9 100644 --- a/docs/content/docs/overview.md +++ b/docs/content/docs/overview.md @@ -51,7 +51,7 @@ icon = "info" > 💡 Get help - [❓FAQ](https://localai.io/faq/) [💭Discussions](https://github.com/go-skynet/LocalAI/discussions) [💭Discord](https://discord.gg/uJAeKSAGDy) > -> [💻 Quickstart](https://localai.io/basics/getting_started/) [📣 News](https://localai.io/basics/news/) [ 🛫 Examples ](https://github.com/go-skynet/LocalAI/tree/master/examples/) [ 🖼️ Models ](https://localai.io/models/) [ 🚀 Roadmap ](https://github.com/mudler/LocalAI/issues?q=is%3Aissue+is%3Aopen+label%3Aroadmap) +> [💻 Quickstart](https://localai.io/basics/getting_started/) [📣 News](https://localai.io/basics/news/) [ 🛫 Examples ](https://github.com/go-skynet/LocalAI/tree/master/examples/) [ 🖼️ Models ](https://localai.io/models/) [ 🚀 Roadmap ](https://github.com/mudler/LocalAI/issues?q=is%3Aissue+is%3Aopen+label%3Aroadmap) [ 🥽 Demo 
](https://demo.localai.io) [ 🌍 Explorer ](https://explorer.localai.io) From 12950cac21f24ea054b03f23e62559f2dfa2bc45 Mon Sep 17 00:00:00 2001 From: Ettore Di Giacinto Date: Wed, 28 Aug 2024 10:40:41 +0200 Subject: [PATCH 0036/1530] chore(docs): update links Signed-off-by: Ettore Di Giacinto --- README.md | 2 +- docs/content/docs/overview.md | 4 +--- 2 files changed, 2 insertions(+), 4 deletions(-) diff --git a/README.md b/README.md index eb39b075..bf58e1aa 100644 --- a/README.md +++ b/README.md @@ -40,7 +40,7 @@ > :bulb: Get help - [❓FAQ](https://localai.io/faq/) [💭Discussions](https://github.com/go-skynet/LocalAI/discussions) [:speech_balloon: Discord](https://discord.gg/uJAeKSAGDy) [:book: Documentation website](https://localai.io/) > -> [💻 Quickstart](https://localai.io/basics/getting_started/) [📣 News](https://localai.io/basics/news/) [ 🛫 Examples ](https://github.com/go-skynet/LocalAI/tree/master/examples/) [ 🖼️ Models ](https://localai.io/models/) [ 🚀 Roadmap ](https://github.com/mudler/LocalAI/issues?q=is%3Aissue+is%3Aopen+label%3Aroadmap) [ 🥽 Demo ](https://demo.localai.io) [ 🌍 Explorer ](https://explorer.localai.io) +> [💻 Quickstart](https://localai.io/basics/getting_started/) [🖼️ Models](https://models.localai.io/) [🚀 Roadmap](https://github.com/mudler/LocalAI/issues?q=is%3Aissue+is%3Aopen+label%3Aroadmap) [🥽 Demo](https://demo.localai.io) [🌍 Explorer](https://explorer.localai.io) [🛫 Examples](https://github.com/go-skynet/LocalAI/tree/master/examples/) [![tests](https://github.com/go-skynet/LocalAI/actions/workflows/test.yml/badge.svg)](https://github.com/go-skynet/LocalAI/actions/workflows/test.yml)[![Build and Release](https://github.com/go-skynet/LocalAI/actions/workflows/release.yaml/badge.svg)](https://github.com/go-skynet/LocalAI/actions/workflows/release.yaml)[![build container images](https://github.com/go-skynet/LocalAI/actions/workflows/image.yml/badge.svg)](https://github.com/go-skynet/LocalAI/actions/workflows/image.yml)[![Bump 
dependencies](https://github.com/go-skynet/LocalAI/actions/workflows/bump_deps.yaml/badge.svg)](https://github.com/go-skynet/LocalAI/actions/workflows/bump_deps.yaml)[![Artifact Hub](https://img.shields.io/endpoint?url=https://artifacthub.io/badge/repository/localai)](https://artifacthub.io/packages/search?repo=localai) diff --git a/docs/content/docs/overview.md b/docs/content/docs/overview.md index 8bd28db9..5bcb6178 100644 --- a/docs/content/docs/overview.md +++ b/docs/content/docs/overview.md @@ -51,9 +51,7 @@ icon = "info" > 💡 Get help - [❓FAQ](https://localai.io/faq/) [💭Discussions](https://github.com/go-skynet/LocalAI/discussions) [💭Discord](https://discord.gg/uJAeKSAGDy) > -> [💻 Quickstart](https://localai.io/basics/getting_started/) [📣 News](https://localai.io/basics/news/) [ 🛫 Examples ](https://github.com/go-skynet/LocalAI/tree/master/examples/) [ 🖼️ Models ](https://localai.io/models/) [ 🚀 Roadmap ](https://github.com/mudler/LocalAI/issues?q=is%3Aissue+is%3Aopen+label%3Aroadmap) [ 🥽 Demo ](https://demo.localai.io) [ 🌍 Explorer ](https://explorer.localai.io) - - +> [💻 Quickstart](https://localai.io/basics/getting_started/) [🖼️ Models](https://models.localai.io/) [🚀 Roadmap](https://github.com/mudler/LocalAI/issues?q=is%3Aissue+is%3Aopen+label%3Aroadmap) [🥽 Demo](https://demo.localai.io) [🌍 Explorer](https://explorer.localai.io) [🛫 Examples](https://github.com/go-skynet/LocalAI/tree/master/examples/) **LocalAI** is the free, Open Source OpenAI alternative. LocalAI act as a drop-in replacement REST API that's compatible with OpenAI API specifications for local inferencing. It allows you to run LLMs, generate images, audio (and not only) locally or on-prem with consumer grade hardware, supporting multiple model families and architectures. Does not require GPU. It is created and maintained by [Ettore Di Giacinto](https://github.com/mudler). 
From 49739e85a09f90c6952e7faa03ec26d7aa2baccc Mon Sep 17 00:00:00 2001 From: Ettore Di Giacinto Date: Wed, 28 Aug 2024 14:48:16 +0200 Subject: [PATCH 0037/1530] Update README.md Signed-off-by: Ettore Di Giacinto --- README.md | 1 + 1 file changed, 1 insertion(+) diff --git a/README.md b/README.md index bf58e1aa..5b9a2c43 100644 --- a/README.md +++ b/README.md @@ -72,6 +72,7 @@ docker run -ti --name local-ai -p 8080:8080 localai/localai:latest-aio-cpu [Roadmap](https://github.com/mudler/LocalAI/issues?q=is%3Aissue+is%3Aopen+label%3Aroadmap) +- Aug 2024: 🆕 FLUX-1, [P2P Explorer](https://explorer.localai.io) - July 2024: 🔥🔥 🆕 P2P Dashboard, LocalAI Federated mode and AI Swarms: https://github.com/mudler/LocalAI/pull/2723 - June 2024: 🆕 You can browse now the model gallery without LocalAI! Check out https://models.localai.io - June 2024: Support for models from OCI registries: https://github.com/mudler/LocalAI/pull/2628 From bb9a5aea9eb1d473b575bf28980ec445867f9465 Mon Sep 17 00:00:00 2001 From: "LocalAI [bot]" <139863280+localai-bot@users.noreply.github.com> Date: Wed, 28 Aug 2024 23:45:07 +0200 Subject: [PATCH 0038/1530] chore(model-gallery): :arrow_up: update checksum (#3425) :arrow_up: Checksum updates in gallery/index.yaml Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com> Co-authored-by: mudler <2420543+mudler@users.noreply.github.com> --- gallery/index.yaml | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/gallery/index.yaml b/gallery/index.yaml index d66b3236..df65bd5d 100644 --- a/gallery/index.yaml +++ b/gallery/index.yaml @@ -27,8 +27,8 @@ - filename: SmolLM-1.7B-Instruct.Q4_K_M.gguf sha256: 2b07eb2293ed3fc544a9858beda5bfb03dcabda6aa6582d3c85768c95f498d28 uri: huggingface://MaziyarPanahi/SmolLM-1.7B-Instruct-GGUF/SmolLM-1.7B-Instruct.Q4_K_M.gguf -## LLama3.1 - &llama31 + ## LLama3.1 url: "github:mudler/LocalAI/gallery/llama3.1-instruct.yaml@master" icon: 
https://cdn-uploads.huggingface.co/production/uploads/642cc1c253e76b4c2286c58e/aJJxKus1wP5N-euvHEUq7.png name: "meta-llama-3.1-8b-instruct" @@ -1132,7 +1132,7 @@ - https://huggingface.co/TheDrummer/Rocinante-12B-v1.1-GGUF - https://huggingface.co/TheDrummer/Rocinante-12B-v1.1 description: | - A versatile workhorse for any adventure! + A versatile workhorse for any adventure! overrides: parameters: model: Rocinante-12B-v1.1-Q4_K_M.gguf @@ -1708,7 +1708,7 @@ files: - filename: Meta-Llama-3-8B-Instruct.Q4_0.gguf uri: huggingface://QuantFactory/Meta-Llama-3-8B-Instruct-GGUF/Meta-Llama-3-8B-Instruct.Q4_0.gguf - sha256: 1977ae6185ef5bc476e27db85bb3d79ca4bd87e7b03399083c297d9c612d334c + sha256: 18c8eb909db870d456a823700b4c82f6259e6052899f0ebf2bddc9b2417cd355 - !!merge <<: *llama3 name: "llama3-8b-instruct:Q6_K" overrides: @@ -1717,7 +1717,7 @@ files: - filename: Meta-Llama-3-8B-Instruct.Q6_K.gguf uri: huggingface://QuantFactory/Meta-Llama-3-8B-Instruct-GGUF/Meta-Llama-3-8B-Instruct.Q6_K.gguf - sha256: d1cdc49a716674c8e2506039bef85b905376cdaafc33e449b5aa2cf88f9532ad + sha256: 67f8eb2218938a5fd711605d526d2287e9a4ad26849efdf3bf7c0c17dcbde018 - !!merge <<: *llama3 name: "llama-3-8b-instruct-abliterated" urls: From ae6d327698a71a731365b9f0eb8d9e8e1ffec9d0 Mon Sep 17 00:00:00 2001 From: "LocalAI [bot]" <139863280+localai-bot@users.noreply.github.com> Date: Thu, 29 Aug 2024 09:56:31 +0200 Subject: [PATCH 0039/1530] chore: :arrow_up: Update ggerganov/llama.cpp to `9fe94ccac92693d4ae1bc283ff0574e8b3f4e765` (#3424) :arrow_up: Update ggerganov/llama.cpp Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com> Co-authored-by: mudler <2420543+mudler@users.noreply.github.com> --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index 6fbe59ee..69bb3aa6 100644 --- a/Makefile +++ b/Makefile @@ -8,7 +8,7 @@ DETECT_LIBS?=true # llama.cpp versions GOLLAMA_REPO?=https://github.com/go-skynet/go-llama.cpp 
GOLLAMA_VERSION?=2b57a8ae43e4699d3dc5d1496a1ccd42922993be -CPPLLAMA_VERSION?=20f1789dfb4e535d64ba2f523c64929e7891f428 +CPPLLAMA_VERSION?=9fe94ccac92693d4ae1bc283ff0574e8b3f4e765 # go-rwkv version RWKV_REPO?=https://github.com/donomii/go-rwkv.cpp From 11d960b2a6cca4028d6b3aff64e1bb5e09a3bb2f Mon Sep 17 00:00:00 2001 From: Ettore Di Giacinto Date: Fri, 30 Aug 2024 00:10:17 +0200 Subject: [PATCH 0040/1530] chore(cli): be consistent between workers and expose ExtraLLamaCPPArgs to both (#3428) * chore(cli): be consistent between workers and expose ExtraLLamaCPPArgs to both Fixes: https://github.com/mudler/LocalAI/issues/3427 Signed-off-by: Ettore Di Giacinto * bump grpcio Signed-off-by: Ettore Di Giacinto --------- Signed-off-by: Ettore Di Giacinto --- backend/python/autogptq/requirements.txt | 2 +- backend/python/bark/requirements.txt | 2 +- backend/python/common/template/requirements.txt | 2 +- backend/python/coqui/requirements.txt | 2 +- backend/python/diffusers/requirements.txt | 2 +- backend/python/exllama/requirements.txt | 2 +- backend/python/exllama2/requirements.txt | 2 +- backend/python/mamba/requirements.txt | 2 +- backend/python/openvoice/requirements-intel.txt | 2 +- backend/python/openvoice/requirements.txt | 2 +- backend/python/parler-tts/requirements.txt | 2 +- backend/python/rerankers/requirements.txt | 2 +- backend/python/sentencetransformers/requirements.txt | 2 +- backend/python/transformers-musicgen/requirements.txt | 2 +- backend/python/transformers/requirements.txt | 2 +- backend/python/vall-e-x/requirements.txt | 2 +- backend/python/vllm/requirements.txt | 2 +- core/cli/worker/worker.go | 1 + core/cli/worker/worker_llamacpp.go | 5 ++--- core/cli/worker/worker_p2p.go | 1 - docs/content/docs/features/distributed_inferencing.md | 4 ++-- 21 files changed, 22 insertions(+), 23 deletions(-) diff --git a/backend/python/autogptq/requirements.txt b/backend/python/autogptq/requirements.txt index 174ccc94..150fcc1b 100644 --- 
a/backend/python/autogptq/requirements.txt +++ b/backend/python/autogptq/requirements.txt @@ -1,6 +1,6 @@ accelerate auto-gptq==0.7.1 -grpcio==1.66.0 +grpcio==1.66.1 protobuf certifi transformers \ No newline at end of file diff --git a/backend/python/bark/requirements.txt b/backend/python/bark/requirements.txt index ed15b678..6404b98e 100644 --- a/backend/python/bark/requirements.txt +++ b/backend/python/bark/requirements.txt @@ -1,4 +1,4 @@ bark==0.1.5 -grpcio==1.66.0 +grpcio==1.66.1 protobuf certifi \ No newline at end of file diff --git a/backend/python/common/template/requirements.txt b/backend/python/common/template/requirements.txt index 047ef7d5..21610c1c 100644 --- a/backend/python/common/template/requirements.txt +++ b/backend/python/common/template/requirements.txt @@ -1,2 +1,2 @@ -grpcio==1.66.0 +grpcio==1.66.1 protobuf \ No newline at end of file diff --git a/backend/python/coqui/requirements.txt b/backend/python/coqui/requirements.txt index 8fb684c0..d7708363 100644 --- a/backend/python/coqui/requirements.txt +++ b/backend/python/coqui/requirements.txt @@ -1,4 +1,4 @@ TTS==0.22.0 -grpcio==1.66.0 +grpcio==1.66.1 protobuf certifi \ No newline at end of file diff --git a/backend/python/diffusers/requirements.txt b/backend/python/diffusers/requirements.txt index 2f85b4e3..043c7aba 100644 --- a/backend/python/diffusers/requirements.txt +++ b/backend/python/diffusers/requirements.txt @@ -1,5 +1,5 @@ setuptools -grpcio==1.66.0 +grpcio==1.66.1 pillow protobuf certifi diff --git a/backend/python/exllama/requirements.txt b/backend/python/exllama/requirements.txt index 3e227c2c..b9c192d5 100644 --- a/backend/python/exllama/requirements.txt +++ b/backend/python/exllama/requirements.txt @@ -1,4 +1,4 @@ -grpcio==1.66.0 +grpcio==1.66.1 protobuf certifi setuptools \ No newline at end of file diff --git a/backend/python/exllama2/requirements.txt b/backend/python/exllama2/requirements.txt index d5c2cc5c..6fb018a0 100644 --- a/backend/python/exllama2/requirements.txt 
+++ b/backend/python/exllama2/requirements.txt @@ -1,4 +1,4 @@ -grpcio==1.66.0 +grpcio==1.66.1 protobuf certifi wheel diff --git a/backend/python/mamba/requirements.txt b/backend/python/mamba/requirements.txt index 9b4dd772..8e1b0195 100644 --- a/backend/python/mamba/requirements.txt +++ b/backend/python/mamba/requirements.txt @@ -1,3 +1,3 @@ -grpcio==1.66.0 +grpcio==1.66.1 protobuf certifi \ No newline at end of file diff --git a/backend/python/openvoice/requirements-intel.txt b/backend/python/openvoice/requirements-intel.txt index 75184a33..a9a4cc20 100644 --- a/backend/python/openvoice/requirements-intel.txt +++ b/backend/python/openvoice/requirements-intel.txt @@ -2,7 +2,7 @@ intel-extension-for-pytorch torch optimum[openvino] -grpcio==1.66.0 +grpcio==1.66.1 protobuf librosa==0.9.1 faster-whisper==1.0.3 diff --git a/backend/python/openvoice/requirements.txt b/backend/python/openvoice/requirements.txt index 71991dc0..b38805be 100644 --- a/backend/python/openvoice/requirements.txt +++ b/backend/python/openvoice/requirements.txt @@ -1,4 +1,4 @@ -grpcio==1.66.0 +grpcio==1.66.1 protobuf librosa faster-whisper diff --git a/backend/python/parler-tts/requirements.txt b/backend/python/parler-tts/requirements.txt index b843981e..0da3da13 100644 --- a/backend/python/parler-tts/requirements.txt +++ b/backend/python/parler-tts/requirements.txt @@ -1,4 +1,4 @@ -grpcio==1.66.0 +grpcio==1.66.1 protobuf certifi llvmlite==0.43.0 \ No newline at end of file diff --git a/backend/python/rerankers/requirements.txt b/backend/python/rerankers/requirements.txt index 9b4dd772..8e1b0195 100644 --- a/backend/python/rerankers/requirements.txt +++ b/backend/python/rerankers/requirements.txt @@ -1,3 +1,3 @@ -grpcio==1.66.0 +grpcio==1.66.1 protobuf certifi \ No newline at end of file diff --git a/backend/python/sentencetransformers/requirements.txt b/backend/python/sentencetransformers/requirements.txt index 9b4dd772..8e1b0195 100644 --- a/backend/python/sentencetransformers/requirements.txt 
+++ b/backend/python/sentencetransformers/requirements.txt @@ -1,3 +1,3 @@ -grpcio==1.66.0 +grpcio==1.66.1 protobuf certifi \ No newline at end of file diff --git a/backend/python/transformers-musicgen/requirements.txt b/backend/python/transformers-musicgen/requirements.txt index f4512663..fb1119a9 100644 --- a/backend/python/transformers-musicgen/requirements.txt +++ b/backend/python/transformers-musicgen/requirements.txt @@ -1,4 +1,4 @@ -grpcio==1.66.0 +grpcio==1.66.1 protobuf scipy==1.14.0 certifi \ No newline at end of file diff --git a/backend/python/transformers/requirements.txt b/backend/python/transformers/requirements.txt index 9e056af6..b19c59c0 100644 --- a/backend/python/transformers/requirements.txt +++ b/backend/python/transformers/requirements.txt @@ -1,4 +1,4 @@ -grpcio==1.66.0 +grpcio==1.66.1 protobuf certifi setuptools==69.5.1 # https://github.com/mudler/LocalAI/issues/2406 \ No newline at end of file diff --git a/backend/python/vall-e-x/requirements.txt b/backend/python/vall-e-x/requirements.txt index 9b4dd772..8e1b0195 100644 --- a/backend/python/vall-e-x/requirements.txt +++ b/backend/python/vall-e-x/requirements.txt @@ -1,3 +1,3 @@ -grpcio==1.66.0 +grpcio==1.66.1 protobuf certifi \ No newline at end of file diff --git a/backend/python/vllm/requirements.txt b/backend/python/vllm/requirements.txt index 3e227c2c..b9c192d5 100644 --- a/backend/python/vllm/requirements.txt +++ b/backend/python/vllm/requirements.txt @@ -1,4 +1,4 @@ -grpcio==1.66.0 +grpcio==1.66.1 protobuf certifi setuptools \ No newline at end of file diff --git a/core/cli/worker/worker.go b/core/cli/worker/worker.go index da1b0288..a5d06577 100644 --- a/core/cli/worker/worker.go +++ b/core/cli/worker/worker.go @@ -2,6 +2,7 @@ package worker type WorkerFlags struct { BackendAssetsPath string `env:"LOCALAI_BACKEND_ASSETS_PATH,BACKEND_ASSETS_PATH" type:"path" default:"/tmp/localai/backend_data" help:"Path used to extract libraries that are required by some of the backends in runtime" 
group:"storage"` + ExtraLLamaCPPArgs string `name:"llama-cpp-args" env:"LOCALAI_EXTRA_LLAMA_CPP_ARGS,EXTRA_LLAMA_CPP_ARGS" help:"Extra arguments to pass to llama-cpp-rpc-server"` } type Worker struct { diff --git a/core/cli/worker/worker_llamacpp.go b/core/cli/worker/worker_llamacpp.go index 2baf51ec..9fb69ca1 100644 --- a/core/cli/worker/worker_llamacpp.go +++ b/core/cli/worker/worker_llamacpp.go @@ -3,6 +3,7 @@ package worker import ( "fmt" "os" + "strings" "syscall" cliContext "github.com/mudler/LocalAI/core/cli/context" @@ -12,7 +13,6 @@ import ( ) type LLamaCPP struct { - Args []string `arg:"" optional:"" name:"models" help:"Model configuration URLs to load"` WorkerFlags `embed:""` } @@ -34,9 +34,8 @@ func (r *LLamaCPP) Run(ctx *cliContext.Context) error { "llama-cpp-rpc-server", ) - args := os.Args[4:] + args := strings.Split(r.ExtraLLamaCPPArgs, " ") args, grpcProcess = library.LoadLDSO(r.BackendAssetsPath, args, grpcProcess) - args = append([]string{grpcProcess}, args...) return syscall.Exec( grpcProcess, diff --git a/core/cli/worker/worker_p2p.go b/core/cli/worker/worker_p2p.go index adfd2819..6275481b 100644 --- a/core/cli/worker/worker_p2p.go +++ b/core/cli/worker/worker_p2p.go @@ -25,7 +25,6 @@ type P2P struct { NoRunner bool `env:"LOCALAI_NO_RUNNER,NO_RUNNER" help:"Do not start the llama-cpp-rpc-server"` RunnerAddress string `env:"LOCALAI_RUNNER_ADDRESS,RUNNER_ADDRESS" help:"Address of the llama-cpp-rpc-server"` RunnerPort string `env:"LOCALAI_RUNNER_PORT,RUNNER_PORT" help:"Port of the llama-cpp-rpc-server"` - ExtraLLamaCPPArgs string `name:"llama-cpp-args" env:"LOCALAI_EXTRA_LLAMA_CPP_ARGS,EXTRA_LLAMA_CPP_ARGS" help:"Extra arguments to pass to llama-cpp-rpc-server"` Peer2PeerNetworkID string `env:"LOCALAI_P2P_NETWORK_ID,P2P_NETWORK_ID" help:"Network ID for P2P mode, can be set arbitrarly by the user for grouping a set of instances" group:"p2p"` } diff --git a/docs/content/docs/features/distributed_inferencing.md 
b/docs/content/docs/features/distributed_inferencing.md index c8c60f8d..8c7790c6 100644 --- a/docs/content/docs/features/distributed_inferencing.md +++ b/docs/content/docs/features/distributed_inferencing.md @@ -68,7 +68,7 @@ And navigate the WebUI to the "Swarm" section to see the instructions to connect To start workers for distributing the computational load, run: ```bash -local-ai worker llama-cpp-rpc +local-ai worker llama-cpp-rpc --llama-cpp-args="-H -p -m " ``` And you can specify the address of the workers when starting LocalAI with the `LLAMACPP_GRPC_SERVERS` environment variable: @@ -98,7 +98,7 @@ To reuse the same token later, restart the server with `--p2ptoken` or `P2P_TOKE 2. Start the workers. Copy the `local-ai` binary to other hosts and run as many workers as needed using the token: ```bash -TOKEN=XXX ./local-ai worker p2p-llama-cpp-rpc +TOKEN=XXX ./local-ai worker p2p-llama-cpp-rpc --llama-cpp-args="-m " # 1:06AM INF loading environment variables from file envFile=.env # 1:06AM INF Setting logging to info # {"level":"INFO","time":"2024-05-19T01:06:01.794+0200","caller":"config/config.go:288","message":"connmanager disabled\n"} From 69a3b22fa1b48aab9cb41e85aa3d3c98ca8a7fa9 Mon Sep 17 00:00:00 2001 From: Ettore Di Giacinto Date: Fri, 30 Aug 2024 10:04:36 +0200 Subject: [PATCH 0041/1530] chore(tests): replace runaway models for tests (#3432) Signed-off-by: Ettore Di Giacinto --- backend/python/diffusers/test.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/backend/python/diffusers/test.py b/backend/python/diffusers/test.py index b5e381ba..0e92538e 100644 --- a/backend/python/diffusers/test.py +++ b/backend/python/diffusers/test.py @@ -53,7 +53,7 @@ class TestBackendServicer(unittest.TestCase): self.setUp() with grpc.insecure_channel("localhost:50051") as channel: stub = backend_pb2_grpc.BackendStub(channel) - response = stub.LoadModel(backend_pb2.ModelOptions(Model="runwayml/stable-diffusion-v1-5")) + response = 
stub.LoadModel(backend_pb2.ModelOptions(Model="Lykon/dreamshaper-8")) self.assertTrue(response.success) self.assertEqual(response.message, "Model loaded successfully") except Exception as err: @@ -71,7 +71,7 @@ class TestBackendServicer(unittest.TestCase): self.setUp() with grpc.insecure_channel("localhost:50051") as channel: stub = backend_pb2_grpc.BackendStub(channel) - response = stub.LoadModel(backend_pb2.ModelOptions(Model="runwayml/stable-diffusion-v1-5")) + response = stub.LoadModel(backend_pb2.ModelOptions(Model="Lykon/dreamshaper-8")) print(response.message) self.assertTrue(response.success) image_req = backend_pb2.GenerateImageRequest(positive_prompt="cat", width=16,height=16, dst="test.jpg") @@ -81,4 +81,4 @@ class TestBackendServicer(unittest.TestCase): print(err) self.fail("Image gen service failed") finally: - self.tearDown() \ No newline at end of file + self.tearDown() From 607fd066f0b47cca0d14bd65a64a6385f4f98be3 Mon Sep 17 00:00:00 2001 From: Ettore Di Giacinto Date: Fri, 30 Aug 2024 15:20:39 +0200 Subject: [PATCH 0042/1530] chore(model-loader): increase test coverage of model loader (#3433) chore(model-loader): increase coverage of model loader Signed-off-by: Ettore Di Giacinto --- pkg/model/loader.go | 33 +++++++++++- pkg/model/loader_test.go | 105 +++++++++++++++++++++++++++++++++++++++ pkg/model/model.go | 5 +- 3 files changed, 138 insertions(+), 5 deletions(-) create mode 100644 pkg/model/loader_test.go diff --git a/pkg/model/loader.go b/pkg/model/loader.go index c1ed01dc..90fda35f 100644 --- a/pkg/model/loader.go +++ b/pkg/model/loader.go @@ -7,6 +7,7 @@ import ( "path/filepath" "strings" "sync" + "time" "github.com/mudler/LocalAI/pkg/templates" @@ -102,6 +103,18 @@ FILE: return models, nil } +func (ml *ModelLoader) ListModels() []*Model { + ml.mu.Lock() + defer ml.mu.Unlock() + + models := []*Model{} + for _, model := range ml.models { + models = append(models, model) + } + + return models +} + func (ml *ModelLoader) LoadModel(modelName 
string, loader func(string, string) (*Model, error)) (*Model, error) { ml.mu.Lock() defer ml.mu.Unlock() @@ -120,7 +133,12 @@ func (ml *ModelLoader) LoadModel(modelName string, loader func(string, string) ( return nil, err } + if model == nil { + return nil, fmt.Errorf("loader didn't return a model") + } + ml.models[modelName] = model + return model, nil } @@ -146,11 +164,22 @@ func (ml *ModelLoader) CheckIsLoaded(s string) *Model { } log.Debug().Msgf("Model already loaded in memory: %s", s) - alive, err := m.GRPC(false, ml.wd).HealthCheck(context.Background()) + client := m.GRPC(false, ml.wd) + + log.Debug().Msgf("Checking model availability (%s)", s) + cTimeout, cancel := context.WithTimeout(context.Background(), 2*time.Minute) + defer cancel() + + alive, err := client.HealthCheck(cTimeout) if !alive { log.Warn().Msgf("GRPC Model not responding: %s", err.Error()) log.Warn().Msgf("Deleting the process in order to recreate it") - if !ml.grpcProcesses[s].IsAlive() { + process, exists := ml.grpcProcesses[s] + if !exists { + log.Error().Msgf("Process not found for '%s' and the model is not responding anymore !", s) + return m + } + if !process.IsAlive() { log.Debug().Msgf("GRPC Process is not responding: %s", s) // stop and delete the process, this forces to re-load the model and re-create again the service err := ml.deleteProcess(s) diff --git a/pkg/model/loader_test.go b/pkg/model/loader_test.go new file mode 100644 index 00000000..4621844e --- /dev/null +++ b/pkg/model/loader_test.go @@ -0,0 +1,105 @@ +package model_test + +import ( + "errors" + "os" + "path/filepath" + + "github.com/mudler/LocalAI/pkg/model" + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" +) + +var _ = Describe("ModelLoader", func() { + var ( + modelLoader *model.ModelLoader + modelPath string + mockModel *model.Model + ) + + BeforeEach(func() { + // Setup the model loader with a test directory + modelPath = "/tmp/test_model_path" + os.Mkdir(modelPath, 0755) + modelLoader = model.NewModelLoader(modelPath) + }) + + AfterEach(func() { + // Cleanup test directory + os.RemoveAll(modelPath) + }) + + Context("NewModelLoader", func() { + It("should create a new ModelLoader with an empty model map", func() { + Expect(modelLoader).ToNot(BeNil()) + Expect(modelLoader.ModelPath).To(Equal(modelPath)) + Expect(modelLoader.ListModels()).To(BeEmpty()) + }) + }) + + Context("ExistsInModelPath", func() { + It("should return true if a file exists in the model path", func() { + testFile := filepath.Join(modelPath, "test.model") + os.Create(testFile) + Expect(modelLoader.ExistsInModelPath("test.model")).To(BeTrue()) + }) + + It("should return false if a file does not exist in the model path", func() { + Expect(modelLoader.ExistsInModelPath("nonexistent.model")).To(BeFalse()) + }) + }) + + Context("ListFilesInModelPath", func() { + It("should list all valid model files in the model path", func() { + os.Create(filepath.Join(modelPath, "test.model")) + os.Create(filepath.Join(modelPath, "README.md")) + + files, err := modelLoader.ListFilesInModelPath() + Expect(err).To(BeNil()) + Expect(files).To(ContainElement("test.model")) + Expect(files).ToNot(ContainElement("README.md")) + }) + }) + + Context("LoadModel", func() { + It("should load a model and keep it in memory", func() { + mockModel = model.NewModel("test.model") + + mockLoader := func(modelName, modelFile string) (*model.Model, error) { + return mockModel, nil + } + + model, err := modelLoader.LoadModel("test.model", mockLoader) + Expect(err).To(BeNil()) + Expect(model).To(Equal(mockModel)) + Expect(modelLoader.CheckIsLoaded("test.model")).To(Equal(mockModel)) + }) + + It("should return an 
error if loading the model fails", func() { + mockLoader := func(modelName, modelFile string) (*model.Model, error) { + return nil, errors.New("failed to load model") + } + + model, err := modelLoader.LoadModel("test.model", mockLoader) + Expect(err).To(HaveOccurred()) + Expect(model).To(BeNil()) + }) + }) + + Context("ShutdownModel", func() { + It("should shutdown a loaded model", func() { + mockModel = model.NewModel("test.model") + + mockLoader := func(modelName, modelFile string) (*model.Model, error) { + return mockModel, nil + } + + _, err := modelLoader.LoadModel("test.model", mockLoader) + Expect(err).To(BeNil()) + + err = modelLoader.ShutdownModel("test.model") + Expect(err).To(BeNil()) + Expect(modelLoader.CheckIsLoaded("test.model")).To(BeNil()) + }) + }) +}) diff --git a/pkg/model/model.go b/pkg/model/model.go index 26ddb8cc..1927dc0c 100644 --- a/pkg/model/model.go +++ b/pkg/model/model.go @@ -23,7 +23,6 @@ func (m *Model) GRPC(parallel bool, wd *WatchDog) grpc.Backend { enableWD = true } - client := grpc.NewClient(m.address, parallel, wd, enableWD) - m.client = client - return client + m.client = grpc.NewClient(m.address, parallel, wd, enableWD) + return m.client } From 72f97e62bb6dead6f414b04892c324bab73408e4 Mon Sep 17 00:00:00 2001 From: fakezeta Date: Fri, 30 Aug 2024 23:27:49 +0200 Subject: [PATCH 0043/1530] feat: Added Piper voice it-paola-medium (#3434) Added Piper voice it-paola-medium Bundled from https://huggingface.co/rhasspy/piper-voices Signed-off-by: fakezeta --- gallery/index.yaml | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/gallery/index.yaml b/gallery/index.yaml index df65bd5d..7398c538 100644 --- a/gallery/index.yaml +++ b/gallery/index.yaml @@ -5871,6 +5871,16 @@ - filename: voice-it-riccardo_fasol-x-low.tar.gz uri: https://github.com/rhasspy/piper/releases/download/v0.0.2/voice-it-riccardo_fasol-x-low.tar.gz sha256: 394b27b8780f5167e73a62ac103839cc438abc7edb544192f965e5b8f5f4acdb +- !!merge <<: *piper + url: 
github:mudler/LocalAI/gallery/piper.yaml@master + name: voice-it-paola-medium + overrides: + parameters: + model: it-paola-medium.onnx + files: + - filename: voice-it-paola-medium.tar.gz + uri: https://github.com/fakezeta/piper-paola-voice/releases/download/v1.0.0/voice-it-paola-medium.tar.gz + sha256: d117d1e8cdd022898546e1697a53ab5330b69cac32701e888ea3e3b10f4adb54 - !!merge <<: *piper url: github:mudler/LocalAI/gallery/piper.yaml@master name: voice-kk-iseke-x-low From b8e7a765246bca34d838656a7629eebc2c5a7b22 Mon Sep 17 00:00:00 2001 From: Ettore Di Giacinto Date: Sat, 31 Aug 2024 01:21:45 +0200 Subject: [PATCH 0044/1530] chore(deps): update llama.cpp (#3438) Signed-off-by: Ettore Di Giacinto --- Makefile | 2 +- backend/cpp/llama/grpc-server.cpp | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/Makefile b/Makefile index 69bb3aa6..9e3cdd43 100644 --- a/Makefile +++ b/Makefile @@ -8,7 +8,7 @@ DETECT_LIBS?=true # llama.cpp versions GOLLAMA_REPO?=https://github.com/go-skynet/go-llama.cpp GOLLAMA_VERSION?=2b57a8ae43e4699d3dc5d1496a1ccd42922993be -CPPLLAMA_VERSION?=9fe94ccac92693d4ae1bc283ff0574e8b3f4e765 +CPPLLAMA_VERSION?=0ab30f8d82fc7156b750c194d64a887e80cbfb82 # go-rwkv version RWKV_REPO?=https://github.com/donomii/go-rwkv.cpp diff --git a/backend/cpp/llama/grpc-server.cpp b/backend/cpp/llama/grpc-server.cpp index 57ab46fe..e1b6f868 100644 --- a/backend/cpp/llama/grpc-server.cpp +++ b/backend/cpp/llama/grpc-server.cpp @@ -1119,7 +1119,7 @@ struct llama_server_context continue; } - if (!llava_image_embed_make_with_clip_img(clp_ctx, params.n_threads, img.img_data, &img.image_embedding, &img.image_tokens)) { + if (!llava_image_embed_make_with_clip_img(clp_ctx, params.cpuparams.n_threads, img.img_data, &img.image_embedding, &img.image_tokens)) { LOG_TEE("Error processing the given image"); return false; } @@ -2210,7 +2210,7 @@ static void params_parse(const backend::ModelOptions* request, params.model_alias = request->modelfile(); params.n_ctx = 
request->contextsize(); //params.memory_f16 = request->f16memory(); - params.n_threads = request->threads(); + params.cpuparams.n_threads = request->threads(); params.n_gpu_layers = request->ngpulayers(); params.n_batch = request->nbatch(); // Set params.n_parallel by environment variable (LLAMA_PARALLEL), defaults to 1 From 2259512345b67286682c33b6209ba76f9a949bde Mon Sep 17 00:00:00 2001 From: "LocalAI [bot]" <139863280+localai-bot@users.noreply.github.com> Date: Sat, 31 Aug 2024 23:40:57 +0200 Subject: [PATCH 0045/1530] chore: :arrow_up: Update ggerganov/llama.cpp to `a47667cff41f5a198eb791974e0afcc1cddd3229` (#3441) :arrow_up: Update ggerganov/llama.cpp Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com> Co-authored-by: mudler <2420543+mudler@users.noreply.github.com> --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index 9e3cdd43..c12689d3 100644 --- a/Makefile +++ b/Makefile @@ -8,7 +8,7 @@ DETECT_LIBS?=true # llama.cpp versions GOLLAMA_REPO?=https://github.com/go-skynet/go-llama.cpp GOLLAMA_VERSION?=2b57a8ae43e4699d3dc5d1496a1ccd42922993be -CPPLLAMA_VERSION?=0ab30f8d82fc7156b750c194d64a887e80cbfb82 +CPPLLAMA_VERSION?=a47667cff41f5a198eb791974e0afcc1cddd3229 # go-rwkv version RWKV_REPO?=https://github.com/donomii/go-rwkv.cpp From 164dee65c323c257034cfd1def019e0737343612 Mon Sep 17 00:00:00 2001 From: "LocalAI [bot]" <139863280+localai-bot@users.noreply.github.com> Date: Sat, 31 Aug 2024 23:44:26 +0200 Subject: [PATCH 0046/1530] chore(model-gallery): :arrow_up: update checksum (#3442) :arrow_up: Checksum updates in gallery/index.yaml Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com> Co-authored-by: mudler <2420543+mudler@users.noreply.github.com> --- gallery/index.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/gallery/index.yaml b/gallery/index.yaml index 7398c538..cca5234f 100644 --- a/gallery/index.yaml +++ 
b/gallery/index.yaml @@ -5880,7 +5880,7 @@ files: - filename: voice-it-paola-medium.tar.gz uri: https://github.com/fakezeta/piper-paola-voice/releases/download/v1.0.0/voice-it-paola-medium.tar.gz - sha256: d117d1e8cdd022898546e1697a53ab5330b69cac32701e888ea3e3b10f4adb54 + sha256: 61d3bac0ff6d347daea5464c4b3ae156a450b603a916cc9ed7deecdeba17153a - !!merge <<: *piper url: github:mudler/LocalAI/gallery/piper.yaml@master name: voice-kk-iseke-x-low From 45ce1803f8e7db6730ef1b665bc2706b4b9ee776 Mon Sep 17 00:00:00 2001 From: Ettore Di Giacinto Date: Sun, 1 Sep 2024 12:33:18 +0200 Subject: [PATCH 0047/1530] models(gallery): add hubble-4b-v1 (#3444) Signed-off-by: Ettore Di Giacinto --- gallery/index.yaml | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/gallery/index.yaml b/gallery/index.yaml index cca5234f..2f06e8de 100644 --- a/gallery/index.yaml +++ b/gallery/index.yaml @@ -407,6 +407,22 @@ - filename: llama-3.1-storm-8b-q4_k_m.gguf sha256: d714e960211ee0fe6113d3131a6573e438f37debd07e1067d2571298624414a0 uri: huggingface://mudler/Llama-3.1-Storm-8B-Q4_K_M-GGUF/llama-3.1-storm-8b-q4_k_m.gguf +- !!merge <<: *llama31 + name: "hubble-4b-v1" + url: "github:mudler/LocalAI/gallery/chatml.yaml@master" + icon: https://cdn-uploads.huggingface.co/production/uploads/65f2fd1c25b848bd061b5c2e/R8_o3CCpTgKv5Wnnry7E_.png + urls: + - https://huggingface.co/TheDrummer/Hubble-4B-v1-GGUF + description: | + Equipped with his five senses, man explores the universe around him and calls the adventure 'Science'. + This is a finetune of Nvidia's Llama 3.1 4B Minitron - a shrunk down model of Llama 3.1 8B 128K. 
+ overrides: + parameters: + model: Hubble-4B-v1-Q4_K_M.gguf + files: + - filename: Hubble-4B-v1-Q4_K_M.gguf + sha256: f8f44b0fb69e382cf0099a674959ab1a1d86e72ccd1bbce5392210019502a513 + uri: huggingface://TheDrummer/Hubble-4B-v1-GGUF/Hubble-4B-v1-Q4_K_M.gguf ## Uncensored models - !!merge <<: *llama31 name: "humanish-roleplay-llama-3.1-8b-i1" From 3daba4731c37bf30acd422688e2ef39948c695ec Mon Sep 17 00:00:00 2001 From: "LocalAI [bot]" <139863280+localai-bot@users.noreply.github.com> Date: Sun, 1 Sep 2024 23:41:56 +0200 Subject: [PATCH 0048/1530] chore: :arrow_up: Update ggerganov/llama.cpp to `8f1d81a0b6f50b9bad72db0b6fcd299ad9ecd48c` (#3445) :arrow_up: Update ggerganov/llama.cpp Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com> Co-authored-by: mudler <2420543+mudler@users.noreply.github.com> --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index c12689d3..be80d875 100644 --- a/Makefile +++ b/Makefile @@ -8,7 +8,7 @@ DETECT_LIBS?=true # llama.cpp versions GOLLAMA_REPO?=https://github.com/go-skynet/go-llama.cpp GOLLAMA_VERSION?=2b57a8ae43e4699d3dc5d1496a1ccd42922993be -CPPLLAMA_VERSION?=a47667cff41f5a198eb791974e0afcc1cddd3229 +CPPLLAMA_VERSION?=8f1d81a0b6f50b9bad72db0b6fcd299ad9ecd48c # go-rwkv version RWKV_REPO?=https://github.com/donomii/go-rwkv.cpp From 1655411ccd7d856ba9171f111aafa6a7b7c0c8c5 Mon Sep 17 00:00:00 2001 From: "LocalAI [bot]" <139863280+localai-bot@users.noreply.github.com> Date: Mon, 2 Sep 2024 15:44:47 +0200 Subject: [PATCH 0049/1530] chore(model-gallery): :arrow_up: update checksum (#3446) :arrow_up: Checksum updates in gallery/index.yaml Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com> Co-authored-by: mudler <2420543+mudler@users.noreply.github.com> --- gallery/index.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/gallery/index.yaml b/gallery/index.yaml index 2f06e8de..da93ecd3 100644 --- 
a/gallery/index.yaml +++ b/gallery/index.yaml @@ -421,8 +421,8 @@ model: Hubble-4B-v1-Q4_K_M.gguf files: - filename: Hubble-4B-v1-Q4_K_M.gguf - sha256: f8f44b0fb69e382cf0099a674959ab1a1d86e72ccd1bbce5392210019502a513 uri: huggingface://TheDrummer/Hubble-4B-v1-GGUF/Hubble-4B-v1-Q4_K_M.gguf + sha256: 0721294d0e861c6e6162a112fc7242e0c4b260c156137f4bcbb08667f1748080 ## Uncensored models - !!merge <<: *llama31 name: "humanish-roleplay-llama-3.1-8b-i1" From c2804c42fe43da1b5d16feb8683bbb3c401e840f Mon Sep 17 00:00:00 2001 From: Dave Date: Mon, 2 Sep 2024 09:48:53 -0400 Subject: [PATCH 0050/1530] fix: untangle pkg/grpc and core/schema for Transcription (#3419) untangle pkg/grpc and core/schema in Transcribe Signed-off-by: Dave Lee --- Makefile | 2 +- backend/go/transcribe/transcript.go | 104 ------------------- backend/go/transcribe/whisper.go | 26 ----- backend/go/transcribe/{ => whisper}/main.go | 0 backend/go/transcribe/whisper/whisper.go | 105 ++++++++++++++++++++ core/backend/transcript.go | 24 ++++- pkg/grpc/backend.go | 3 +- pkg/grpc/base/base.go | 5 +- pkg/grpc/client.go | 25 +---- pkg/grpc/embed.go | 26 +---- pkg/grpc/interface.go | 3 +- pkg/utils/ffmpeg.go | 25 +++++ 12 files changed, 162 insertions(+), 186 deletions(-) delete mode 100644 backend/go/transcribe/transcript.go delete mode 100644 backend/go/transcribe/whisper.go rename backend/go/transcribe/{ => whisper}/main.go (100%) create mode 100644 backend/go/transcribe/whisper/whisper.go create mode 100644 pkg/utils/ffmpeg.go diff --git a/Makefile b/Makefile index be80d875..a360fe88 100644 --- a/Makefile +++ b/Makefile @@ -846,7 +846,7 @@ endif backend-assets/grpc/whisper: sources/whisper.cpp sources/whisper.cpp/libwhisper.a backend-assets/grpc CGO_LDFLAGS="$(CGO_LDFLAGS) $(CGO_LDFLAGS_WHISPER)" C_INCLUDE_PATH="$(CURDIR)/sources/whisper.cpp/include:$(CURDIR)/sources/whisper.cpp/ggml/include" LIBRARY_PATH=$(CURDIR)/sources/whisper.cpp \ - $(GOCMD) build -ldflags "$(LD_FLAGS)" -tags "$(GO_TAGS)" -o 
backend-assets/grpc/whisper ./backend/go/transcribe/ + $(GOCMD) build -ldflags "$(LD_FLAGS)" -tags "$(GO_TAGS)" -o backend-assets/grpc/whisper ./backend/go/transcribe/whisper ifneq ($(UPX),) $(UPX) backend-assets/grpc/whisper endif diff --git a/backend/go/transcribe/transcript.go b/backend/go/transcribe/transcript.go deleted file mode 100644 index 6831167f..00000000 --- a/backend/go/transcribe/transcript.go +++ /dev/null @@ -1,104 +0,0 @@ -package main - -import ( - "fmt" - "os" - "os/exec" - "path/filepath" - - "github.com/ggerganov/whisper.cpp/bindings/go/pkg/whisper" - "github.com/go-audio/wav" - "github.com/mudler/LocalAI/core/schema" -) - -func ffmpegCommand(args []string) (string, error) { - cmd := exec.Command("ffmpeg", args...) // Constrain this to ffmpeg to permit security scanner to see that the command is safe. - cmd.Env = os.Environ() - out, err := cmd.CombinedOutput() - return string(out), err -} - -// AudioToWav converts audio to wav for transcribe. -// TODO: use https://github.com/mccoyst/ogg? 
-func audioToWav(src, dst string) error { - commandArgs := []string{"-i", src, "-format", "s16le", "-ar", "16000", "-ac", "1", "-acodec", "pcm_s16le", dst} - out, err := ffmpegCommand(commandArgs) - if err != nil { - return fmt.Errorf("error: %w out: %s", err, out) - } - return nil -} - -func Transcript(model whisper.Model, audiopath, language string, translate bool, threads uint) (schema.TranscriptionResult, error) { - res := schema.TranscriptionResult{} - - dir, err := os.MkdirTemp("", "whisper") - if err != nil { - return res, err - } - defer os.RemoveAll(dir) - - convertedPath := filepath.Join(dir, "converted.wav") - - if err := audioToWav(audiopath, convertedPath); err != nil { - return res, err - } - - // Open samples - fh, err := os.Open(convertedPath) - if err != nil { - return res, err - } - defer fh.Close() - - // Read samples - d := wav.NewDecoder(fh) - buf, err := d.FullPCMBuffer() - if err != nil { - return res, err - } - - data := buf.AsFloat32Buffer().Data - - // Process samples - context, err := model.NewContext() - if err != nil { - return res, err - - } - - context.SetThreads(threads) - - if language != "" { - context.SetLanguage(language) - } else { - context.SetLanguage("auto") - } - - if translate { - context.SetTranslate(true) - } - - if err := context.Process(data, nil, nil); err != nil { - return res, err - } - - for { - s, err := context.NextSegment() - if err != nil { - break - } - - var tokens []int - for _, t := range s.Tokens { - tokens = append(tokens, t.Id) - } - - segment := schema.Segment{Id: s.Num, Text: s.Text, Start: s.Start, End: s.End, Tokens: tokens} - res.Segments = append(res.Segments, segment) - - res.Text += s.Text - } - - return res, nil -} diff --git a/backend/go/transcribe/whisper.go b/backend/go/transcribe/whisper.go deleted file mode 100644 index 61ae98e9..00000000 --- a/backend/go/transcribe/whisper.go +++ /dev/null @@ -1,26 +0,0 @@ -package main - -// This is a wrapper to statisfy the GRPC service interface -// It 
is meant to be used by the main executable that is the server for the specific backend type (falcon, gpt3, etc) -import ( - "github.com/ggerganov/whisper.cpp/bindings/go/pkg/whisper" - "github.com/mudler/LocalAI/core/schema" - "github.com/mudler/LocalAI/pkg/grpc/base" - pb "github.com/mudler/LocalAI/pkg/grpc/proto" -) - -type Whisper struct { - base.SingleThread - whisper whisper.Model -} - -func (sd *Whisper) Load(opts *pb.ModelOptions) error { - // Note: the Model here is a path to a directory containing the model files - w, err := whisper.New(opts.ModelFile) - sd.whisper = w - return err -} - -func (sd *Whisper) AudioTranscription(opts *pb.TranscriptRequest) (schema.TranscriptionResult, error) { - return Transcript(sd.whisper, opts.Dst, opts.Language, opts.Translate, uint(opts.Threads)) -} diff --git a/backend/go/transcribe/main.go b/backend/go/transcribe/whisper/main.go similarity index 100% rename from backend/go/transcribe/main.go rename to backend/go/transcribe/whisper/main.go diff --git a/backend/go/transcribe/whisper/whisper.go b/backend/go/transcribe/whisper/whisper.go new file mode 100644 index 00000000..63416bb3 --- /dev/null +++ b/backend/go/transcribe/whisper/whisper.go @@ -0,0 +1,105 @@ +package main + +// This is a wrapper to statisfy the GRPC service interface +// It is meant to be used by the main executable that is the server for the specific backend type (falcon, gpt3, etc) +import ( + "os" + "path/filepath" + + "github.com/ggerganov/whisper.cpp/bindings/go/pkg/whisper" + "github.com/go-audio/wav" + "github.com/mudler/LocalAI/pkg/grpc/base" + pb "github.com/mudler/LocalAI/pkg/grpc/proto" + "github.com/mudler/LocalAI/pkg/utils" +) + +type Whisper struct { + base.SingleThread + whisper whisper.Model +} + +func (sd *Whisper) Load(opts *pb.ModelOptions) error { + // Note: the Model here is a path to a directory containing the model files + w, err := whisper.New(opts.ModelFile) + sd.whisper = w + return err +} + +func (sd *Whisper) 
AudioTranscription(opts *pb.TranscriptRequest) (pb.TranscriptResult, error) { + + dir, err := os.MkdirTemp("", "whisper") + if err != nil { + return pb.TranscriptResult{}, err + } + defer os.RemoveAll(dir) + + convertedPath := filepath.Join(dir, "converted.wav") + + if err := utils.AudioToWav(opts.Dst, convertedPath); err != nil { + return pb.TranscriptResult{}, err + } + + // Open samples + fh, err := os.Open(convertedPath) + if err != nil { + return pb.TranscriptResult{}, err + } + defer fh.Close() + + // Read samples + d := wav.NewDecoder(fh) + buf, err := d.FullPCMBuffer() + if err != nil { + return pb.TranscriptResult{}, err + } + + data := buf.AsFloat32Buffer().Data + + // Process samples + context, err := sd.whisper.NewContext() + if err != nil { + return pb.TranscriptResult{}, err + + } + + context.SetThreads(uint(opts.Threads)) + + if opts.Language != "" { + context.SetLanguage(opts.Language) + } else { + context.SetLanguage("auto") + } + + if opts.Translate { + context.SetTranslate(true) + } + + if err := context.Process(data, nil, nil); err != nil { + return pb.TranscriptResult{}, err + } + + segments := []*pb.TranscriptSegment{} + text := "" + for { + s, err := context.NextSegment() + if err != nil { + break + } + + var tokens []int32 + for _, t := range s.Tokens { + tokens = append(tokens, int32(t.Id)) + } + + segment := &pb.TranscriptSegment{Id: int32(s.Num), Text: s.Text, Start: int64(s.Start), End: int64(s.End), Tokens: tokens} + segments = append(segments, segment) + + text += s.Text + } + + return pb.TranscriptResult{ + Segments: segments, + Text: text, + }, nil + +} diff --git a/core/backend/transcript.go b/core/backend/transcript.go index 0980288f..ed3e24a5 100644 --- a/core/backend/transcript.go +++ b/core/backend/transcript.go @@ -3,6 +3,7 @@ package backend import ( "context" "fmt" + "time" "github.com/mudler/LocalAI/core/config" "github.com/mudler/LocalAI/core/schema" @@ -30,10 +31,31 @@ func ModelTranscription(audio, language string, 
translate bool, ml *model.ModelL return nil, fmt.Errorf("could not load whisper model") } - return whisperModel.AudioTranscription(context.Background(), &proto.TranscriptRequest{ + r, err := whisperModel.AudioTranscription(context.Background(), &proto.TranscriptRequest{ Dst: audio, Language: language, Translate: translate, Threads: uint32(*backendConfig.Threads), }) + if err != nil { + return nil, err + } + tr := &schema.TranscriptionResult{ + Text: r.Text, + } + for _, s := range r.Segments { + var tks []int + for _, t := range s.Tokens { + tks = append(tks, int(t)) + } + tr.Segments = append(tr.Segments, + schema.Segment{ + Text: s.Text, + Id: int(s.Id), + Start: time.Duration(s.Start), + End: time.Duration(s.End), + Tokens: tks, + }) + } + return tr, err } diff --git a/pkg/grpc/backend.go b/pkg/grpc/backend.go index 3821678c..85c9e5bc 100644 --- a/pkg/grpc/backend.go +++ b/pkg/grpc/backend.go @@ -3,7 +3,6 @@ package grpc import ( "context" - "github.com/mudler/LocalAI/core/schema" pb "github.com/mudler/LocalAI/pkg/grpc/proto" "google.golang.org/grpc" ) @@ -42,7 +41,7 @@ type Backend interface { GenerateImage(ctx context.Context, in *pb.GenerateImageRequest, opts ...grpc.CallOption) (*pb.Result, error) TTS(ctx context.Context, in *pb.TTSRequest, opts ...grpc.CallOption) (*pb.Result, error) SoundGeneration(ctx context.Context, in *pb.SoundGenerationRequest, opts ...grpc.CallOption) (*pb.Result, error) - AudioTranscription(ctx context.Context, in *pb.TranscriptRequest, opts ...grpc.CallOption) (*schema.TranscriptionResult, error) + AudioTranscription(ctx context.Context, in *pb.TranscriptRequest, opts ...grpc.CallOption) (*pb.TranscriptResult, error) TokenizeString(ctx context.Context, in *pb.PredictOptions, opts ...grpc.CallOption) (*pb.TokenizationResponse, error) Status(ctx context.Context) (*pb.StatusResponse, error) diff --git a/pkg/grpc/base/base.go b/pkg/grpc/base/base.go index 21dd1578..95dca561 100644 --- a/pkg/grpc/base/base.go +++ b/pkg/grpc/base/base.go 
@@ -6,7 +6,6 @@ import ( "fmt" "os" - "github.com/mudler/LocalAI/core/schema" pb "github.com/mudler/LocalAI/pkg/grpc/proto" gopsutil "github.com/shirou/gopsutil/v3/process" ) @@ -53,8 +52,8 @@ func (llm *Base) GenerateImage(*pb.GenerateImageRequest) error { return fmt.Errorf("unimplemented") } -func (llm *Base) AudioTranscription(*pb.TranscriptRequest) (schema.TranscriptionResult, error) { - return schema.TranscriptionResult{}, fmt.Errorf("unimplemented") +func (llm *Base) AudioTranscription(*pb.TranscriptRequest) (pb.TranscriptResult, error) { + return pb.TranscriptResult{}, fmt.Errorf("unimplemented") } func (llm *Base) TTS(*pb.TTSRequest) error { diff --git a/pkg/grpc/client.go b/pkg/grpc/client.go index b654e9c9..032c9c00 100644 --- a/pkg/grpc/client.go +++ b/pkg/grpc/client.go @@ -7,7 +7,6 @@ import ( "sync" "time" - "github.com/mudler/LocalAI/core/schema" pb "github.com/mudler/LocalAI/pkg/grpc/proto" "google.golang.org/grpc" "google.golang.org/grpc/credentials/insecure" @@ -228,7 +227,7 @@ func (c *Client) SoundGeneration(ctx context.Context, in *pb.SoundGenerationRequ return client.SoundGeneration(ctx, in, opts...) } -func (c *Client) AudioTranscription(ctx context.Context, in *pb.TranscriptRequest, opts ...grpc.CallOption) (*schema.TranscriptionResult, error) { +func (c *Client) AudioTranscription(ctx context.Context, in *pb.TranscriptRequest, opts ...grpc.CallOption) (*pb.TranscriptResult, error) { if !c.parallel { c.opMutex.Lock() defer c.opMutex.Unlock() @@ -243,27 +242,7 @@ func (c *Client) AudioTranscription(ctx context.Context, in *pb.TranscriptReques } defer conn.Close() client := pb.NewBackendClient(conn) - res, err := client.AudioTranscription(ctx, in, opts...) 
- if err != nil { - return nil, err - } - tresult := &schema.TranscriptionResult{} - for _, s := range res.Segments { - tks := []int{} - for _, t := range s.Tokens { - tks = append(tks, int(t)) - } - tresult.Segments = append(tresult.Segments, - schema.Segment{ - Text: s.Text, - Id: int(s.Id), - Start: time.Duration(s.Start), - End: time.Duration(s.End), - Tokens: tks, - }) - } - tresult.Text = res.Text - return tresult, err + return client.AudioTranscription(ctx, in, opts...) } func (c *Client) TokenizeString(ctx context.Context, in *pb.PredictOptions, opts ...grpc.CallOption) (*pb.TokenizationResponse, error) { diff --git a/pkg/grpc/embed.go b/pkg/grpc/embed.go index 67d83e27..3155ff59 100644 --- a/pkg/grpc/embed.go +++ b/pkg/grpc/embed.go @@ -2,9 +2,7 @@ package grpc import ( "context" - "time" - "github.com/mudler/LocalAI/core/schema" pb "github.com/mudler/LocalAI/pkg/grpc/proto" "google.golang.org/grpc" "google.golang.org/grpc/metadata" @@ -57,28 +55,8 @@ func (e *embedBackend) SoundGeneration(ctx context.Context, in *pb.SoundGenerati return e.s.SoundGeneration(ctx, in) } -func (e *embedBackend) AudioTranscription(ctx context.Context, in *pb.TranscriptRequest, opts ...grpc.CallOption) (*schema.TranscriptionResult, error) { - r, err := e.s.AudioTranscription(ctx, in) - if err != nil { - return nil, err - } - tr := &schema.TranscriptionResult{} - for _, s := range r.Segments { - var tks []int - for _, t := range s.Tokens { - tks = append(tks, int(t)) - } - tr.Segments = append(tr.Segments, - schema.Segment{ - Text: s.Text, - Id: int(s.Id), - Start: time.Duration(s.Start), - End: time.Duration(s.End), - Tokens: tks, - }) - } - tr.Text = r.Text - return tr, err +func (e *embedBackend) AudioTranscription(ctx context.Context, in *pb.TranscriptRequest, opts ...grpc.CallOption) (*pb.TranscriptResult, error) { + return e.s.AudioTranscription(ctx, in) } func (e *embedBackend) TokenizeString(ctx context.Context, in *pb.PredictOptions, opts ...grpc.CallOption) 
(*pb.TokenizationResponse, error) { diff --git a/pkg/grpc/interface.go b/pkg/grpc/interface.go index 731dcd5b..97b958cc 100644 --- a/pkg/grpc/interface.go +++ b/pkg/grpc/interface.go @@ -1,7 +1,6 @@ package grpc import ( - "github.com/mudler/LocalAI/core/schema" pb "github.com/mudler/LocalAI/pkg/grpc/proto" ) @@ -15,7 +14,7 @@ type LLM interface { Load(*pb.ModelOptions) error Embeddings(*pb.PredictOptions) ([]float32, error) GenerateImage(*pb.GenerateImageRequest) error - AudioTranscription(*pb.TranscriptRequest) (schema.TranscriptionResult, error) + AudioTranscription(*pb.TranscriptRequest) (pb.TranscriptResult, error) TTS(*pb.TTSRequest) error SoundGeneration(*pb.SoundGenerationRequest) error TokenizeString(*pb.PredictOptions) (pb.TokenizationResponse, error) diff --git a/pkg/utils/ffmpeg.go b/pkg/utils/ffmpeg.go new file mode 100644 index 00000000..16656d8e --- /dev/null +++ b/pkg/utils/ffmpeg.go @@ -0,0 +1,25 @@ +package utils + +import ( + "fmt" + "os" + "os/exec" +) + +func ffmpegCommand(args []string) (string, error) { + cmd := exec.Command("ffmpeg", args...) // Constrain this to ffmpeg to permit security scanner to see that the command is safe. + cmd.Env = os.Environ() + out, err := cmd.CombinedOutput() + return string(out), err +} + +// AudioToWav converts audio to wav for transcribe. +// TODO: use https://github.com/mccoyst/ogg? 
+func AudioToWav(src, dst string) error { + commandArgs := []string{"-i", src, "-format", "s16le", "-ar", "16000", "-ac", "1", "-acodec", "pcm_s16le", dst} + out, err := ffmpegCommand(commandArgs) + if err != nil { + return fmt.Errorf("error: %w out: %s", err, out) + } + return nil +} From 56db715a91f82eb6591d61a7f95c352c65277cf3 Mon Sep 17 00:00:00 2001 From: Ettore Di Giacinto Date: Mon, 2 Sep 2024 17:41:56 +0200 Subject: [PATCH 0051/1530] chore(deps): update whisper.cpp (#3443) * chore(deps): update whisper.cpp Signed-off-by: Ettore Di Giacinto * chore: use clang Signed-off-by: Ettore Di Giacinto * fix: path for x86 is different Signed-off-by: Ettore Di Giacinto * test: try to skip whisper on mac x86_64 Signed-off-by: Ettore Di Giacinto --------- Signed-off-by: Ettore Di Giacinto --- .github/workflows/release.yaml | 6 +++--- .github/workflows/test.yml | 3 ++- Makefile | 2 +- 3 files changed, 6 insertions(+), 5 deletions(-) diff --git a/.github/workflows/release.yaml b/.github/workflows/release.yaml index 5c883db4..a1318b19 100644 --- a/.github/workflows/release.yaml +++ b/.github/workflows/release.yaml @@ -294,7 +294,7 @@ jobs: export C_INCLUDE_PATH=/usr/local/include export CPLUS_INCLUDE_PATH=/usr/local/include export PATH=$PATH:$GOPATH/bin - + export SKIP_GRPC_BACKEND=backend-assets/grpc/whisper make dist - uses: actions/upload-artifact@v4 with: @@ -327,7 +327,7 @@ jobs: cache: false - name: Dependencies run: | - brew install protobuf grpc + brew install protobuf grpc libomp llvm go install google.golang.org/grpc/cmd/protoc-gen-go-grpc@1958fcbe2ca8bd93af633f11e97d44e567e945af go install google.golang.org/protobuf/cmd/protoc-gen-go@v1.34.2 - name: Build @@ -336,7 +336,7 @@ jobs: export C_INCLUDE_PATH=/usr/local/include export CPLUS_INCLUDE_PATH=/usr/local/include export PATH=$PATH:$GOPATH/bin - + export CC=/opt/homebrew/opt/llvm/bin/clang make dist - uses: actions/upload-artifact@v4 with: diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml 
index e6efe77f..2af3fd00 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -214,12 +214,13 @@ jobs: run: go version - name: Dependencies run: | - brew install protobuf grpc make protoc-gen-go protoc-gen-go-grpc + brew install protobuf grpc make protoc-gen-go protoc-gen-go-grpc libomp llvm pip install --user --no-cache-dir grpcio-tools==1.64.1 - name: Test run: | export C_INCLUDE_PATH=/usr/local/include export CPLUS_INCLUDE_PATH=/usr/local/include + export CC=/opt/homebrew/opt/llvm/bin/clang # Used to run the newer GNUMake version from brew that supports --output-sync export PATH="/opt/homebrew/opt/make/libexec/gnubin:$PATH" BUILD_TYPE="GITHUB_CI_HAS_BROKEN_METAL" CMAKE_ARGS="-DGGML_F16C=OFF -DGGML_AVX512=OFF -DGGML_AVX2=OFF -DGGML_FMA=OFF" make --jobs 4 --output-sync=target test diff --git a/Makefile b/Makefile index a360fe88..43d711bd 100644 --- a/Makefile +++ b/Makefile @@ -16,7 +16,7 @@ RWKV_VERSION?=661e7ae26d442f5cfebd2a0881b44e8c55949ec6 # whisper.cpp version WHISPER_REPO?=https://github.com/ggerganov/whisper.cpp -WHISPER_CPP_VERSION?=9e3c5345cd46ea718209db53464e426c3fe7a25e +WHISPER_CPP_VERSION?=c96906d84dd6a1c40ea797ad542df3a0c47307a3 # bert.cpp version BERT_REPO?=https://github.com/go-skynet/go-bert.cpp From 68fc014c6d5797449d9b9e793d7f1ec77e2bbf8a Mon Sep 17 00:00:00 2001 From: Ettore Di Giacinto Date: Mon, 2 Sep 2024 21:44:32 +0200 Subject: [PATCH 0052/1530] feat(vllm): add support for embeddings (#3440) Signed-off-by: Ettore Di Giacinto --- backend/python/vllm/backend.py | 20 ++++++++++++++++++++ backend/python/vllm/test.py | 23 +++++++++++++++++++++++ 2 files changed, 43 insertions(+) diff --git a/backend/python/vllm/backend.py b/backend/python/vllm/backend.py index 2d8b55db..2cf15c1c 100644 --- a/backend/python/vllm/backend.py +++ b/backend/python/vllm/backend.py @@ -135,6 +135,26 @@ class BackendServicer(backend_pb2_grpc.BackendServicer): res = await gen.__anext__() return res + def Embedding(self, request, context): + """ + 
A gRPC method that calculates embeddings for a given sentence. + + Args: + request: An EmbeddingRequest object that contains the request parameters. + context: A grpc.ServicerContext object that provides information about the RPC. + + Returns: + An EmbeddingResult object that contains the calculated embeddings. + """ + print("Calculated embeddings for: " + request.Embeddings, file=sys.stderr) + outputs = self.model.encode(request.Embeddings) + # Check if we have one result at least + if len(outputs) == 0: + context.set_code(grpc.StatusCode.INVALID_ARGUMENT) + context.set_details("No embeddings were calculated.") + return backend_pb2.EmbeddingResult() + return backend_pb2.EmbeddingResult(embeddings=outputs[0].outputs.embedding) + async def PredictStream(self, request, context): """ Generates text based on the given prompt and sampling parameters, and streams the results. diff --git a/backend/python/vllm/test.py b/backend/python/vllm/test.py index 83fb2651..9f325b10 100644 --- a/backend/python/vllm/test.py +++ b/backend/python/vllm/test.py @@ -72,5 +72,28 @@ class TestBackendServicer(unittest.TestCase): except Exception as err: print(err) self.fail("text service failed") + finally: + self.tearDown() + + def test_embedding(self): + """ + This method tests if the embeddings are generated successfully + """ + try: + self.setUp() + with grpc.insecure_channel("localhost:50051") as channel: + stub = backend_pb2_grpc.BackendStub(channel) + response = stub.LoadModel(backend_pb2.ModelOptions(Model="intfloat/e5-mistral-7b-instruct")) + self.assertTrue(response.success) + embedding_request = backend_pb2.PredictOptions(Embeddings="This is a test sentence.") + embedding_response = stub.Embedding(embedding_request) + self.assertIsNotNone(embedding_response.embeddings) + # assert that is a list of floats + self.assertIsInstance(embedding_response.embeddings, list) + # assert that the list is not empty + self.assertTrue(len(embedding_response.embeddings) > 0) + except Exception as 
err: + print(err) + self.fail("Embedding service failed") finally: self.tearDown() \ No newline at end of file From 589a2ac8694946ac15b612c1d8c90b1a8c10ea33 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 2 Sep 2024 21:19:51 +0000 Subject: [PATCH 0053/1530] chore(deps): Bump langchain from 0.2.14 to 0.2.15 in /examples/functions (#3453) Bumps [langchain](https://github.com/langchain-ai/langchain) from 0.2.14 to 0.2.15. - [Release notes](https://github.com/langchain-ai/langchain/releases) - [Commits](https://github.com/langchain-ai/langchain/compare/langchain==0.2.14...langchain==0.2.15) --- updated-dependencies: - dependency-name: langchain dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- examples/functions/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/functions/requirements.txt b/examples/functions/requirements.txt index 5713a163..429e5660 100644 --- a/examples/functions/requirements.txt +++ b/examples/functions/requirements.txt @@ -1,2 +1,2 @@ -langchain==0.2.14 +langchain==0.2.15 openai==1.42.0 From b95c5233851264df82809a5cf2a117480359ba63 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 2 Sep 2024 21:31:38 +0000 Subject: [PATCH 0054/1530] chore(deps): Bump certifi from 2024.7.4 to 2024.8.30 in /examples/langchain/langchainpy-localai-example (#3457) chore(deps): Bump certifi Bumps [certifi](https://github.com/certifi/python-certifi) from 2024.7.4 to 2024.8.30. - [Commits](https://github.com/certifi/python-certifi/compare/2024.07.04...2024.08.30) --- updated-dependencies: - dependency-name: certifi dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- examples/langchain/langchainpy-localai-example/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/langchain/langchainpy-localai-example/requirements.txt b/examples/langchain/langchainpy-localai-example/requirements.txt index 223a4ea4..28cbdb0a 100644 --- a/examples/langchain/langchainpy-localai-example/requirements.txt +++ b/examples/langchain/langchainpy-localai-example/requirements.txt @@ -2,7 +2,7 @@ aiohttp==3.10.3 aiosignal==1.3.1 async-timeout==4.0.3 attrs==24.2.0 -certifi==2024.7.4 +certifi==2024.8.30 charset-normalizer==3.3.2 colorama==0.4.6 dataclasses-json==0.6.7 From 71be0669378b1c6c7cda40c88254e943173e3cfe Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 2 Sep 2024 21:59:37 +0000 Subject: [PATCH 0055/1530] chore(deps): Bump yarl from 1.9.4 to 1.9.7 in /examples/langchain/langchainpy-localai-example (#3459) chore(deps): Bump yarl Bumps [yarl](https://github.com/aio-libs/yarl) from 1.9.4 to 1.9.7. - [Release notes](https://github.com/aio-libs/yarl/releases) - [Changelog](https://github.com/aio-libs/yarl/blob/master/CHANGES.rst) - [Commits](https://github.com/aio-libs/yarl/compare/v1.9.4...v1.9.7) --- updated-dependencies: - dependency-name: yarl dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- examples/langchain/langchainpy-localai-example/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/langchain/langchainpy-localai-example/requirements.txt b/examples/langchain/langchainpy-localai-example/requirements.txt index 28cbdb0a..9d5b5518 100644 --- a/examples/langchain/langchainpy-localai-example/requirements.txt +++ b/examples/langchain/langchainpy-localai-example/requirements.txt @@ -30,4 +30,4 @@ tqdm==4.66.5 typing-inspect==0.9.0 typing_extensions==4.12.2 urllib3==2.2.2 -yarl==1.9.4 +yarl==1.9.7 From 0ff1b7f8f7abd6efe5356295ed04d5bedb8c65e6 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 2 Sep 2024 23:03:58 +0000 Subject: [PATCH 0056/1530] chore(deps): Bump langchain-community from 0.2.12 to 0.2.15 in /examples/langchain/langchainpy-localai-example (#3461) chore(deps): Bump langchain-community Bumps [langchain-community](https://github.com/langchain-ai/langchain) from 0.2.12 to 0.2.15. - [Release notes](https://github.com/langchain-ai/langchain/releases) - [Commits](https://github.com/langchain-ai/langchain/compare/langchain-community==0.2.12...langchain-community==0.2.15) --- updated-dependencies: - dependency-name: langchain-community dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- examples/langchain/langchainpy-localai-example/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/langchain/langchainpy-localai-example/requirements.txt b/examples/langchain/langchainpy-localai-example/requirements.txt index 9d5b5518..e5ac55bc 100644 --- a/examples/langchain/langchainpy-localai-example/requirements.txt +++ b/examples/langchain/langchainpy-localai-example/requirements.txt @@ -11,7 +11,7 @@ frozenlist==1.4.1 greenlet==3.0.3 idna==3.8 langchain==0.2.14 -langchain-community==0.2.12 +langchain-community==0.2.15 marshmallow==3.22.0 marshmallow-enum==1.5.1 multidict==6.0.5 From 5da07b0a847470fecd89ae33fcef3a332ca26c3e Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 2 Sep 2024 23:18:14 +0000 Subject: [PATCH 0057/1530] chore(deps): Bump llama-index from 0.11.1 to 0.11.4 in /examples/chainlit (#3462) chore(deps): Bump llama-index in /examples/chainlit Bumps [llama-index](https://github.com/run-llama/llama_index) from 0.11.1 to 0.11.4. - [Release notes](https://github.com/run-llama/llama_index/releases) - [Changelog](https://github.com/run-llama/llama_index/blob/main/CHANGELOG.md) - [Commits](https://github.com/run-llama/llama_index/compare/v0.11.1...v0.11.4) --- updated-dependencies: - dependency-name: llama-index dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- examples/chainlit/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/chainlit/requirements.txt b/examples/chainlit/requirements.txt index b5ea6cf4..8654ea99 100644 --- a/examples/chainlit/requirements.txt +++ b/examples/chainlit/requirements.txt @@ -1,4 +1,4 @@ -llama_index==0.11.1 +llama_index==0.11.4 requests==2.32.3 weaviate_client==4.6.7 transformers From 22e30fccbc9a50c718e70ee015d1b95b372a6a18 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 3 Sep 2024 00:38:23 +0000 Subject: [PATCH 0058/1530] chore(deps): Bump llama-index from 0.11.1 to 0.11.4 in /examples/langchain-chroma (#3467) chore(deps): Bump llama-index in /examples/langchain-chroma Bumps [llama-index](https://github.com/run-llama/llama_index) from 0.11.1 to 0.11.4. - [Release notes](https://github.com/run-llama/llama_index/releases) - [Changelog](https://github.com/run-llama/llama_index/blob/main/CHANGELOG.md) - [Commits](https://github.com/run-llama/llama_index/compare/v0.11.1...v0.11.4) --- updated-dependencies: - dependency-name: llama-index dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- examples/langchain-chroma/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/langchain-chroma/requirements.txt b/examples/langchain-chroma/requirements.txt index 4e80315b..fc6e7710 100644 --- a/examples/langchain-chroma/requirements.txt +++ b/examples/langchain-chroma/requirements.txt @@ -1,4 +1,4 @@ langchain==0.2.14 openai==1.42.0 chromadb==0.5.5 -llama-index==0.11.1 \ No newline at end of file +llama-index==0.11.4 \ No newline at end of file From 70f6d80677df63f7e8ecb314034cc6db42b6d2d8 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 3 Sep 2024 02:44:41 +0000 Subject: [PATCH 0059/1530] chore(deps): Bump docs/themes/hugo-theme-relearn from `3a0ae52` to `550a6ee` (#3472) chore(deps): Bump docs/themes/hugo-theme-relearn Bumps [docs/themes/hugo-theme-relearn](https://github.com/McShelby/hugo-theme-relearn) from `3a0ae52` to `550a6ee`. - [Release notes](https://github.com/McShelby/hugo-theme-relearn/releases) - [Commits](https://github.com/McShelby/hugo-theme-relearn/compare/3a0ae52e610bbf99e6a4182a1d7889954e9b7d26...550a6eeb9252da5ca729f25dc91df6dd3ee9d5ce) --- updated-dependencies: - dependency-name: docs/themes/hugo-theme-relearn dependency-type: direct:production ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- docs/themes/hugo-theme-relearn | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/themes/hugo-theme-relearn b/docs/themes/hugo-theme-relearn index 3a0ae52e..550a6eeb 160000 --- a/docs/themes/hugo-theme-relearn +++ b/docs/themes/hugo-theme-relearn @@ -1 +1 @@ -Subproject commit 3a0ae52e610bbf99e6a4182a1d7889954e9b7d26 +Subproject commit 550a6eeb9252da5ca729f25dc91df6dd3ee9d5ce From e2d40d0fcc8b59c5a04ce70d991cdc749b8bbd12 Mon Sep 17 00:00:00 2001 From: "LocalAI [bot]" <139863280+localai-bot@users.noreply.github.com> Date: Tue, 3 Sep 2024 05:48:23 +0200 Subject: [PATCH 0060/1530] chore: :arrow_up: Update ggerganov/llama.cpp to `48baa61eccdca9205daf8d620ba28055c2347b64` (#3474) :arrow_up: Update ggerganov/llama.cpp Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com> Co-authored-by: mudler <2420543+mudler@users.noreply.github.com> --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index 43d711bd..488029fc 100644 --- a/Makefile +++ b/Makefile @@ -8,7 +8,7 @@ DETECT_LIBS?=true # llama.cpp versions GOLLAMA_REPO?=https://github.com/go-skynet/go-llama.cpp GOLLAMA_VERSION?=2b57a8ae43e4699d3dc5d1496a1ccd42922993be -CPPLLAMA_VERSION?=8f1d81a0b6f50b9bad72db0b6fcd299ad9ecd48c +CPPLLAMA_VERSION?=48baa61eccdca9205daf8d620ba28055c2347b64 # go-rwkv version RWKV_REPO?=https://github.com/donomii/go-rwkv.cpp From 923e4cce853f0c5311edd88fc200d3c9c1c95842 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 3 Sep 2024 04:26:08 +0000 Subject: [PATCH 0061/1530] chore(deps): Bump openai from 1.42.0 to 1.43.0 in /examples/functions (#3452) Bumps [openai](https://github.com/openai/openai-python) from 1.42.0 to 1.43.0. 
- [Release notes](https://github.com/openai/openai-python/releases) - [Changelog](https://github.com/openai/openai-python/blob/main/CHANGELOG.md) - [Commits](https://github.com/openai/openai-python/compare/v1.42.0...v1.43.0) --- updated-dependencies: - dependency-name: openai dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- examples/functions/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/functions/requirements.txt b/examples/functions/requirements.txt index 429e5660..85f18c9f 100644 --- a/examples/functions/requirements.txt +++ b/examples/functions/requirements.txt @@ -1,2 +1,2 @@ langchain==0.2.15 -openai==1.42.0 +openai==1.43.0 From a7998e0263432df9770ce69692e02d99b0efef69 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 3 Sep 2024 05:39:23 +0000 Subject: [PATCH 0062/1530] chore(deps): Bump langchain from 0.2.14 to 0.2.15 in /examples/langchain/langchainpy-localai-example (#3460) chore(deps): Bump langchain Bumps [langchain](https://github.com/langchain-ai/langchain) from 0.2.14 to 0.2.15. - [Release notes](https://github.com/langchain-ai/langchain/releases) - [Commits](https://github.com/langchain-ai/langchain/compare/langchain==0.2.14...langchain==0.2.15) --- updated-dependencies: - dependency-name: langchain dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- examples/langchain/langchainpy-localai-example/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/langchain/langchainpy-localai-example/requirements.txt b/examples/langchain/langchainpy-localai-example/requirements.txt index e5ac55bc..1aaca044 100644 --- a/examples/langchain/langchainpy-localai-example/requirements.txt +++ b/examples/langchain/langchainpy-localai-example/requirements.txt @@ -10,7 +10,7 @@ debugpy==1.8.2 frozenlist==1.4.1 greenlet==3.0.3 idna==3.8 -langchain==0.2.14 +langchain==0.2.15 langchain-community==0.2.15 marshmallow==3.22.0 marshmallow-enum==1.5.1 From 0180bcf22ad20209174c61bebce8d1247c1ef013 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 3 Sep 2024 05:54:22 +0000 Subject: [PATCH 0063/1530] chore(deps): Bump openai from 1.42.0 to 1.43.0 in /examples/langchain-chroma (#3468) chore(deps): Bump openai in /examples/langchain-chroma Bumps [openai](https://github.com/openai/openai-python) from 1.42.0 to 1.43.0. - [Release notes](https://github.com/openai/openai-python/releases) - [Changelog](https://github.com/openai/openai-python/blob/main/CHANGELOG.md) - [Commits](https://github.com/openai/openai-python/compare/v1.42.0...v1.43.0) --- updated-dependencies: - dependency-name: openai dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- examples/langchain-chroma/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/langchain-chroma/requirements.txt b/examples/langchain-chroma/requirements.txt index fc6e7710..1214421f 100644 --- a/examples/langchain-chroma/requirements.txt +++ b/examples/langchain-chroma/requirements.txt @@ -1,4 +1,4 @@ langchain==0.2.14 -openai==1.42.0 +openai==1.43.0 chromadb==0.5.5 llama-index==0.11.4 \ No newline at end of file From 8ac79cfc3353543281b18d51fdd20fc20d8eb1ff Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 3 Sep 2024 07:30:55 +0000 Subject: [PATCH 0064/1530] chore(deps): Bump langchain from 0.2.14 to 0.2.15 in /examples/langchain-chroma (#3466) chore(deps): Bump langchain in /examples/langchain-chroma Bumps [langchain](https://github.com/langchain-ai/langchain) from 0.2.14 to 0.2.15. - [Release notes](https://github.com/langchain-ai/langchain/releases) - [Commits](https://github.com/langchain-ai/langchain/compare/langchain==0.2.14...langchain==0.2.15) --- updated-dependencies: - dependency-name: langchain dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- examples/langchain-chroma/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/langchain-chroma/requirements.txt b/examples/langchain-chroma/requirements.txt index 1214421f..08d575d0 100644 --- a/examples/langchain-chroma/requirements.txt +++ b/examples/langchain-chroma/requirements.txt @@ -1,4 +1,4 @@ -langchain==0.2.14 +langchain==0.2.15 openai==1.43.0 chromadb==0.5.5 llama-index==0.11.4 \ No newline at end of file From dbe1e652bcdf3e9ee673d0ceff1f73de88b58c5a Mon Sep 17 00:00:00 2001 From: "LocalAI [bot]" <139863280+localai-bot@users.noreply.github.com> Date: Tue, 3 Sep 2024 10:08:05 +0200 Subject: [PATCH 0065/1530] chore: :arrow_up: Update ggerganov/whisper.cpp to `5236f0278420ab776d1787c4330678d80219b4b6` (#3475) :arrow_up: Update ggerganov/whisper.cpp Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com> Co-authored-by: mudler <2420543+mudler@users.noreply.github.com> --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index 488029fc..7efe2635 100644 --- a/Makefile +++ b/Makefile @@ -16,7 +16,7 @@ RWKV_VERSION?=661e7ae26d442f5cfebd2a0881b44e8c55949ec6 # whisper.cpp version WHISPER_REPO?=https://github.com/ggerganov/whisper.cpp -WHISPER_CPP_VERSION?=c96906d84dd6a1c40ea797ad542df3a0c47307a3 +WHISPER_CPP_VERSION?=5236f0278420ab776d1787c4330678d80219b4b6 # bert.cpp version BERT_REPO?=https://github.com/go-skynet/go-bert.cpp From bf87943da7e610d3e4820dddbccd468beb517817 Mon Sep 17 00:00:00 2001 From: "LocalAI [bot]" <139863280+localai-bot@users.noreply.github.com> Date: Tue, 3 Sep 2024 23:42:11 +0200 Subject: [PATCH 0066/1530] chore: :arrow_up: Update ggerganov/llama.cpp to `8962422b1c6f9b8b15f5aeaea42600bcc2d44177` (#3478) :arrow_up: Update ggerganov/llama.cpp Signed-off-by: github-actions[bot] 
<41898282+github-actions[bot]@users.noreply.github.com> Co-authored-by: mudler <2420543+mudler@users.noreply.github.com> --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index 7efe2635..5d0e54b4 100644 --- a/Makefile +++ b/Makefile @@ -8,7 +8,7 @@ DETECT_LIBS?=true # llama.cpp versions GOLLAMA_REPO?=https://github.com/go-skynet/go-llama.cpp GOLLAMA_VERSION?=2b57a8ae43e4699d3dc5d1496a1ccd42922993be -CPPLLAMA_VERSION?=48baa61eccdca9205daf8d620ba28055c2347b64 +CPPLLAMA_VERSION?=8962422b1c6f9b8b15f5aeaea42600bcc2d44177 # go-rwkv version RWKV_REPO?=https://github.com/donomii/go-rwkv.cpp From 47bc72343ce7f6844bcad57e53d870f8a8287796 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 3 Sep 2024 21:56:55 +0000 Subject: [PATCH 0067/1530] chore(deps): Bump streamlit from 1.37.1 to 1.38.0 in /examples/streamlit-bot (#3465) chore(deps): Bump streamlit in /examples/streamlit-bot Bumps [streamlit](https://github.com/streamlit/streamlit) from 1.37.1 to 1.38.0. - [Release notes](https://github.com/streamlit/streamlit/releases) - [Commits](https://github.com/streamlit/streamlit/compare/1.37.1...1.38.0) --- updated-dependencies: - dependency-name: streamlit dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- examples/streamlit-bot/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/streamlit-bot/requirements.txt b/examples/streamlit-bot/requirements.txt index 17e1bee0..fa8c4118 100644 --- a/examples/streamlit-bot/requirements.txt +++ b/examples/streamlit-bot/requirements.txt @@ -1,2 +1,2 @@ -streamlit==1.37.1 +streamlit==1.38.0 requests \ No newline at end of file From f336c1a7b83bf320d9ca22dd66627a7c968a18a4 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 3 Sep 2024 22:51:01 +0000 Subject: [PATCH 0068/1530] chore(deps): Bump openai from 1.42.0 to 1.43.0 in /examples/langchain/langchainpy-localai-example (#3456) chore(deps): Bump openai Bumps [openai](https://github.com/openai/openai-python) from 1.42.0 to 1.43.0. - [Release notes](https://github.com/openai/openai-python/releases) - [Changelog](https://github.com/openai/openai-python/blob/main/CHANGELOG.md) - [Commits](https://github.com/openai/openai-python/compare/v1.42.0...v1.43.0) --- updated-dependencies: - dependency-name: openai dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- examples/langchain/langchainpy-localai-example/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/langchain/langchainpy-localai-example/requirements.txt b/examples/langchain/langchainpy-localai-example/requirements.txt index 1aaca044..242e5618 100644 --- a/examples/langchain/langchainpy-localai-example/requirements.txt +++ b/examples/langchain/langchainpy-localai-example/requirements.txt @@ -18,7 +18,7 @@ multidict==6.0.5 mypy-extensions==1.0.0 numexpr==2.10.1 numpy==2.1.0 -openai==1.42.0 +openai==1.43.0 openapi-schema-pydantic==1.2.4 packaging>=23.2 pydantic==2.8.2 From 6b72bdcb0aa8c70acefa4a311a9a2b80bedfc4a4 Mon Sep 17 00:00:00 2001 From: Dave Date: Wed, 4 Sep 2024 10:29:09 -0400 Subject: [PATCH 0069/1530] fix: purge a few remaining runway model references (#3480) purge a few remaining runway model references Signed-off-by: Dave Lee --- aio/intel/image-gen.yaml | 2 +- backend/python/diffusers/backend.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/aio/intel/image-gen.yaml b/aio/intel/image-gen.yaml index eb724c92..45fe6b62 100644 --- a/aio/intel/image-gen.yaml +++ b/aio/intel/image-gen.yaml @@ -1,6 +1,6 @@ name: stablediffusion parameters: - model: runwayml/stable-diffusion-v1-5 + model: Lykon/dreamshaper-8 backend: diffusers step: 25 f16: true diff --git a/backend/python/diffusers/backend.py b/backend/python/diffusers/backend.py index 8f420848..e7ad1cdd 100755 --- a/backend/python/diffusers/backend.py +++ b/backend/python/diffusers/backend.py @@ -168,7 +168,7 @@ class BackendServicer(backend_pb2_grpc.BackendServicer): if request.CFGScale != 0: self.cfg_scale = request.CFGScale - clipmodel = "runwayml/stable-diffusion-v1-5" + clipmodel = "Lykon/dreamshaper-8" if request.CLIPModel != "": clipmodel = request.CLIPModel clipsubfolder = "text_encoder" From 
a9f438e1e6b4863ddcfe3eb7e060c29596a08e69 Mon Sep 17 00:00:00 2001 From: "LocalAI [bot]" <139863280+localai-bot@users.noreply.github.com> Date: Thu, 5 Sep 2024 08:15:46 +0200 Subject: [PATCH 0070/1530] chore: :arrow_up: Update ggerganov/llama.cpp to `581c305186a0ff93f360346c57e21fe16e967bb7` (#3481) :arrow_up: Update ggerganov/llama.cpp Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com> Co-authored-by: mudler <2420543+mudler@users.noreply.github.com> --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index 5d0e54b4..b933b4f7 100644 --- a/Makefile +++ b/Makefile @@ -8,7 +8,7 @@ DETECT_LIBS?=true # llama.cpp versions GOLLAMA_REPO?=https://github.com/go-skynet/go-llama.cpp GOLLAMA_VERSION?=2b57a8ae43e4699d3dc5d1496a1ccd42922993be -CPPLLAMA_VERSION?=8962422b1c6f9b8b15f5aeaea42600bcc2d44177 +CPPLLAMA_VERSION?=581c305186a0ff93f360346c57e21fe16e967bb7 # go-rwkv version RWKV_REPO?=https://github.com/donomii/go-rwkv.cpp From ac5d655598ffe431cb70a32f33910e6c6f9757f9 Mon Sep 17 00:00:00 2001 From: Ettore Di Giacinto Date: Thu, 5 Sep 2024 17:49:27 +0200 Subject: [PATCH 0071/1530] models(gallery): add yi-coder (and variants) (#3482) Signed-off-by: Ettore Di Giacinto --- gallery/index.yaml | 98 ++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 98 insertions(+) diff --git a/gallery/index.yaml b/gallery/index.yaml index da93ecd3..afc7d6f5 100644 --- a/gallery/index.yaml +++ b/gallery/index.yaml @@ -3193,6 +3193,104 @@ - filename: magnum-v3-34b-Q4_K_M.gguf sha256: f902956c0731581f1ff189e547e6e5aad86b77af5f4dc7e4fc26bcda5c1f7cc3 uri: huggingface://bartowski/magnum-v3-34b-GGUF/magnum-v3-34b-Q4_K_M.gguf +- !!merge <<: *yi-chat + name: "yi-coder-9b-chat" + urls: + - https://huggingface.co/01-ai/Yi-Coder-9B-Chat + - https://huggingface.co/bartowski/Yi-Coder-9B-Chat-GGUF + - https://01-ai.github.io/ + - https://github.com/01-ai/Yi-Coder + description: | + Yi-Coder is a series of 
open-source code language models that delivers state-of-the-art coding performance with fewer than 10 billion parameters. + Key features: + + Excelling in long-context understanding with a maximum context length of 128K tokens. + Supporting 52 major programming languages: + + 'java', 'markdown', 'python', 'php', 'javascript', 'c++', 'c#', 'c', 'typescript', 'html', 'go', 'java_server_pages', 'dart', 'objective-c', 'kotlin', 'tex', 'swift', 'ruby', 'sql', 'rust', 'css', 'yaml', 'matlab', 'lua', 'json', 'shell', 'visual_basic', 'scala', 'rmarkdown', 'pascal', 'fortran', 'haskell', 'assembly', 'perl', 'julia', 'cmake', 'groovy', 'ocaml', 'powershell', 'elixir', 'clojure', 'makefile', 'coffeescript', 'erlang', 'lisp', 'toml', 'batchfile', 'cobol', 'dockerfile', 'r', 'prolog', 'verilog' + + For model details and benchmarks, see Yi-Coder blog and Yi-Coder README. + overrides: + parameters: + model: Yi-Coder-9B-Chat-Q4_K_M.gguf + files: + - filename: Yi-Coder-9B-Chat-Q4_K_M.gguf + sha256: 251cc196e3813d149694f362bb0f8f154f3320abe44724eebe58c23dc54f201d + uri: huggingface://bartowski/Yi-Coder-9B-Chat-GGUF/Yi-Coder-9B-Chat-Q4_K_M.gguf +- !!merge <<: *yi-chat + name: "yi-coder-1.5b-chat" + urls: + - https://huggingface.co/01-ai/Yi-Coder-1.5B-Chat + - https://huggingface.co/MaziyarPanahi/Yi-Coder-1.5B-Chat-GGUF + - https://01-ai.github.io/ + - https://github.com/01-ai/Yi-Coder + description: | + Yi-Coder is a series of open-source code language models that delivers state-of-the-art coding performance with fewer than 10 billion parameters. + Key features: + + Excelling in long-context understanding with a maximum context length of 128K tokens. 
+ Supporting 52 major programming languages: + + 'java', 'markdown', 'python', 'php', 'javascript', 'c++', 'c#', 'c', 'typescript', 'html', 'go', 'java_server_pages', 'dart', 'objective-c', 'kotlin', 'tex', 'swift', 'ruby', 'sql', 'rust', 'css', 'yaml', 'matlab', 'lua', 'json', 'shell', 'visual_basic', 'scala', 'rmarkdown', 'pascal', 'fortran', 'haskell', 'assembly', 'perl', 'julia', 'cmake', 'groovy', 'ocaml', 'powershell', 'elixir', 'clojure', 'makefile', 'coffeescript', 'erlang', 'lisp', 'toml', 'batchfile', 'cobol', 'dockerfile', 'r', 'prolog', 'verilog' + + For model details and benchmarks, see Yi-Coder blog and Yi-Coder README. + overrides: + parameters: + model: Yi-Coder-1.5B-Chat.Q4_K_M.gguf + files: + - filename: Yi-Coder-1.5B-Chat.Q4_K_M.gguf + sha256: e2e8fa659cd75c828d7783b5c2fb60d220e08836065901fad8edb48e537c1cec + uri: huggingface://MaziyarPanahi/Yi-Coder-1.5B-Chat-GGUF/Yi-Coder-1.5B-Chat.Q4_K_M.gguf +- !!merge <<: *yi-chat + url: "github:mudler/LocalAI/gallery/codellama.yaml@master" + name: "yi-coder-1.5b" + urls: + - https://huggingface.co/01-ai/Yi-Coder-1.5B + - https://huggingface.co/QuantFactory/Yi-Coder-1.5B-GGUF + - https://01-ai.github.io/ + - https://github.com/01-ai/Yi-Coder + description: | + Yi-Coder is a series of open-source code language models that delivers state-of-the-art coding performance with fewer than 10 billion parameters. + Key features: + + Excelling in long-context understanding with a maximum context length of 128K tokens. 
+ Supporting 52 major programming languages: + + 'java', 'markdown', 'python', 'php', 'javascript', 'c++', 'c#', 'c', 'typescript', 'html', 'go', 'java_server_pages', 'dart', 'objective-c', 'kotlin', 'tex', 'swift', 'ruby', 'sql', 'rust', 'css', 'yaml', 'matlab', 'lua', 'json', 'shell', 'visual_basic', 'scala', 'rmarkdown', 'pascal', 'fortran', 'haskell', 'assembly', 'perl', 'julia', 'cmake', 'groovy', 'ocaml', 'powershell', 'elixir', 'clojure', 'makefile', 'coffeescript', 'erlang', 'lisp', 'toml', 'batchfile', 'cobol', 'dockerfile', 'r', 'prolog', 'verilog' + + For model details and benchmarks, see Yi-Coder blog and Yi-Coder README. + overrides: + parameters: + model: Yi-Coder-1.5B.Q4_K_M.gguf + files: + - filename: Yi-Coder-1.5B.Q4_K_M.gguf + sha256: 86a280dd36c9b2342b7023532f9c2c287e251f5cd10bc81ca262db8c1668f272 + uri: huggingface://QuantFactory/Yi-Coder-1.5B-GGUF/Yi-Coder-1.5B.Q4_K_M.gguf +- !!merge <<: *yi-chat + url: "github:mudler/LocalAI/gallery/codellama.yaml@master" + name: "yi-coder-9b" + urls: + - https://huggingface.co/01-ai/Yi-Coder-9B + - https://huggingface.co/QuantFactory/Yi-Coder-9B-GGUF + - https://01-ai.github.io/ + - https://github.com/01-ai/Yi-Coder + description: | + Yi-Coder is a series of open-source code language models that delivers state-of-the-art coding performance with fewer than 10 billion parameters. + Key features: + + Excelling in long-context understanding with a maximum context length of 128K tokens. 
+ Supporting 52 major programming languages: + + 'java', 'markdown', 'python', 'php', 'javascript', 'c++', 'c#', 'c', 'typescript', 'html', 'go', 'java_server_pages', 'dart', 'objective-c', 'kotlin', 'tex', 'swift', 'ruby', 'sql', 'rust', 'css', 'yaml', 'matlab', 'lua', 'json', 'shell', 'visual_basic', 'scala', 'rmarkdown', 'pascal', 'fortran', 'haskell', 'assembly', 'perl', 'julia', 'cmake', 'groovy', 'ocaml', 'powershell', 'elixir', 'clojure', 'makefile', 'coffeescript', 'erlang', 'lisp', 'toml', 'batchfile', 'cobol', 'dockerfile', 'r', 'prolog', 'verilog' + + For model details and benchmarks, see Yi-Coder blog and Yi-Coder README. + overrides: + parameters: + model: Yi-Coder-9B.Q4_K_M.gguf + files: + - filename: Yi-Coder-9B.Q4_K_M.gguf + sha256: cff3db8a69c43654e3c2d2984e86ad2791d1d446ec56b24a636ba1ce78363308 + uri: huggingface://QuantFactory/Yi-Coder-9B-GGUF/Yi-Coder-9B.Q4_K_M.gguf - &vicuna-chat ## LLama2 and derivatives ### Start Fimbulvetr From 791c3ace72cb1e21337da74d20432fa174a844f5 Mon Sep 17 00:00:00 2001 From: Ettore Di Giacinto Date: Thu, 5 Sep 2024 20:44:30 +0200 Subject: [PATCH 0072/1530] feat: add endpoint to list system informations (#3449) * feat: add endpoint to list system informations For now, it lists the available backends, but can be expanded later on to include more system informations (such as GPU devices detected, RAM, threads configured, and so on so forth). 
Signed-off-by: Ettore Di Giacinto * show also external backends Signed-off-by: Ettore Di Giacinto * add test Signed-off-by: Ettore Di Giacinto --------- Signed-off-by: Ettore Di Giacinto --- core/http/app_test.go | 11 ++++++++++ core/http/endpoints/localai/system.go | 29 +++++++++++++++++++++++++++ core/http/routes/localai.go | 2 ++ core/schema/localai.go | 4 ++++ pkg/model/initializers.go | 4 ++++ 5 files changed, 50 insertions(+) create mode 100644 core/http/endpoints/localai/system.go diff --git a/core/http/app_test.go b/core/http/app_test.go index a837e20c..86fe7fdd 100644 --- a/core/http/app_test.go +++ b/core/http/app_test.go @@ -772,6 +772,17 @@ var _ = Describe("API test", func() { Expect(err.Error()).To(ContainSubstring("error, status code: 500, message: could not load model - all backends returned error:")) }) + It("shows the external backend", func() { + // do an http request to the /system endpoint + resp, err := http.Get("http://127.0.0.1:9090/system") + Expect(err).ToNot(HaveOccurred()) + Expect(resp.StatusCode).To(Equal(200)) + dat, err := io.ReadAll(resp.Body) + Expect(err).ToNot(HaveOccurred()) + Expect(string(dat)).To(ContainSubstring("huggingface")) + Expect(string(dat)).To(ContainSubstring("llama-cpp")) + }) + It("transcribes audio", func() { if runtime.GOOS != "linux" { Skip("test supported only on linux") diff --git a/core/http/endpoints/localai/system.go b/core/http/endpoints/localai/system.go new file mode 100644 index 00000000..11704933 --- /dev/null +++ b/core/http/endpoints/localai/system.go @@ -0,0 +1,29 @@ +package localai + +import ( + "github.com/gofiber/fiber/v2" + "github.com/mudler/LocalAI/core/config" + "github.com/mudler/LocalAI/core/schema" + "github.com/mudler/LocalAI/pkg/model" +) + +// SystemInformations returns the system informations +// @Summary Show the LocalAI instance information +// @Success 200 {object} schema.SystemInformationResponse "Response" +// @Router /system [get] +func SystemInformations(ml 
*model.ModelLoader, appConfig *config.ApplicationConfig) func(*fiber.Ctx) error { + return func(c *fiber.Ctx) error { + availableBackends, err := ml.ListAvailableBackends(appConfig.AssetsDestination) + if err != nil { + return err + } + for b := range appConfig.ExternalGRPCBackends { + availableBackends = append(availableBackends, b) + } + return c.JSON( + schema.SystemInformationResponse{ + Backends: availableBackends, + }, + ) + } +} diff --git a/core/http/routes/localai.go b/core/http/routes/localai.go index 105991e8..f85fa807 100644 --- a/core/http/routes/localai.go +++ b/core/http/routes/localai.go @@ -70,4 +70,6 @@ func RegisterLocalAIRoutes(app *fiber.App, }{Version: internal.PrintableVersion()}) }) + app.Get("/system", auth, localai.SystemInformations(ml, appConfig)) + } diff --git a/core/schema/localai.go b/core/schema/localai.go index 1b75e384..9070c2be 100644 --- a/core/schema/localai.go +++ b/core/schema/localai.go @@ -70,3 +70,7 @@ type P2PNodesResponse struct { Nodes []p2p.NodeData `json:"nodes" yaml:"nodes"` FederatedNodes []p2p.NodeData `json:"federated_nodes" yaml:"federated_nodes"` } + +type SystemInformationResponse struct { + Backends []string `json:"backends"` +} diff --git a/pkg/model/initializers.go b/pkg/model/initializers.go index de0662e6..3d2255cc 100644 --- a/pkg/model/initializers.go +++ b/pkg/model/initializers.go @@ -393,6 +393,10 @@ func (ml *ModelLoader) grpcModel(backend string, o *Options) func(string, string } } +func (ml *ModelLoader) ListAvailableBackends(assetdir string) ([]string, error) { + return backendsInAssetDir(assetdir) +} + func (ml *ModelLoader) BackendLoader(opts ...Option) (client grpc.Backend, err error) { o := NewOptions(opts...) 
From 9a159fbfad3e33fe26ce5d41d9f6a3ec8899214b Mon Sep 17 00:00:00 2001 From: "LocalAI [bot]" <139863280+localai-bot@users.noreply.github.com> Date: Thu, 5 Sep 2024 23:50:54 +0200 Subject: [PATCH 0073/1530] chore(model-gallery): :arrow_up: update checksum (#3486) :arrow_up: Checksum updates in gallery/index.yaml Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com> Co-authored-by: mudler <2420543+mudler@users.noreply.github.com> --- gallery/index.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/gallery/index.yaml b/gallery/index.yaml index afc7d6f5..028d1af4 100644 --- a/gallery/index.yaml +++ b/gallery/index.yaml @@ -1724,7 +1724,7 @@ files: - filename: Meta-Llama-3-8B-Instruct.Q4_0.gguf uri: huggingface://QuantFactory/Meta-Llama-3-8B-Instruct-GGUF/Meta-Llama-3-8B-Instruct.Q4_0.gguf - sha256: 18c8eb909db870d456a823700b4c82f6259e6052899f0ebf2bddc9b2417cd355 + sha256: 2b4675c2208f09ad8762d8cf1b6a4a26bf65e6f0641aba324ec65143c0b4ad9f - !!merge <<: *llama3 name: "llama3-8b-instruct:Q6_K" overrides: @@ -1733,7 +1733,7 @@ files: - filename: Meta-Llama-3-8B-Instruct.Q6_K.gguf uri: huggingface://QuantFactory/Meta-Llama-3-8B-Instruct-GGUF/Meta-Llama-3-8B-Instruct.Q6_K.gguf - sha256: 67f8eb2218938a5fd711605d526d2287e9a4ad26849efdf3bf7c0c17dcbde018 + sha256: bd7efd73f9fb67e4b9ecc43f861f37c7e594e78a8a5ff9c29da021692bd243ef - !!merge <<: *llama3 name: "llama-3-8b-instruct-abliterated" urls: From 58c4a6d9d92d2a9492e227ac92b3e46fdd033c2f Mon Sep 17 00:00:00 2001 From: "LocalAI [bot]" <139863280+localai-bot@users.noreply.github.com> Date: Fri, 6 Sep 2024 00:08:12 +0200 Subject: [PATCH 0074/1530] chore: :arrow_up: Update ggerganov/llama.cpp to `4db04784f96757d74f74c8c110c2a00d55e33514` (#3485) :arrow_up: Update ggerganov/llama.cpp Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com> Co-authored-by: mudler <2420543+mudler@users.noreply.github.com> --- Makefile | 2 +- 1 file changed, 1 
insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index b933b4f7..4b819224 100644 --- a/Makefile +++ b/Makefile @@ -8,7 +8,7 @@ DETECT_LIBS?=true # llama.cpp versions GOLLAMA_REPO?=https://github.com/go-skynet/go-llama.cpp GOLLAMA_VERSION?=2b57a8ae43e4699d3dc5d1496a1ccd42922993be -CPPLLAMA_VERSION?=581c305186a0ff93f360346c57e21fe16e967bb7 +CPPLLAMA_VERSION?=4db04784f96757d74f74c8c110c2a00d55e33514 # go-rwkv version RWKV_REPO?=https://github.com/donomii/go-rwkv.cpp From 11c16f529e985bbc477941e690040effff7d228a Mon Sep 17 00:00:00 2001 From: "LocalAI [bot]" <139863280+localai-bot@users.noreply.github.com> Date: Fri, 6 Sep 2024 00:21:24 +0200 Subject: [PATCH 0075/1530] feat(swagger): update swagger (#3484) Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com> Co-authored-by: mudler <2420543+mudler@users.noreply.github.com> --- swagger/docs.go | 24 ++++++++++++++++++++++++ swagger/swagger.json | 24 ++++++++++++++++++++++++ swagger/swagger.yaml | 15 +++++++++++++++ 3 files changed, 63 insertions(+) diff --git a/swagger/docs.go b/swagger/docs.go index ced239c4..44da7cf2 100644 --- a/swagger/docs.go +++ b/swagger/docs.go @@ -266,6 +266,19 @@ const docTemplate = `{ } } }, + "/system": { + "get": { + "summary": "Show the LocalAI instance information", + "responses": { + "200": { + "description": "Response", + "schema": { + "$ref": "#/definitions/schema.SystemInformationResponse" + } + } + } + } + }, "/tts": { "post": { "consumes": [ @@ -1649,6 +1662,17 @@ const docTemplate = `{ } } }, + "schema.SystemInformationResponse": { + "type": "object", + "properties": { + "backends": { + "type": "array", + "items": { + "type": "string" + } + } + } + }, "schema.TTSRequest": { "description": "TTS request body", "type": "object", diff --git a/swagger/swagger.json b/swagger/swagger.json index c538b539..eaddf451 100644 --- a/swagger/swagger.json +++ b/swagger/swagger.json @@ -259,6 +259,19 @@ } } }, + "/system": { + "get": { + "summary": 
"Show the LocalAI instance information", + "responses": { + "200": { + "description": "Response", + "schema": { + "$ref": "#/definitions/schema.SystemInformationResponse" + } + } + } + } + }, "/tts": { "post": { "consumes": [ @@ -1642,6 +1655,17 @@ } } }, + "schema.SystemInformationResponse": { + "type": "object", + "properties": { + "backends": { + "type": "array", + "items": { + "type": "string" + } + } + } + }, "schema.TTSRequest": { "description": "TTS request body", "type": "object", diff --git a/swagger/swagger.yaml b/swagger/swagger.yaml index 389543fa..c98e0ef4 100644 --- a/swagger/swagger.yaml +++ b/swagger/swagger.yaml @@ -638,6 +638,13 @@ definitions: $ref: '#/definitions/p2p.NodeData' type: array type: object + schema.SystemInformationResponse: + properties: + backends: + items: + type: string + type: array + type: object schema.TTSRequest: description: TTS request body properties: @@ -832,6 +839,14 @@ paths: schema: $ref: '#/definitions/gallery.GalleryOpStatus' summary: Returns the job status + /system: + get: + responses: + "200": + description: Response + schema: + $ref: '#/definitions/schema.SystemInformationResponse' + summary: Show the LocalAI instance information /tts: post: consumes: From 36d980e5208230e983ba56cd7d7bef5f8d6009d5 Mon Sep 17 00:00:00 2001 From: Ettore Di Giacinto Date: Fri, 6 Sep 2024 12:52:43 +0200 Subject: [PATCH 0076/1530] models(gallery): add reflection-llama-3.1-70b (#3487) Signed-off-by: Ettore Di Giacinto --- gallery/index.yaml | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/gallery/index.yaml b/gallery/index.yaml index 028d1af4..e7b31dd3 100644 --- a/gallery/index.yaml +++ b/gallery/index.yaml @@ -423,6 +423,22 @@ - filename: Hubble-4B-v1-Q4_K_M.gguf uri: huggingface://TheDrummer/Hubble-4B-v1-GGUF/Hubble-4B-v1-Q4_K_M.gguf sha256: 0721294d0e861c6e6162a112fc7242e0c4b260c156137f4bcbb08667f1748080 +- !!merge <<: *llama31 + name: "reflection-llama-3.1-70b" + urls: + - 
https://huggingface.co/leafspark/Reflection-Llama-3.1-70B-bf16 + - https://huggingface.co/senseable/Reflection-Llama-3.1-70B-gguf + description: | + Reflection Llama-3.1 70B is (currently) the world's top open-source LLM, trained with a new technique called Reflection-Tuning that teaches a LLM to detect mistakes in its reasoning and correct course. + + The model was trained on synthetic data generated by Glaive. If you're training a model, Glaive is incredible — use them. + overrides: + parameters: + model: Reflection-Llama-3.1-70B-q4_k_m.gguf + files: + - filename: Reflection-Llama-3.1-70B-q4_k_m.gguf + sha256: 16064e07037883a750cfeae9a7be41143aa857dbac81c2e93c68e2f941dee7b2 + uri: huggingface://senseable/Reflection-Llama-3.1-70B-gguf/Reflection-Llama-3.1-70B-q4_k_m.gguf ## Uncensored models - !!merge <<: *llama31 name: "humanish-roleplay-llama-3.1-8b-i1" From 8b8522046d1480a8539ab7522c62d6b27392e7d1 Mon Sep 17 00:00:00 2001 From: "LocalAI [bot]" <139863280+localai-bot@users.noreply.github.com> Date: Fri, 6 Sep 2024 23:41:08 +0200 Subject: [PATCH 0077/1530] chore: :arrow_up: Update ggerganov/llama.cpp to `815b1fb20a53e439882171757825bacb1350de04` (#3489) :arrow_up: Update ggerganov/llama.cpp Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com> Co-authored-by: mudler <2420543+mudler@users.noreply.github.com> --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index 4b819224..80aec855 100644 --- a/Makefile +++ b/Makefile @@ -8,7 +8,7 @@ DETECT_LIBS?=true # llama.cpp versions GOLLAMA_REPO?=https://github.com/go-skynet/go-llama.cpp GOLLAMA_VERSION?=2b57a8ae43e4699d3dc5d1496a1ccd42922993be -CPPLLAMA_VERSION?=4db04784f96757d74f74c8c110c2a00d55e33514 +CPPLLAMA_VERSION?=815b1fb20a53e439882171757825bacb1350de04 # go-rwkv version RWKV_REPO?=https://github.com/donomii/go-rwkv.cpp From 96ea240b390f7ee0409f9acffe7738aecb5fb0e7 Mon Sep 17 00:00:00 2001 From: Ettore Di Giacinto Date: Sat, 7 Sep 
2024 12:09:29 +0200 Subject: [PATCH 0078/1530] models(gallery): add athena-codegemma-2-2b-it (#3490) Signed-off-by: Ettore Di Giacinto --- gallery/index.yaml | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/gallery/index.yaml b/gallery/index.yaml index e7b31dd3..4939820d 100644 --- a/gallery/index.yaml +++ b/gallery/index.yaml @@ -1708,6 +1708,20 @@ - filename: shieldgemma-9b.i1-Q4_K_M.gguf sha256: ffa7eaadcc0c7d0544fda5b0d86bba3ffa3431b673e5b2135f421cfe65bd8732 uri: huggingface://mradermacher/shieldgemma-9b-i1-GGUF/shieldgemma-9b.i1-Q4_K_M.gguf +- !!merge <<: *gemma + name: "athena-codegemma-2-2b-it" + urls: + - https://huggingface.co/EpistemeAI/Athena-codegemma-2-2b-it + - https://huggingface.co/mradermacher/Athena-codegemma-2-2b-it-GGUF + description: | + Supervised fine tuned (sft unsloth) for coding with EpistemeAI coding dataset. + overrides: + parameters: + model: Athena-codegemma-2-2b-it.Q4_K_M.gguf + files: + - filename: Athena-codegemma-2-2b-it.Q4_K_M.gguf + sha256: 59ce17023438b0da603dd211c7d39f78e7acac4108258ac0818a97a4ca7d64e3 + uri: huggingface://mradermacher/Athena-codegemma-2-2b-it-GGUF/Athena-codegemma-2-2b-it.Q4_K_M.gguf - &llama3 url: "github:mudler/LocalAI/gallery/llama3-instruct.yaml@master" icon: https://cdn-uploads.huggingface.co/production/uploads/642cc1c253e76b4c2286c58e/aJJxKus1wP5N-euvHEUq7.png From 5139dadceb9be607ba1071a4c17d38e58476ef01 Mon Sep 17 00:00:00 2001 From: "LocalAI [bot]" <139863280+localai-bot@users.noreply.github.com> Date: Sun, 8 Sep 2024 02:00:38 +0200 Subject: [PATCH 0079/1530] chore: :arrow_up: Update ggerganov/whisper.cpp to `5caa19240d55bfd6ee316d50fbad32c6e9c39528` (#3494) :arrow_up: Update ggerganov/whisper.cpp Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com> Co-authored-by: mudler <2420543+mudler@users.noreply.github.com> --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index 80aec855..fe05dc1a 100644 --- 
a/Makefile +++ b/Makefile @@ -16,7 +16,7 @@ RWKV_VERSION?=661e7ae26d442f5cfebd2a0881b44e8c55949ec6 # whisper.cpp version WHISPER_REPO?=https://github.com/ggerganov/whisper.cpp -WHISPER_CPP_VERSION?=5236f0278420ab776d1787c4330678d80219b4b6 +WHISPER_CPP_VERSION?=5caa19240d55bfd6ee316d50fbad32c6e9c39528 # bert.cpp version BERT_REPO?=https://github.com/go-skynet/go-bert.cpp From 7781dfe49e0695854b3373855694bf53eedadf76 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 9 Sep 2024 21:30:24 +0000 Subject: [PATCH 0080/1530] chore(deps): Bump langchain-community from 0.2.15 to 0.2.16 in /examples/langchain/langchainpy-localai-example (#3500) chore(deps): Bump langchain-community Bumps [langchain-community](https://github.com/langchain-ai/langchain) from 0.2.15 to 0.2.16. - [Release notes](https://github.com/langchain-ai/langchain/releases) - [Commits](https://github.com/langchain-ai/langchain/compare/langchain-community==0.2.15...langchain-community==0.2.16) --- updated-dependencies: - dependency-name: langchain-community dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- examples/langchain/langchainpy-localai-example/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/langchain/langchainpy-localai-example/requirements.txt b/examples/langchain/langchainpy-localai-example/requirements.txt index 242e5618..588ef76d 100644 --- a/examples/langchain/langchainpy-localai-example/requirements.txt +++ b/examples/langchain/langchainpy-localai-example/requirements.txt @@ -11,7 +11,7 @@ frozenlist==1.4.1 greenlet==3.0.3 idna==3.8 langchain==0.2.15 -langchain-community==0.2.15 +langchain-community==0.2.16 marshmallow==3.22.0 marshmallow-enum==1.5.1 multidict==6.0.5 From 486b491c4c38219474f316564b3fa73cc4bd16f7 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 9 Sep 2024 21:42:13 +0000 Subject: [PATCH 0081/1530] chore(deps): Bump openai from 1.43.0 to 1.44.0 in /examples/langchain/langchainpy-localai-example (#3504) chore(deps): Bump openai Bumps [openai](https://github.com/openai/openai-python) from 1.43.0 to 1.44.0. - [Release notes](https://github.com/openai/openai-python/releases) - [Changelog](https://github.com/openai/openai-python/blob/main/CHANGELOG.md) - [Commits](https://github.com/openai/openai-python/compare/v1.43.0...v1.44.0) --- updated-dependencies: - dependency-name: openai dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- examples/langchain/langchainpy-localai-example/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/langchain/langchainpy-localai-example/requirements.txt b/examples/langchain/langchainpy-localai-example/requirements.txt index 588ef76d..b0bae58f 100644 --- a/examples/langchain/langchainpy-localai-example/requirements.txt +++ b/examples/langchain/langchainpy-localai-example/requirements.txt @@ -18,7 +18,7 @@ multidict==6.0.5 mypy-extensions==1.0.0 numexpr==2.10.1 numpy==2.1.0 -openai==1.43.0 +openai==1.44.0 openapi-schema-pydantic==1.2.4 packaging>=23.2 pydantic==2.8.2 From 424b2e0064863b18c9ee6af2f5b3460bad77ee02 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 9 Sep 2024 21:49:26 +0000 Subject: [PATCH 0082/1530] chore(deps): Bump docs/themes/hugo-theme-relearn from `550a6ee` to `f696f60` (#3505) chore(deps): Bump docs/themes/hugo-theme-relearn Bumps [docs/themes/hugo-theme-relearn](https://github.com/McShelby/hugo-theme-relearn) from `550a6ee` to `f696f60`. - [Release notes](https://github.com/McShelby/hugo-theme-relearn/releases) - [Commits](https://github.com/McShelby/hugo-theme-relearn/compare/550a6eeb9252da5ca729f25dc91df6dd3ee9d5ce...f696f60f4e44e18a34512b895a7b65a72c801bd8) --- updated-dependencies: - dependency-name: docs/themes/hugo-theme-relearn dependency-type: direct:production ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- docs/themes/hugo-theme-relearn | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/themes/hugo-theme-relearn b/docs/themes/hugo-theme-relearn index 550a6eeb..f696f60f 160000 --- a/docs/themes/hugo-theme-relearn +++ b/docs/themes/hugo-theme-relearn @@ -1 +1 @@ -Subproject commit 550a6eeb9252da5ca729f25dc91df6dd3ee9d5ce +Subproject commit f696f60f4e44e18a34512b895a7b65a72c801bd8 From c3e374f30ae91ae291e52f3c81d3a2d92c62da11 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 9 Sep 2024 22:00:28 +0000 Subject: [PATCH 0083/1530] chore(deps): Bump langchain from 0.2.15 to 0.2.16 in /examples/langchain-chroma (#3507) chore(deps): Bump langchain in /examples/langchain-chroma Bumps [langchain](https://github.com/langchain-ai/langchain) from 0.2.15 to 0.2.16. - [Release notes](https://github.com/langchain-ai/langchain/releases) - [Commits](https://github.com/langchain-ai/langchain/compare/langchain==0.2.15...langchain==0.2.16) --- updated-dependencies: - dependency-name: langchain dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- examples/langchain-chroma/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/langchain-chroma/requirements.txt b/examples/langchain-chroma/requirements.txt index 08d575d0..6aee2421 100644 --- a/examples/langchain-chroma/requirements.txt +++ b/examples/langchain-chroma/requirements.txt @@ -1,4 +1,4 @@ -langchain==0.2.15 +langchain==0.2.16 openai==1.43.0 chromadb==0.5.5 llama-index==0.11.4 \ No newline at end of file From c5d509234788a8bd2893007e7744cee242d047d2 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 10 Sep 2024 01:52:16 +0000 Subject: [PATCH 0084/1530] chore(deps): Bump peter-evans/create-pull-request from 6 to 7 (#3518) Bumps [peter-evans/create-pull-request](https://github.com/peter-evans/create-pull-request) from 6 to 7. - [Release notes](https://github.com/peter-evans/create-pull-request/releases) - [Commits](https://github.com/peter-evans/create-pull-request/compare/v6...v7) --- updated-dependencies: - dependency-name: peter-evans/create-pull-request dependency-type: direct:production update-type: version-update:semver-major ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/bump_deps.yaml | 2 +- .github/workflows/bump_docs.yaml | 2 +- .github/workflows/checksum_checker.yaml | 2 +- .github/workflows/update_swagger.yaml | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/workflows/bump_deps.yaml b/.github/workflows/bump_deps.yaml index 68cb81cb..c94a134d 100644 --- a/.github/workflows/bump_deps.yaml +++ b/.github/workflows/bump_deps.yaml @@ -56,7 +56,7 @@ jobs: rm -rfv ${{ matrix.variable }}_message.txt rm -rfv ${{ matrix.variable }}_commit.txt - name: Create Pull Request - uses: peter-evans/create-pull-request@v6 + uses: peter-evans/create-pull-request@v7 with: token: ${{ secrets.UPDATE_BOT_TOKEN }} push-to-fork: ci-forks/LocalAI diff --git a/.github/workflows/bump_docs.yaml b/.github/workflows/bump_docs.yaml index 218dcc61..6eb390df 100644 --- a/.github/workflows/bump_docs.yaml +++ b/.github/workflows/bump_docs.yaml @@ -17,7 +17,7 @@ jobs: run: | bash .github/bump_docs.sh ${{ matrix.repository }} - name: Create Pull Request - uses: peter-evans/create-pull-request@v6 + uses: peter-evans/create-pull-request@v7 with: token: ${{ secrets.UPDATE_BOT_TOKEN }} push-to-fork: ci-forks/LocalAI diff --git a/.github/workflows/checksum_checker.yaml b/.github/workflows/checksum_checker.yaml index 4f95a4e2..7b85ad35 100644 --- a/.github/workflows/checksum_checker.yaml +++ b/.github/workflows/checksum_checker.yaml @@ -36,7 +36,7 @@ jobs: sudo chmod 777 /hf_cache bash .github/checksum_checker.sh gallery/index.yaml - name: Create Pull Request - uses: peter-evans/create-pull-request@v6 + uses: peter-evans/create-pull-request@v7 with: token: ${{ secrets.UPDATE_BOT_TOKEN }} push-to-fork: ci-forks/LocalAI diff --git a/.github/workflows/update_swagger.yaml b/.github/workflows/update_swagger.yaml index b59e78c0..fb4762f8 100644 --- a/.github/workflows/update_swagger.yaml +++ 
b/.github/workflows/update_swagger.yaml @@ -25,7 +25,7 @@ jobs: run: | make protogen-go swagger - name: Create Pull Request - uses: peter-evans/create-pull-request@v6 + uses: peter-evans/create-pull-request@v7 with: token: ${{ secrets.UPDATE_BOT_TOKEN }} push-to-fork: ci-forks/LocalAI From 4cfa040f1788977eb7672f94d37668f433d4caa5 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 10 Sep 2024 05:32:16 +0000 Subject: [PATCH 0085/1530] chore(deps): Bump openai from 1.43.0 to 1.44.0 in /examples/functions (#3522) Bumps [openai](https://github.com/openai/openai-python) from 1.43.0 to 1.44.0. - [Release notes](https://github.com/openai/openai-python/releases) - [Changelog](https://github.com/openai/openai-python/blob/main/CHANGELOG.md) - [Commits](https://github.com/openai/openai-python/compare/v1.43.0...v1.44.0) --- updated-dependencies: - dependency-name: openai dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- examples/functions/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/functions/requirements.txt b/examples/functions/requirements.txt index 85f18c9f..46875f03 100644 --- a/examples/functions/requirements.txt +++ b/examples/functions/requirements.txt @@ -1,2 +1,2 @@ langchain==0.2.15 -openai==1.43.0 +openai==1.44.0 From 3be71811ca73fa078e1e0516ac570357a0e3abb0 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 10 Sep 2024 06:55:30 +0000 Subject: [PATCH 0086/1530] chore(deps): Bump langchain from 0.2.15 to 0.2.16 in /examples/langchain/langchainpy-localai-example (#3502) chore(deps): Bump langchain Bumps [langchain](https://github.com/langchain-ai/langchain) from 0.2.15 to 0.2.16. 
- [Release notes](https://github.com/langchain-ai/langchain/releases) - [Commits](https://github.com/langchain-ai/langchain/compare/langchain==0.2.15...langchain==0.2.16) --- updated-dependencies: - dependency-name: langchain dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- examples/langchain/langchainpy-localai-example/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/langchain/langchainpy-localai-example/requirements.txt b/examples/langchain/langchainpy-localai-example/requirements.txt index b0bae58f..17f81f80 100644 --- a/examples/langchain/langchainpy-localai-example/requirements.txt +++ b/examples/langchain/langchainpy-localai-example/requirements.txt @@ -10,7 +10,7 @@ debugpy==1.8.2 frozenlist==1.4.1 greenlet==3.0.3 idna==3.8 -langchain==0.2.15 +langchain==0.2.16 langchain-community==0.2.16 marshmallow==3.22.0 marshmallow-enum==1.5.1 From 300f2779e425b97cb1f9f81d0ac2495c62e3afb0 Mon Sep 17 00:00:00 2001 From: Dave Date: Tue, 10 Sep 2024 02:57:16 -0400 Subject: [PATCH 0087/1530] fix: speedup and improve cachability of docker build of `builder-sd` (#3430) fix: speedup and improve cachability of docker build of `builder-sd` (#3430) --------- Signed-off-by: Dave Lee --- Dockerfile | 25 ++++++++++++++++++------- core/backend/transcript.go | 10 +++++----- 2 files changed, 23 insertions(+), 12 deletions(-) diff --git a/Dockerfile b/Dockerfile index 14e037e6..b86cc706 100644 --- a/Dockerfile +++ b/Dockerfile @@ -263,14 +263,20 @@ EOT # In most cases, builder is the image you should be using - however, this can save build time if one just needs to copy backend-assets/grpc/stablediffusion and nothing else. FROM builder-base AS builder-sd -COPY . . -COPY .git . 
+# stablediffusion does not tolerate a newer version of abseil, copy only over enough elements to build it +COPY Makefile . +COPY go.mod . +COPY go.sum . +COPY backend/backend.proto ./backend/backend.proto +COPY backend/go/image/stablediffusion ./backend/go/image/stablediffusion +COPY pkg/grpc ./pkg/grpc +COPY pkg/stablediffusion ./pkg/stablediffusion +RUN git init +RUN make sources/go-stable-diffusion +RUN touch prepare-sources -RUN make prepare - - -# stablediffusion does not tolerate a newer version of abseil, build it first -RUN GRPC_BACKENDS=backend-assets/grpc/stablediffusion make build +# Actually build the backend +RUN GRPC_BACKENDS=backend-assets/grpc/stablediffusion make backend-assets/grpc/stablediffusion ################################### ################################### @@ -285,6 +291,11 @@ COPY --from=grpc /opt/grpc /usr/local # Rebuild with defaults backends WORKDIR /build +COPY . . +COPY .git . + +RUN make prepare + ## Build the binary ## If it's CUDA, we want to skip some of the llama-compat backends to save space ## We only leave the most CPU-optimized variant and the fallback for the cublas build diff --git a/core/backend/transcript.go b/core/backend/transcript.go index ed3e24a5..6ebc7c10 100644 --- a/core/backend/transcript.go +++ b/core/backend/transcript.go @@ -9,7 +9,7 @@ import ( "github.com/mudler/LocalAI/core/schema" "github.com/mudler/LocalAI/pkg/grpc/proto" - model "github.com/mudler/LocalAI/pkg/model" + "github.com/mudler/LocalAI/pkg/model" ) func ModelTranscription(audio, language string, translate bool, ml *model.ModelLoader, backendConfig config.BackendConfig, appConfig *config.ApplicationConfig) (*schema.TranscriptionResult, error) { @@ -22,16 +22,16 @@ func ModelTranscription(audio, language string, translate bool, ml *model.ModelL model.WithAssetDir(appConfig.AssetsDestination), }) - whisperModel, err := ml.BackendLoader(opts...) + transcriptionModel, err := ml.BackendLoader(opts...) 
if err != nil { return nil, err } - if whisperModel == nil { - return nil, fmt.Errorf("could not load whisper model") + if transcriptionModel == nil { + return nil, fmt.Errorf("could not load transcription model") } - r, err := whisperModel.AudioTranscription(context.Background(), &proto.TranscriptRequest{ + r, err := transcriptionModel.AudioTranscription(context.Background(), &proto.TranscriptRequest{ Dst: audio, Language: language, Translate: translate, From eda5c1422b6a80d7edd35c12720158b803096646 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 10 Sep 2024 07:43:18 +0000 Subject: [PATCH 0088/1530] chore(deps): Bump numpy from 2.1.0 to 2.1.1 in /examples/langchain/langchainpy-localai-example (#3503) chore(deps): Bump numpy Bumps [numpy](https://github.com/numpy/numpy) from 2.1.0 to 2.1.1. - [Release notes](https://github.com/numpy/numpy/releases) - [Changelog](https://github.com/numpy/numpy/blob/main/doc/RELEASE_WALKTHROUGH.rst) - [Commits](https://github.com/numpy/numpy/compare/v2.1.0...v2.1.1) --- updated-dependencies: - dependency-name: numpy dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- examples/langchain/langchainpy-localai-example/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/langchain/langchainpy-localai-example/requirements.txt b/examples/langchain/langchainpy-localai-example/requirements.txt index 17f81f80..0c7be917 100644 --- a/examples/langchain/langchainpy-localai-example/requirements.txt +++ b/examples/langchain/langchainpy-localai-example/requirements.txt @@ -17,7 +17,7 @@ marshmallow-enum==1.5.1 multidict==6.0.5 mypy-extensions==1.0.0 numexpr==2.10.1 -numpy==2.1.0 +numpy==2.1.1 openai==1.44.0 openapi-schema-pydantic==1.2.4 packaging>=23.2 From 535f77176111ea8fdda0cc928328b8c8b9ae2c45 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 10 Sep 2024 07:57:35 +0000 Subject: [PATCH 0089/1530] chore(deps): Bump llama-index from 0.11.4 to 0.11.7 in /examples/langchain-chroma (#3508) chore(deps): Bump llama-index in /examples/langchain-chroma Bumps [llama-index](https://github.com/run-llama/llama_index) from 0.11.4 to 0.11.7. - [Release notes](https://github.com/run-llama/llama_index/releases) - [Changelog](https://github.com/run-llama/llama_index/blob/main/CHANGELOG.md) - [Commits](https://github.com/run-llama/llama_index/compare/v0.11.4...v0.11.7) --- updated-dependencies: - dependency-name: llama-index dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- examples/langchain-chroma/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/langchain-chroma/requirements.txt b/examples/langchain-chroma/requirements.txt index 6aee2421..d2b38ea7 100644 --- a/examples/langchain-chroma/requirements.txt +++ b/examples/langchain-chroma/requirements.txt @@ -1,4 +1,4 @@ langchain==0.2.16 openai==1.43.0 chromadb==0.5.5 -llama-index==0.11.4 \ No newline at end of file +llama-index==0.11.7 \ No newline at end of file From 48a1a7da2336cc769eaf0e6b681ddf74c2cef834 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 10 Sep 2024 08:04:52 +0000 Subject: [PATCH 0090/1530] chore(deps): Bump langchain from 0.2.15 to 0.2.16 in /examples/functions (#3521) Bumps [langchain](https://github.com/langchain-ai/langchain) from 0.2.15 to 0.2.16. - [Release notes](https://github.com/langchain-ai/langchain/releases) - [Commits](https://github.com/langchain-ai/langchain/compare/langchain==0.2.15...langchain==0.2.16) --- updated-dependencies: - dependency-name: langchain dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- examples/functions/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/functions/requirements.txt b/examples/functions/requirements.txt index 46875f03..8258885a 100644 --- a/examples/functions/requirements.txt +++ b/examples/functions/requirements.txt @@ -1,2 +1,2 @@ -langchain==0.2.15 +langchain==0.2.16 openai==1.44.0 From 8f45852273408c97e8bd04c5f53d601effedadd5 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 10 Sep 2024 09:34:41 +0000 Subject: [PATCH 0091/1530] chore(deps): Bump openai from 1.43.0 to 1.44.1 in /examples/langchain-chroma (#3532) chore(deps): Bump openai in /examples/langchain-chroma Bumps [openai](https://github.com/openai/openai-python) from 1.43.0 to 1.44.1. - [Release notes](https://github.com/openai/openai-python/releases) - [Changelog](https://github.com/openai/openai-python/blob/main/CHANGELOG.md) - [Commits](https://github.com/openai/openai-python/compare/v1.43.0...v1.44.1) --- updated-dependencies: - dependency-name: openai dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- examples/langchain-chroma/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/langchain-chroma/requirements.txt b/examples/langchain-chroma/requirements.txt index d2b38ea7..c9bce6e9 100644 --- a/examples/langchain-chroma/requirements.txt +++ b/examples/langchain-chroma/requirements.txt @@ -1,4 +1,4 @@ langchain==0.2.16 -openai==1.43.0 +openai==1.44.1 chromadb==0.5.5 llama-index==0.11.7 \ No newline at end of file From b7496dea9be938c7d51b9ffbc1f07246481db66b Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 11 Sep 2024 09:15:30 +0200 Subject: [PATCH 0092/1530] chore(deps): Bump yarl from 1.9.7 to 1.11.0 in /examples/langchain/langchainpy-localai-example (#3501) chore(deps): Bump yarl Bumps [yarl](https://github.com/aio-libs/yarl) from 1.9.7 to 1.11.0. - [Release notes](https://github.com/aio-libs/yarl/releases) - [Changelog](https://github.com/aio-libs/yarl/blob/master/CHANGES.rst) - [Commits](https://github.com/aio-libs/yarl/compare/v1.9.7...v1.11.0) --- updated-dependencies: - dependency-name: yarl dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- examples/langchain/langchainpy-localai-example/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/langchain/langchainpy-localai-example/requirements.txt b/examples/langchain/langchainpy-localai-example/requirements.txt index 0c7be917..75323005 100644 --- a/examples/langchain/langchainpy-localai-example/requirements.txt +++ b/examples/langchain/langchainpy-localai-example/requirements.txt @@ -30,4 +30,4 @@ tqdm==4.66.5 typing-inspect==0.9.0 typing_extensions==4.12.2 urllib3==2.2.2 -yarl==1.9.7 +yarl==1.11.0 From a7ac2f7bb0800b13b2cb2f305528ead6db9fd695 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 11 Sep 2024 09:15:52 +0200 Subject: [PATCH 0093/1530] chore(deps): Bump llama-index from 0.11.4 to 0.11.7 in /examples/chainlit (#3516) chore(deps): Bump llama-index in /examples/chainlit Bumps [llama-index](https://github.com/run-llama/llama_index) from 0.11.4 to 0.11.7. - [Release notes](https://github.com/run-llama/llama_index/releases) - [Changelog](https://github.com/run-llama/llama_index/blob/main/CHANGELOG.md) - [Commits](https://github.com/run-llama/llama_index/compare/v0.11.4...v0.11.7) --- updated-dependencies: - dependency-name: llama-index dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- examples/chainlit/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/chainlit/requirements.txt b/examples/chainlit/requirements.txt index 8654ea99..69212e28 100644 --- a/examples/chainlit/requirements.txt +++ b/examples/chainlit/requirements.txt @@ -1,4 +1,4 @@ -llama_index==0.11.4 +llama_index==0.11.7 requests==2.32.3 weaviate_client==4.6.7 transformers From e35d8169b1cc1848cc339cb2525b3e6d42a1393b Mon Sep 17 00:00:00 2001 From: "LocalAI [bot]" <139863280+localai-bot@users.noreply.github.com> Date: Thu, 12 Sep 2024 08:52:27 +0200 Subject: [PATCH 0094/1530] chore: :arrow_up: Update ggerganov/whisper.cpp to `a551933542d956ae84634937acd2942eb40efaaf` (#3534) :arrow_up: Update ggerganov/whisper.cpp Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com> Co-authored-by: mudler <2420543+mudler@users.noreply.github.com> --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index fe05dc1a..e0e15cfb 100644 --- a/Makefile +++ b/Makefile @@ -16,7 +16,7 @@ RWKV_VERSION?=661e7ae26d442f5cfebd2a0881b44e8c55949ec6 # whisper.cpp version WHISPER_REPO?=https://github.com/ggerganov/whisper.cpp -WHISPER_CPP_VERSION?=5caa19240d55bfd6ee316d50fbad32c6e9c39528 +WHISPER_CPP_VERSION?=a551933542d956ae84634937acd2942eb40efaaf # bert.cpp version BERT_REPO?=https://github.com/go-skynet/go-bert.cpp From d51444d606e1c616c396e37d7413a8a562714cb6 Mon Sep 17 00:00:00 2001 From: Ettore Di Giacinto Date: Thu, 12 Sep 2024 20:55:27 +0200 Subject: [PATCH 0095/1530] chore(deps): update llama.cpp (#3497) * Apply llava patch Signed-off-by: Ettore Di Giacinto --- Makefile | 2 +- backend/cpp/llama/grpc-server.cpp | 107 +++++++++++------------ backend/cpp/llama/patches/01-llava.patch | 13 +++ backend/cpp/llama/prepare.sh | 7 ++ backend/cpp/llama/utils.hpp | 27 ------ 5 files 
changed, 70 insertions(+), 86 deletions(-) create mode 100644 backend/cpp/llama/patches/01-llava.patch diff --git a/Makefile b/Makefile index e0e15cfb..3d9ea592 100644 --- a/Makefile +++ b/Makefile @@ -8,7 +8,7 @@ DETECT_LIBS?=true # llama.cpp versions GOLLAMA_REPO?=https://github.com/go-skynet/go-llama.cpp GOLLAMA_VERSION?=2b57a8ae43e4699d3dc5d1496a1ccd42922993be -CPPLLAMA_VERSION?=815b1fb20a53e439882171757825bacb1350de04 +CPPLLAMA_VERSION?=e6b7801bd189d102d901d3e72035611a25456ef1 # go-rwkv version RWKV_REPO?=https://github.com/donomii/go-rwkv.cpp diff --git a/backend/cpp/llama/grpc-server.cpp b/backend/cpp/llama/grpc-server.cpp index e1b6f868..a46b4ee0 100644 --- a/backend/cpp/llama/grpc-server.cpp +++ b/backend/cpp/llama/grpc-server.cpp @@ -17,11 +17,10 @@ #include "common.h" #include "json.hpp" #include "llama.h" -#include "grammar-parser.h" #include "backend.pb.h" #include "backend.grpc.pb.h" #include "utils.hpp" - +#include "sampling.h" // include std::regex #include #include @@ -203,8 +202,8 @@ struct llama_client_slot std::string stopping_word; // sampling - struct llama_sampling_params sparams; - llama_sampling_context *ctx_sampling = nullptr; + struct gpt_sampler_params sparams; + gpt_sampler *ctx_sampling = nullptr; int32_t ga_i = 0; // group-attention state int32_t ga_n = 1; // group-attention factor @@ -619,7 +618,7 @@ struct llama_server_context bool launch_slot_with_data(llama_client_slot* &slot, json data) { slot_params default_params; - llama_sampling_params default_sparams; + gpt_sampler_params default_sparams; slot->params.stream = json_value(data, "stream", false); slot->params.cache_prompt = json_value(data, "cache_prompt", false); @@ -628,7 +627,7 @@ struct llama_server_context slot->sparams.top_p = json_value(data, "top_p", default_sparams.top_p); slot->sparams.min_p = json_value(data, "min_p", default_sparams.min_p); slot->sparams.tfs_z = json_value(data, "tfs_z", default_sparams.tfs_z); - slot->sparams.typical_p = json_value(data, 
"typical_p", default_sparams.typical_p); + slot->sparams.typ_p = json_value(data, "typical_p", default_sparams.typ_p); slot->sparams.temp = json_value(data, "temperature", default_sparams.temp); slot->sparams.dynatemp_range = json_value(data, "dynatemp_range", default_sparams.dynatemp_range); slot->sparams.dynatemp_exponent = json_value(data, "dynatemp_exponent", default_sparams.dynatemp_exponent); @@ -641,7 +640,7 @@ struct llama_server_context slot->sparams.mirostat_eta = json_value(data, "mirostat_eta", default_sparams.mirostat_eta); slot->sparams.penalize_nl = json_value(data, "penalize_nl", default_sparams.penalize_nl); slot->params.n_keep = json_value(data, "n_keep", slot->params.n_keep); - slot->params.seed = json_value(data, "seed", default_params.seed); + slot->sparams.seed = json_value(data, "seed", default_sparams.seed); slot->sparams.grammar = json_value(data, "grammar", default_sparams.grammar); slot->sparams.n_probs = json_value(data, "n_probs", default_sparams.n_probs); slot->sparams.min_keep = json_value(data, "min_keep", default_sparams.min_keep); @@ -665,6 +664,7 @@ struct llama_server_context slot->params.input_prefix = ""; } + if (data.count("input_suffix") != 0) { slot->params.input_suffix = data["input_suffix"]; @@ -683,6 +683,10 @@ struct llama_server_context slot->prompt = ""; } + if (json_value(data, "ignore_eos", false)) { + slot->sparams.logit_bias.push_back({llama_token_eos(model), -INFINITY}); + } + /* slot->sparams.penalty_prompt_tokens.clear(); slot->sparams.use_penalty_prompt_tokens = false; const auto &penalty_prompt = data.find("penalty_prompt"); @@ -718,14 +722,10 @@ struct llama_server_context slot->sparams.use_penalty_prompt_tokens = true; } } + */ slot->sparams.logit_bias.clear(); - if (json_value(data, "ignore_eos", false)) - { - slot->sparams.logit_bias[llama_token_eos(model)] = -INFINITY; - } - const auto &logit_bias = data.find("logit_bias"); if (logit_bias != data.end() && logit_bias->is_array()) { @@ -753,7 +753,7 @@ 
struct llama_server_context llama_token tok = el[0].get(); if (tok >= 0 && tok < n_vocab) { - slot->sparams.logit_bias[tok] = bias; + slot->sparams.logit_bias.push_back({tok, bias}); } } else if (el[0].is_string()) @@ -761,13 +761,13 @@ struct llama_server_context auto toks = llama_tokenize(model, el[0].get(), false); for (auto tok : toks) { - slot->sparams.logit_bias[tok] = bias; + slot->sparams.logit_bias.push_back({tok, bias}); } } } } } - + slot->params.antiprompt.clear(); const auto &stop = data.find("stop"); @@ -781,24 +781,22 @@ struct llama_server_context } } } - - const auto &samplers_sequence = data.find("samplers"); - if (samplers_sequence != data.end() && samplers_sequence->is_array()) - { + + const auto & samplers = data.find("samplers"); + if (samplers != data.end() && samplers->is_array()) { std::vector sampler_names; - for (const auto &sampler_name : *samplers_sequence) - { - if (sampler_name.is_string()) - { - sampler_names.emplace_back(sampler_name); + for (const auto & name : *samplers) { + if (name.is_string()) { + sampler_names.emplace_back(name); + } } - } - slot->sparams.samplers_sequence = llama_sampling_types_from_names(sampler_names, false); + slot->sparams.samplers = gpt_sampler_types_from_names(sampler_names, false); } else { - slot->sparams.samplers_sequence = default_sparams.samplers_sequence; + slot->sparams.samplers = default_sparams.samplers; } + if (multimodal) { @@ -875,10 +873,10 @@ struct llama_server_context if (slot->ctx_sampling != nullptr) { - llama_sampling_free(slot->ctx_sampling); + gpt_sampler_free(slot->ctx_sampling); } - slot->ctx_sampling = llama_sampling_init(slot->sparams); - llama_set_rng_seed(ctx, slot->params.seed); + slot->ctx_sampling = gpt_sampler_init(model, slot->sparams); + //llama_set_rng_seed(ctx, slot->params.seed); slot->command = LOAD_PROMPT; all_slots_are_idle = false; @@ -888,7 +886,7 @@ struct llama_server_context {"task_id", slot->task_id}, }); - LOG_TEE("sampling: \n%s\n", 
llama_sampling_print(slot->sparams).c_str()); + // LOG_TEE("sampling: \n%s\n", llama_sampling_print(slot->sparams).c_str()); return true; } @@ -1006,11 +1004,13 @@ struct llama_server_context slot.generated_text += token_str; slot.has_next_token = true; +/* if (slot.ctx_sampling->params.use_penalty_prompt_tokens && result.tok != -1) { // we can change penalty_prompt_tokens because it is always created from scratch each request slot.ctx_sampling->params.penalty_prompt_tokens.push_back(result.tok); } + */ // check if there is incomplete UTF-8 character at the end bool incomplete = false; @@ -1144,13 +1144,11 @@ struct llama_server_context json get_formated_generation(llama_client_slot &slot) { - const auto eos_bias = slot.sparams.logit_bias.find(llama_token_eos(model)); - const bool ignore_eos = eos_bias != slot.sparams.logit_bias.end() && - eos_bias->second < 0.0f && std::isinf(eos_bias->second); - std::vector samplers_sequence; - for (const auto &sampler_type : slot.sparams.samplers_sequence) + std::vector samplers; + samplers.reserve(slot.sparams.samplers.size()); + for (const auto & sampler : slot.sparams.samplers) { - samplers_sequence.emplace_back(llama_sampling_type_to_str(sampler_type)); + samplers.emplace_back(gpt_sampler_type_to_str(sampler)); } return json { @@ -1165,13 +1163,11 @@ struct llama_server_context {"top_p", slot.sparams.top_p}, {"min_p", slot.sparams.min_p}, {"tfs_z", slot.sparams.tfs_z}, - {"typical_p", slot.sparams.typical_p}, + {"typical_p", slot.sparams.typ_p}, {"repeat_last_n", slot.sparams.penalty_last_n}, {"repeat_penalty", slot.sparams.penalty_repeat}, {"presence_penalty", slot.sparams.penalty_present}, {"frequency_penalty", slot.sparams.penalty_freq}, - {"penalty_prompt_tokens", slot.sparams.penalty_prompt_tokens}, - {"use_penalty_prompt_tokens", slot.sparams.use_penalty_prompt_tokens}, {"mirostat", slot.sparams.mirostat}, {"mirostat_tau", slot.sparams.mirostat_tau}, {"mirostat_eta", slot.sparams.mirostat_eta}, @@ -1179,13 +1175,13 @@ 
struct llama_server_context {"stop", slot.params.antiprompt}, {"n_predict", slot.params.n_predict}, {"n_keep", params.n_keep}, - {"ignore_eos", ignore_eos}, + {"ignore_eos", slot.sparams.ignore_eos}, {"stream", slot.params.stream}, - {"logit_bias", slot.sparams.logit_bias}, + // {"logit_bias", slot.sparams.logit_bias}, {"n_probs", slot.sparams.n_probs}, {"min_keep", slot.sparams.min_keep}, {"grammar", slot.sparams.grammar}, - {"samplers", samplers_sequence} + {"samplers", samplers} }; } @@ -1714,7 +1710,7 @@ struct llama_server_context if (!slot.params.cache_prompt) { - llama_sampling_reset(slot.ctx_sampling); + gpt_sampler_reset(slot.ctx_sampling); slot.n_past = 0; slot.n_past_se = 0; @@ -1726,7 +1722,7 @@ struct llama_server_context // push the prompt into the sampling context (do not apply grammar) for (auto &token : prompt_tokens) { - llama_sampling_accept(slot.ctx_sampling, ctx, token, false); + gpt_sampler_accept(slot.ctx_sampling, token, false); } slot.n_past = common_part(slot.cache_tokens, prompt_tokens); @@ -1934,9 +1930,9 @@ struct llama_server_context } completion_token_output result; - const llama_token id = llama_sampling_sample(slot.ctx_sampling, ctx, NULL, slot.i_batch - i); + const llama_token id = gpt_sampler_sample(slot.ctx_sampling, ctx, slot.i_batch - i); - llama_sampling_accept(slot.ctx_sampling, ctx, id, true); + gpt_sampler_accept(slot.ctx_sampling, id, true); slot.n_decoded += 1; if (slot.n_decoded == 1) @@ -1946,19 +1942,14 @@ struct llama_server_context metrics.on_prompt_eval(slot); } - llama_token_data_array cur_p = { slot.ctx_sampling->cur.data(), slot.ctx_sampling->cur.size(), false }; result.tok = id; + const auto * cur_p = gpt_sampler_get_candidates(slot.ctx_sampling); - const int32_t n_probs = slot.sparams.n_probs; - if (slot.sparams.temp <= 0 && n_probs > 0) - { - // for llama_sample_token_greedy we need to sort candidates - llama_sample_softmax(ctx, &cur_p); - } - - for (size_t i = 0; i < std::min(cur_p.size, (size_t)n_probs); 
++i) - { - result.probs.push_back({cur_p.data[i].id, cur_p.data[i].p}); + for (size_t i = 0; i < (size_t) slot.sparams.n_probs; ++i) { + result.probs.push_back({ + cur_p->data[i].id, + i >= cur_p->size ? 0.0f : cur_p->data[i].p, + }); } if (!process_token(result, slot)) diff --git a/backend/cpp/llama/patches/01-llava.patch b/backend/cpp/llama/patches/01-llava.patch new file mode 100644 index 00000000..fa122da2 --- /dev/null +++ b/backend/cpp/llama/patches/01-llava.patch @@ -0,0 +1,13 @@ +diff --git a/examples/llava/clip.cpp b/examples/llava/clip.cpp +index 342042ff..224db9b5 100644 +--- a/examples/llava/clip.cpp ++++ b/examples/llava/clip.cpp +@@ -2419,7 +2419,7 @@ bool clip_image_batch_encode(clip_ctx * ctx, const int n_threads, const clip_ima + struct ggml_tensor * patches = ggml_graph_get_tensor(gf, "patches"); + int* patches_data = (int*)malloc(ggml_nbytes(patches)); + for (int i = 0; i < num_patches; i++) { +- patches_data[i] = i + 1; ++ patches_data[i] = i; + } + ggml_backend_tensor_set(patches, patches_data, 0, ggml_nbytes(patches)); + free(patches_data); \ No newline at end of file diff --git a/backend/cpp/llama/prepare.sh b/backend/cpp/llama/prepare.sh index 6c00f27c..4c8393b9 100644 --- a/backend/cpp/llama/prepare.sh +++ b/backend/cpp/llama/prepare.sh @@ -1,5 +1,12 @@ #!/bin/bash +## Patches +## Apply patches from the `patches` directory +for patch in $(ls patches); do + echo "Applying patch $patch" + patch -d llama.cpp/ -p1 < patches/$patch +done + cp -r CMakeLists.txt llama.cpp/examples/grpc-server/ cp -r grpc-server.cpp llama.cpp/examples/grpc-server/ cp -rfv json.hpp llama.cpp/examples/grpc-server/ diff --git a/backend/cpp/llama/utils.hpp b/backend/cpp/llama/utils.hpp index c5dafbf0..198b6f26 100644 --- a/backend/cpp/llama/utils.hpp +++ b/backend/cpp/llama/utils.hpp @@ -480,31 +480,4 @@ static inline std::vector base64_decode(const std::string & encoded_str } return ret; -} - -// -// random string / id -// - -static std::string random_string() -{ - 
static const std::string str("0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz"); - - std::random_device rd; - std::mt19937 generator(rd()); - - std::string result(32, ' '); - - for (int i = 0; i < 32; ++i) { - result[i] = str[generator() % str.size()]; - } - - return result; -} - -static std::string gen_chatcmplid() -{ - std::stringstream chatcmplid; - chatcmplid << "chatcmpl-" << random_string(); - return chatcmplid.str(); } \ No newline at end of file From cf747bcdeccaef7ef2128b3e0329da76871f83d7 Mon Sep 17 00:00:00 2001 From: Ettore Di Giacinto Date: Fri, 13 Sep 2024 13:27:36 +0200 Subject: [PATCH 0096/1530] feat: extract output with regexes from LLMs (#3491) * feat: extract output with regexes from LLMs This changset adds `extract_regex` to the LLM config. It is a list of regexes that can match output and will be used to re extract text from the LLM output. This is particularly useful for LLMs which outputs final results into tags. Signed-off-by: Ettore Di Giacinto * Add tests, enhance output in case of configuration error Signed-off-by: Ettore Di Giacinto --------- Signed-off-by: Ettore Di Giacinto --- core/backend/backend_suite_test.go | 13 ++++ core/backend/llm.go | 28 +++++++- core/backend/llm_test.go | 109 +++++++++++++++++++++++++++++ core/config/backend_config.go | 1 + core/http/endpoints/openai/chat.go | 8 +-- 5 files changed, 154 insertions(+), 5 deletions(-) create mode 100644 core/backend/backend_suite_test.go create mode 100644 core/backend/llm_test.go diff --git a/core/backend/backend_suite_test.go b/core/backend/backend_suite_test.go new file mode 100644 index 00000000..541c91f6 --- /dev/null +++ b/core/backend/backend_suite_test.go @@ -0,0 +1,13 @@ +package backend_test + +import ( + "testing" + + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" +) + +func TestBackend(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "Backend test suite") +} diff --git a/core/backend/llm.go b/core/backend/llm.go index 72c4ad9f..2b4564a8 100644 --- a/core/backend/llm.go +++ b/core/backend/llm.go @@ -9,6 +9,8 @@ import ( "sync" "unicode/utf8" + "github.com/rs/zerolog/log" + "github.com/mudler/LocalAI/core/config" "github.com/mudler/LocalAI/core/schema" @@ -181,13 +183,37 @@ func Finetune(config config.BackendConfig, input, prediction string) string { mu.Lock() reg, ok := cutstrings[c] if !ok { - cutstrings[c] = regexp.MustCompile(c) + r, err := regexp.Compile(c) + if err != nil { + log.Fatal().Err(err).Msg("failed to compile regex") + } + cutstrings[c] = r reg = cutstrings[c] } mu.Unlock() prediction = reg.ReplaceAllString(prediction, "") } + // extract results from the response which can be for instance inside XML tags + var predResult string + for _, r := range config.ExtractRegex { + mu.Lock() + reg, ok := cutstrings[r] + if !ok { + regex, err := regexp.Compile(r) + if err != nil { + log.Fatal().Err(err).Msg("failed to compile regex") + } + cutstrings[r] = regex + reg = regex + } + mu.Unlock() + predResult += reg.FindString(prediction) + } + if predResult != "" { + prediction = predResult + } + for _, c := range config.TrimSpace { prediction = strings.TrimSpace(strings.TrimPrefix(prediction, c)) } diff --git a/core/backend/llm_test.go b/core/backend/llm_test.go new file mode 100644 index 00000000..f7630702 --- /dev/null +++ b/core/backend/llm_test.go @@ -0,0 +1,109 @@ +package backend_test + +import ( + . "github.com/mudler/LocalAI/core/backend" + "github.com/mudler/LocalAI/core/config" + "github.com/mudler/LocalAI/core/schema" + + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" +) + +var _ = Describe("LLM tests", func() { + Context("Finetune LLM output", func() { + var ( + testConfig config.BackendConfig + input string + prediction string + result string + ) + + BeforeEach(func() { + testConfig = config.BackendConfig{ + PredictionOptions: schema.PredictionOptions{ + Echo: false, + }, + LLMConfig: config.LLMConfig{ + Cutstrings: []string{`<.*?>`}, // Example regex for removing XML tags + ExtractRegex: []string{`(.*?)`}, // Example regex to extract from tags + TrimSpace: []string{" ", "\n"}, + TrimSuffix: []string{".", "!"}, + }, + } + }) + + Context("when echo is enabled", func() { + BeforeEach(func() { + testConfig.Echo = true + input = "Hello" + prediction = "World" + }) + + It("should prepend input to prediction", func() { + result = Finetune(testConfig, input, prediction) + Expect(result).To(Equal("HelloWorld")) + }) + }) + + Context("when echo is disabled", func() { + BeforeEach(func() { + testConfig.Echo = false + input = "Hello" + prediction = "World" + }) + + It("should not modify the prediction with input", func() { + result = Finetune(testConfig, input, prediction) + Expect(result).To(Equal("World")) + }) + }) + + Context("when cutstrings regex is applied", func() { + BeforeEach(func() { + input = "" + prediction = "
Hello
World" + }) + + It("should remove substrings matching cutstrings regex", func() { + result = Finetune(testConfig, input, prediction) + Expect(result).To(Equal("Hello World")) + }) + }) + + Context("when extract regex is applied", func() { + BeforeEach(func() { + input = "" + prediction = "42" + }) + + It("should extract substrings matching the extract regex", func() { + result = Finetune(testConfig, input, prediction) + Expect(result).To(Equal("42")) + }) + }) + + Context("when trimming spaces", func() { + BeforeEach(func() { + input = "" + prediction = " Hello World " + }) + + It("should trim spaces from the prediction", func() { + result = Finetune(testConfig, input, prediction) + Expect(result).To(Equal("Hello World")) + }) + }) + + Context("when trimming suffixes", func() { + BeforeEach(func() { + input = "" + prediction = "Hello World." + }) + + It("should trim suffixes from the prediction", func() { + result = Finetune(testConfig, input, prediction) + Expect(result).To(Equal("Hello World")) + }) + }) + }) +}) diff --git a/core/config/backend_config.go b/core/config/backend_config.go index b83e1a98..027e18a4 100644 --- a/core/config/backend_config.go +++ b/core/config/backend_config.go @@ -126,6 +126,7 @@ type LLMConfig struct { Grammar string `yaml:"grammar"` StopWords []string `yaml:"stopwords"` Cutstrings []string `yaml:"cutstrings"` + ExtractRegex []string `yaml:"extract_regex"` TrimSpace []string `yaml:"trimspace"` TrimSuffix []string `yaml:"trimsuffix"` diff --git a/core/http/endpoints/openai/chat.go b/core/http/endpoints/openai/chat.go index a979b7bc..8144bdcd 100644 --- a/core/http/endpoints/openai/chat.go +++ b/core/http/endpoints/openai/chat.go @@ -68,9 +68,9 @@ func ChatEndpoint(cl *config.BackendConfigLoader, ml *model.ModelLoader, startup textContentToReturn = functions.ParseTextContent(result, config.FunctionsConfig) result = functions.CleanupLLMResult(result, config.FunctionsConfig) - results := functions.ParseFunctionCall(result, 
config.FunctionsConfig) + functionResults := functions.ParseFunctionCall(result, config.FunctionsConfig) log.Debug().Msgf("Text content to return: %s", textContentToReturn) - noActionToRun := len(results) > 0 && results[0].Name == noAction || len(results) == 0 + noActionToRun := len(functionResults) > 0 && functionResults[0].Name == noAction || len(functionResults) == 0 switch { case noActionToRun: @@ -83,7 +83,7 @@ func ChatEndpoint(cl *config.BackendConfigLoader, ml *model.ModelLoader, startup } responses <- initialMessage - result, err := handleQuestion(config, req, ml, startupOptions, results, result, prompt) + result, err := handleQuestion(config, req, ml, startupOptions, functionResults, result, prompt) if err != nil { log.Error().Err(err).Msg("error handling question") return @@ -105,7 +105,7 @@ func ChatEndpoint(cl *config.BackendConfigLoader, ml *model.ModelLoader, startup responses <- resp default: - for i, ss := range results { + for i, ss := range functionResults { name, args := ss.Name, ss.Arguments initialMessage := schema.OpenAIResponse{ From 7fe6d0ad2be25e31fe38439182911d74ec2c569f Mon Sep 17 00:00:00 2001 From: Ettore Di Giacinto Date: Fri, 13 Sep 2024 19:19:26 +0200 Subject: [PATCH 0097/1530] chore(gosec): fix CI (#3537) downgrade to latest known version of the gosec action Signed-off-by: Ettore Di Giacinto --- .github/workflows/secscan.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/secscan.yaml b/.github/workflows/secscan.yaml index d9743d9e..db9db586 100644 --- a/.github/workflows/secscan.yaml +++ b/.github/workflows/secscan.yaml @@ -18,7 +18,7 @@ jobs: if: ${{ github.actor != 'dependabot[bot]' }} - name: Run Gosec Security Scanner if: ${{ github.actor != 'dependabot[bot]' }} - uses: securego/gosec@master + uses: securego/gosec@v2.21.0 with: # we let the report trigger content trigger a failure using the GitHub Security features. args: '-no-fail -fmt sarif -out results.sarif ./...' 
From 5213e79f5c3059616e461176564c97cca481e367 Mon Sep 17 00:00:00 2001 From: Ettore Di Giacinto Date: Fri, 13 Sep 2024 19:48:54 +0200 Subject: [PATCH 0098/1530] models(gallery): add azure_dusk-v0.2-iq-imatrix (#3538) Signed-off-by: Ettore Di Giacinto --- gallery/index.yaml | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) diff --git a/gallery/index.yaml b/gallery/index.yaml index 4939820d..86e81338 100644 --- a/gallery/index.yaml +++ b/gallery/index.yaml @@ -658,6 +658,23 @@ - filename: Mahou-1.3-llama3.1-8B.Q4_K_M.gguf sha256: 88bfdca2f6077d789d3e0f161d19711aa208a6d9a02cce96a2276c69413b3594 uri: huggingface://mradermacher/Mahou-1.3-llama3.1-8B-GGUF/Mahou-1.3-llama3.1-8B.Q4_K_M.gguf +- !!merge <<: *llama31 + name: "azure_dusk-v0.2-iq-imatrix" + # chatml + url: "github:mudler/LocalAI/gallery/chatml.yaml@master" + icon: https://cdn-uploads.huggingface.co/production/uploads/65d4cf2693a0a3744a27536c/n3-g_YTk3FY-DBzxXd28E.png + urls: + - https://huggingface.co/Lewdiculous/Azure_Dusk-v0.2-GGUF-IQ-Imatrix + description: | + "Following up on Crimson_Dawn-v0.2 we have Azure_Dusk-v0.2! Training on Mistral-Nemo-Base-2407 this time I've added significantly more data, as well as trained using RSLoRA as opposed to regular LoRA. Another key change is training on ChatML as opposed to Mistral Formatting." + by Author. 
+ overrides: + parameters: + model: Azure_Dusk-v0.2-Q4_K_M-imat.gguf + files: + - filename: Azure_Dusk-v0.2-Q4_K_M-imat.gguf + sha256: c03a670c00976d14c267a0322374ed488b2a5f4790eb509136ca4e75cbc10cf4 + uri: huggingface://Lewdiculous/Azure_Dusk-v0.2-GGUF-IQ-Imatrix/Azure_Dusk-v0.2-Q4_K_M-imat.gguf - &deepseek ## Deepseek url: "github:mudler/LocalAI/gallery/deepseek.yaml@master" From 925315ab5cd4f09e32cba08c5ca23bb9b215ba98 Mon Sep 17 00:00:00 2001 From: Ettore Di Giacinto Date: Fri, 13 Sep 2024 19:49:14 +0200 Subject: [PATCH 0099/1530] models(gallery): add mn-12b-lyra-v4-iq-imatrix (#3539) Signed-off-by: Ettore Di Giacinto --- gallery/index.yaml | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) diff --git a/gallery/index.yaml b/gallery/index.yaml index 86e81338..b70a9122 100644 --- a/gallery/index.yaml +++ b/gallery/index.yaml @@ -1212,6 +1212,23 @@ - filename: Pantheon-RP-1.6-12b-Nemo-Q4_K_M.gguf sha256: cf3465c183bf4ecbccd1b6b480f687e0160475b04c87e2f1e5ebc8baa0f4c7aa uri: huggingface://bartowski/Pantheon-RP-1.6-12b-Nemo-GGUF/Pantheon-RP-1.6-12b-Nemo-Q4_K_M.gguf +- !!merge <<: *mistral03 + name: "mn-12b-lyra-v4-iq-imatrix" + icon: https://cdn-uploads.huggingface.co/production/uploads/65d4cf2693a0a3744a27536c/dVoru83WOpwVjMlgZ_xhA.png + #chatml + url: "github:mudler/LocalAI/gallery/chatml.yaml@master" + urls: + - https://huggingface.co/Lewdiculous/MN-12B-Lyra-v4-GGUF-IQ-Imatrix + description: | + A finetune of Mistral Nemo by Sao10K. + Uses the ChatML prompt format. 
+ overrides: + parameters: + model: MN-12B-Lyra-v4-Q4_K_M-imat.gguf + files: + - filename: MN-12B-Lyra-v4-Q4_K_M-imat.gguf + sha256: 1989123481ca1936c8a2cbe278ff5d1d2b0ae63dbdc838bb36a6d7547b8087b3 + uri: huggingface://Lewdiculous/MN-12B-Lyra-v4-GGUF-IQ-Imatrix/MN-12B-Lyra-v4-Q4_K_M-imat.gguf - &mudler ### START mudler's LocalAI specific-models url: "github:mudler/LocalAI/gallery/mudler.yaml@master" From cbfab81c3599c8c2051209234edf0a3259b5efaa Mon Sep 17 00:00:00 2001 From: Ettore Di Giacinto Date: Fri, 13 Sep 2024 21:49:18 +0200 Subject: [PATCH 0100/1530] models(gallery): add datagemma models (#3540) Signed-off-by: Ettore Di Giacinto --- gallery/index.yaml | 28 ++++++++++++++++++++++++++++ 1 file changed, 28 insertions(+) diff --git a/gallery/index.yaml b/gallery/index.yaml index b70a9122..188576b2 100644 --- a/gallery/index.yaml +++ b/gallery/index.yaml @@ -1756,6 +1756,34 @@ - filename: Athena-codegemma-2-2b-it.Q4_K_M.gguf sha256: 59ce17023438b0da603dd211c7d39f78e7acac4108258ac0818a97a4ca7d64e3 uri: huggingface://mradermacher/Athena-codegemma-2-2b-it-GGUF/Athena-codegemma-2-2b-it.Q4_K_M.gguf +- !!merge <<: *gemma + name: "datagemma-rag-27b-it" + urls: + - https://huggingface.co/google/datagemma-rag-27b-it + - https://huggingface.co/bartowski/datagemma-rag-27b-it-GGUF + description: | + DataGemma is a series of fine-tuned Gemma 2 models used to help LLMs access and incorporate reliable public statistical data from Data Commons into their responses. DataGemma RAG is used with Retrieval Augmented Generation, where it is trained to take a user query and generate natural language queries that can be understood by Data Commons' existing natural language interface. More information can be found in this research paper. 
+ overrides: + parameters: + model: datagemma-rag-27b-it-Q4_K_M.gguf + files: + - filename: datagemma-rag-27b-it-Q4_K_M.gguf + sha256: 3dfcf51b05e3f0ab0979ad194de350edea71cb14444efa0a9f2ef5bfc80753f8 + uri: huggingface://bartowski/datagemma-rag-27b-it-GGUF/datagemma-rag-27b-it-Q4_K_M.gguf +- !!merge <<: *gemma + name: "datagemma-rig-27b-it" + urls: + - https://huggingface.co/google/datagemma-rig-27b-it + - https://huggingface.co/bartowski/datagemma-rig-27b-it-GGUF + description: | + DataGemma is a series of fine-tuned Gemma 2 models used to help LLMs access and incorporate reliable public statistical data from Data Commons into their responses. DataGemma RIG is used in the retrieval interleaved generation approach (based off of tool-use approaches), where it is trained to annotate a response with natural language queries to Data Commons’ existing natural language interface wherever there are statistics. More information can be found in this research paper. + overrides: + parameters: + model: datagemma-rig-27b-it-Q4_K_M.gguf + files: + - filename: datagemma-rig-27b-it-Q4_K_M.gguf + sha256: a6738ffbb49b6c46d220e2793df85c0538e9ac72398e32a0914ee5e55c3096ad + uri: huggingface://bartowski/datagemma-rig-27b-it-GGUF/datagemma-rig-27b-it-Q4_K_M.gguf - &llama3 url: "github:mudler/LocalAI/gallery/llama3-instruct.yaml@master" icon: https://cdn-uploads.huggingface.co/production/uploads/642cc1c253e76b4c2286c58e/aJJxKus1wP5N-euvHEUq7.png From 25e7661de268bb9cd31622fdb05052cf26ac4e9f Mon Sep 17 00:00:00 2001 From: Ettore Di Giacinto Date: Fri, 13 Sep 2024 21:52:13 +0200 Subject: [PATCH 0101/1530] chore(exllama): drop exllama backend (#3536) * chore(exllama): drop exllama backend For polishing and cleaning up it makes now sense to drop exllama which is completely unmaintained, and was only supporting the llamav1 architecture (nowadays it's superseded by llamav1) . 
Signed-off-by: Ettore Di Giacinto * chore(gosec): fix CI downgrade to latest known version of the gosec action Signed-off-by: Ettore Di Giacinto --------- Signed-off-by: Ettore Di Giacinto --- Dockerfile | 5 +- Makefile | 13 +- backend/python/exllama/.gitignore | 1 - backend/python/exllama/Makefile | 25 --- backend/python/exllama/README.md | 5 - backend/python/exllama/backend.py | 159 ------------------ backend/python/exllama/install.sh | 13 -- backend/python/exllama/requirements-cpu.txt | 3 - .../python/exllama/requirements-cublas11.txt | 4 - .../python/exllama/requirements-cublas12.txt | 3 - backend/python/exllama/requirements.txt | 4 - backend/python/exllama/run.sh | 7 - backend/python/exllama/test.sh | 6 - 13 files changed, 3 insertions(+), 245 deletions(-) delete mode 100644 backend/python/exllama/.gitignore delete mode 100644 backend/python/exllama/Makefile delete mode 100644 backend/python/exllama/README.md delete mode 100755 backend/python/exllama/backend.py delete mode 100755 backend/python/exllama/install.sh delete mode 100644 backend/python/exllama/requirements-cpu.txt delete mode 100644 backend/python/exllama/requirements-cublas11.txt delete mode 100644 backend/python/exllama/requirements-cublas12.txt delete mode 100644 backend/python/exllama/requirements.txt delete mode 100755 backend/python/exllama/run.sh delete mode 100755 backend/python/exllama/test.sh diff --git a/Dockerfile b/Dockerfile index b86cc706..f08cb9a0 100644 --- a/Dockerfile +++ b/Dockerfile @@ -13,7 +13,7 @@ ARG TARGETARCH ARG TARGETVARIANT ENV DEBIAN_FRONTEND=noninteractive -ENV 
EXTERNAL_GRPC_BACKENDS="coqui:/build/backend/python/coqui/run.sh,huggingface-embeddings:/build/backend/python/sentencetransformers/run.sh,transformers:/build/backend/python/transformers/run.sh,sentencetransformers:/build/backend/python/sentencetransformers/run.sh,rerankers:/build/backend/python/rerankers/run.sh,autogptq:/build/backend/python/autogptq/run.sh,bark:/build/backend/python/bark/run.sh,diffusers:/build/backend/python/diffusers/run.sh,exllama:/build/backend/python/exllama/run.sh,openvoice:/build/backend/python/openvoice/run.sh,vall-e-x:/build/backend/python/vall-e-x/run.sh,vllm:/build/backend/python/vllm/run.sh,mamba:/build/backend/python/mamba/run.sh,exllama2:/build/backend/python/exllama2/run.sh,transformers-musicgen:/build/backend/python/transformers-musicgen/run.sh,parler-tts:/build/backend/python/parler-tts/run.sh" +ENV EXTERNAL_GRPC_BACKENDS="coqui:/build/backend/python/coqui/run.sh,huggingface-embeddings:/build/backend/python/sentencetransformers/run.sh,transformers:/build/backend/python/transformers/run.sh,sentencetransformers:/build/backend/python/sentencetransformers/run.sh,rerankers:/build/backend/python/rerankers/run.sh,autogptq:/build/backend/python/autogptq/run.sh,bark:/build/backend/python/bark/run.sh,diffusers:/build/backend/python/diffusers/run.sh,openvoice:/build/backend/python/openvoice/run.sh,vall-e-x:/build/backend/python/vall-e-x/run.sh,vllm:/build/backend/python/vllm/run.sh,mamba:/build/backend/python/mamba/run.sh,exllama2:/build/backend/python/exllama2/run.sh,transformers-musicgen:/build/backend/python/transformers-musicgen/run.sh,parler-tts:/build/backend/python/parler-tts/run.sh" RUN apt-get update && \ @@ -418,9 +418,6 @@ RUN if [[ ( "${EXTRA_BACKENDS}" =~ "coqui" || -z "${EXTRA_BACKENDS}" ) && "$IMAG ; fi && \ if [[ ( "${EXTRA_BACKENDS}" =~ "transformers-musicgen" || -z "${EXTRA_BACKENDS}" ) && "$IMAGE_TYPE" == "extras" ]]; then \ make -C backend/python/transformers-musicgen \ - ; fi && \ - if [[ ( "${EXTRA_BACKENDS}" =~ 
"exllama1" || -z "${EXTRA_BACKENDS}" ) && "$IMAGE_TYPE" == "extras" ]]; then \ - make -C backend/python/exllama \ ; fi RUN if [[ ( "${EXTRA_BACKENDS}" =~ "vall-e-x" || -z "${EXTRA_BACKENDS}" ) && "$IMAGE_TYPE" == "extras" ]]; then \ diff --git a/Makefile b/Makefile index 3d9ea592..a3f0ffd0 100644 --- a/Makefile +++ b/Makefile @@ -534,10 +534,10 @@ protogen-go-clean: $(RM) bin/* .PHONY: protogen-python -protogen-python: autogptq-protogen bark-protogen coqui-protogen diffusers-protogen exllama-protogen exllama2-protogen mamba-protogen rerankers-protogen sentencetransformers-protogen transformers-protogen parler-tts-protogen transformers-musicgen-protogen vall-e-x-protogen vllm-protogen openvoice-protogen +protogen-python: autogptq-protogen bark-protogen coqui-protogen diffusers-protogen exllama2-protogen mamba-protogen rerankers-protogen sentencetransformers-protogen transformers-protogen parler-tts-protogen transformers-musicgen-protogen vall-e-x-protogen vllm-protogen openvoice-protogen .PHONY: protogen-python-clean -protogen-python-clean: autogptq-protogen-clean bark-protogen-clean coqui-protogen-clean diffusers-protogen-clean exllama-protogen-clean exllama2-protogen-clean mamba-protogen-clean sentencetransformers-protogen-clean rerankers-protogen-clean transformers-protogen-clean transformers-musicgen-protogen-clean parler-tts-protogen-clean vall-e-x-protogen-clean vllm-protogen-clean openvoice-protogen-clean +protogen-python-clean: autogptq-protogen-clean bark-protogen-clean coqui-protogen-clean diffusers-protogen-clean exllama2-protogen-clean mamba-protogen-clean sentencetransformers-protogen-clean rerankers-protogen-clean transformers-protogen-clean transformers-musicgen-protogen-clean parler-tts-protogen-clean vall-e-x-protogen-clean vllm-protogen-clean openvoice-protogen-clean .PHONY: autogptq-protogen autogptq-protogen: @@ -571,14 +571,6 @@ diffusers-protogen: diffusers-protogen-clean: $(MAKE) -C backend/python/diffusers protogen-clean -.PHONY: 
exllama-protogen -exllama-protogen: - $(MAKE) -C backend/python/exllama protogen - -.PHONY: exllama-protogen-clean -exllama-protogen-clean: - $(MAKE) -C backend/python/exllama protogen-clean - .PHONY: exllama2-protogen exllama2-protogen: $(MAKE) -C backend/python/exllama2 protogen @@ -675,7 +667,6 @@ prepare-extra-conda-environments: protogen-python $(MAKE) -C backend/python/parler-tts $(MAKE) -C backend/python/vall-e-x $(MAKE) -C backend/python/openvoice - $(MAKE) -C backend/python/exllama $(MAKE) -C backend/python/exllama2 prepare-test-extra: protogen-python diff --git a/backend/python/exllama/.gitignore b/backend/python/exllama/.gitignore deleted file mode 100644 index 1d3a0654..00000000 --- a/backend/python/exllama/.gitignore +++ /dev/null @@ -1 +0,0 @@ -source \ No newline at end of file diff --git a/backend/python/exllama/Makefile b/backend/python/exllama/Makefile deleted file mode 100644 index e6a67881..00000000 --- a/backend/python/exllama/Makefile +++ /dev/null @@ -1,25 +0,0 @@ -export CONDA_ENV_PATH = "exllama.yml" - -.PHONY: exllama -exllama: protogen - bash install.sh ${CONDA_ENV_PATH} - -.PHONY: run -run: protogen - @echo "Running exllama..." - bash run.sh - @echo "exllama run." - -.PHONY: protogen -protogen: backend_pb2_grpc.py backend_pb2.py - -.PHONY: protogen-clean -protogen-clean: - $(RM) backend_pb2_grpc.py backend_pb2.py - -backend_pb2_grpc.py backend_pb2.py: - python3 -m grpc_tools.protoc -I../.. --python_out=. --grpc_python_out=. 
backend.proto - -.PHONY: clean -clean: protogen-clean - $(RM) -r venv source __pycache__ \ No newline at end of file diff --git a/backend/python/exllama/README.md b/backend/python/exllama/README.md deleted file mode 100644 index f9ed5e9f..00000000 --- a/backend/python/exllama/README.md +++ /dev/null @@ -1,5 +0,0 @@ -# Creating a separate environment for the exllama project - -``` -make exllama -``` \ No newline at end of file diff --git a/backend/python/exllama/backend.py b/backend/python/exllama/backend.py deleted file mode 100755 index 58d1392c..00000000 --- a/backend/python/exllama/backend.py +++ /dev/null @@ -1,159 +0,0 @@ -#!/usr/bin/env python3 -import grpc -from concurrent import futures -import time -import backend_pb2 -import backend_pb2_grpc -import argparse -import signal -import sys -import os, glob - -from pathlib import Path -import torch -import torch.nn.functional as F -from torch import version as torch_version - -from source.tokenizer import ExLlamaTokenizer -from source.generator import ExLlamaGenerator -from source.model import ExLlama, ExLlamaCache, ExLlamaConfig - -_ONE_DAY_IN_SECONDS = 60 * 60 * 24 - -# If MAX_WORKERS are specified in the environment use it, otherwise default to 1 -MAX_WORKERS = int(os.environ.get('PYTHON_GRPC_MAX_WORKERS', '1')) - -# Implement the BackendServicer class with the service methods -class BackendServicer(backend_pb2_grpc.BackendServicer): - def generate(self,prompt, max_new_tokens): - self.generator.end_beam_search() - - # Tokenizing the input - ids = self.generator.tokenizer.encode(prompt) - - self.generator.gen_begin_reuse(ids) - initial_len = self.generator.sequence[0].shape[0] - has_leading_space = False - decoded_text = '' - for i in range(max_new_tokens): - token = self.generator.gen_single_token() - if i == 0 and self.generator.tokenizer.tokenizer.IdToPiece(int(token)).startswith('▁'): - has_leading_space = True - - decoded_text = self.generator.tokenizer.decode(self.generator.sequence[0][initial_len:]) - 
if has_leading_space: - decoded_text = ' ' + decoded_text - - if token.item() == self.generator.tokenizer.eos_token_id: - break - return decoded_text - def Health(self, request, context): - return backend_pb2.Reply(message=bytes("OK", 'utf-8')) - def LoadModel(self, request, context): - try: - # https://github.com/turboderp/exllama/blob/master/example_cfg.py - model_directory = request.ModelFile - - # Locate files we need within that directory - tokenizer_path = os.path.join(model_directory, "tokenizer.model") - model_config_path = os.path.join(model_directory, "config.json") - st_pattern = os.path.join(model_directory, "*.safetensors") - model_path = glob.glob(st_pattern)[0] - - # Create config, model, tokenizer and generator - - config = ExLlamaConfig(model_config_path) # create config from config.json - config.model_path = model_path # supply path to model weights file - if (request.ContextSize): - config.max_seq_len = request.ContextSize # override max sequence length - config.max_attention_size = request.ContextSize**2 # Should be set to context_size^2. - # https://github.com/turboderp/exllama/issues/220#issuecomment-1720324163 - - # Set Rope scaling. - if (request.RopeFreqScale): - # Alpha value for Rope scaling. - # Higher value increases context but adds perplexity. - # alpha_value and compress_pos_emb are mutually exclusive. 
- # https://github.com/turboderp/exllama/issues/115 - config.alpha_value = request.RopeFreqScale - config.calculate_rotary_embedding_base() - - model = ExLlama(config) # create ExLlama instance and load the weights - tokenizer = ExLlamaTokenizer(tokenizer_path) # create tokenizer from tokenizer model file - - cache = ExLlamaCache(model, batch_size = 2) # create cache for inference - generator = ExLlamaGenerator(model, tokenizer, cache) # create generator - - self.generator= generator - self.model = model - self.tokenizer = tokenizer - self.cache = cache - except Exception as err: - return backend_pb2.Result(success=False, message=f"Unexpected {err=}, {type(err)=}") - return backend_pb2.Result(message="Model loaded successfully", success=True) - - def Predict(self, request, context): - penalty = 1.15 - if request.Penalty != 0.0: - penalty = request.Penalty - self.generator.settings.token_repetition_penalty_max = penalty - self.generator.settings.temperature = request.Temperature - self.generator.settings.top_k = request.TopK - self.generator.settings.top_p = request.TopP - - tokens = 512 - if request.Tokens != 0: - tokens = request.Tokens - - if self.cache.batch_size == 1: - del self.cache - self.cache = ExLlamaCache(self.model, batch_size=2) - self.generator = ExLlamaGenerator(self.model, self.tokenizer, self.cache) - - t = self.generate(request.Prompt, tokens) - - # Remove prompt from response if present - if request.Prompt in t: - t = t.replace(request.Prompt, "") - - return backend_pb2.Result(message=bytes(t, encoding='utf-8')) - - def PredictStream(self, request, context): - # Implement PredictStream RPC - #for reply in some_data_generator(): - # yield reply - # Not implemented yet - return self.Predict(request, context) - - -def serve(address): - server = grpc.server(futures.ThreadPoolExecutor(max_workers=MAX_WORKERS)) - backend_pb2_grpc.add_BackendServicer_to_server(BackendServicer(), server) - server.add_insecure_port(address) - server.start() - 
print("Server started. Listening on: " + address, file=sys.stderr) - - # Define the signal handler function - def signal_handler(sig, frame): - print("Received termination signal. Shutting down...") - server.stop(0) - sys.exit(0) - - # Set the signal handlers for SIGINT and SIGTERM - signal.signal(signal.SIGINT, signal_handler) - signal.signal(signal.SIGTERM, signal_handler) - - try: - while True: - time.sleep(_ONE_DAY_IN_SECONDS) - except KeyboardInterrupt: - server.stop(0) - -if __name__ == "__main__": - parser = argparse.ArgumentParser(description="Run the gRPC server.") - parser.add_argument( - "--addr", default="localhost:50051", help="The address to bind the server to." - ) - args = parser.parse_args() - - serve(args.addr) \ No newline at end of file diff --git a/backend/python/exllama/install.sh b/backend/python/exllama/install.sh deleted file mode 100755 index d33c4356..00000000 --- a/backend/python/exllama/install.sh +++ /dev/null @@ -1,13 +0,0 @@ -#!/bin/bash -set -e - -LIMIT_TARGETS="cublas" - -source $(dirname $0)/../common/libbackend.sh - -installRequirements - -git clone https://github.com/turboderp/exllama $MY_DIR/source -uv pip install ${BUILD_ISOLATION_FLAG} --requirement ${MY_DIR}/source/requirements.txt - -cp -v ./*py $MY_DIR/source/ diff --git a/backend/python/exllama/requirements-cpu.txt b/backend/python/exllama/requirements-cpu.txt deleted file mode 100644 index bbcdc8cd..00000000 --- a/backend/python/exllama/requirements-cpu.txt +++ /dev/null @@ -1,3 +0,0 @@ -transformers -accelerate -torch \ No newline at end of file diff --git a/backend/python/exllama/requirements-cublas11.txt b/backend/python/exllama/requirements-cublas11.txt deleted file mode 100644 index 1dfb5b98..00000000 --- a/backend/python/exllama/requirements-cublas11.txt +++ /dev/null @@ -1,4 +0,0 @@ ---extra-index-url https://download.pytorch.org/whl/cu118 -torch -transformers -accelerate \ No newline at end of file diff --git a/backend/python/exllama/requirements-cublas12.txt 
b/backend/python/exllama/requirements-cublas12.txt deleted file mode 100644 index 1ec544cd..00000000 --- a/backend/python/exllama/requirements-cublas12.txt +++ /dev/null @@ -1,3 +0,0 @@ -torch -transformers -accelerate \ No newline at end of file diff --git a/backend/python/exllama/requirements.txt b/backend/python/exllama/requirements.txt deleted file mode 100644 index b9c192d5..00000000 --- a/backend/python/exllama/requirements.txt +++ /dev/null @@ -1,4 +0,0 @@ -grpcio==1.66.1 -protobuf -certifi -setuptools \ No newline at end of file diff --git a/backend/python/exllama/run.sh b/backend/python/exllama/run.sh deleted file mode 100755 index 63119689..00000000 --- a/backend/python/exllama/run.sh +++ /dev/null @@ -1,7 +0,0 @@ -#!/bin/bash -LIMIT_TARGETS="cublas" -BACKEND_FILE="${MY_DIR}/source/backend.py" - -source $(dirname $0)/../common/libbackend.sh - -startBackend $@ \ No newline at end of file diff --git a/backend/python/exllama/test.sh b/backend/python/exllama/test.sh deleted file mode 100755 index 6940b066..00000000 --- a/backend/python/exllama/test.sh +++ /dev/null @@ -1,6 +0,0 @@ -#!/bin/bash -set -e - -source $(dirname $0)/../common/libbackend.sh - -runUnittests From cabb1602e84535e1957412d63d5f2a3ad80c589b Mon Sep 17 00:00:00 2001 From: "LocalAI [bot]" <139863280+localai-bot@users.noreply.github.com> Date: Sat, 14 Sep 2024 00:05:38 +0200 Subject: [PATCH 0102/1530] chore: :arrow_up: Update ggerganov/llama.cpp to `feff4aa8461da7c432d144c11da4802e41fef3cf` (#3542) :arrow_up: Update ggerganov/llama.cpp Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com> Co-authored-by: mudler <2420543+mudler@users.noreply.github.com> --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index a3f0ffd0..9ba109b0 100644 --- a/Makefile +++ b/Makefile @@ -8,7 +8,7 @@ DETECT_LIBS?=true # llama.cpp versions GOLLAMA_REPO?=https://github.com/go-skynet/go-llama.cpp 
GOLLAMA_VERSION?=2b57a8ae43e4699d3dc5d1496a1ccd42922993be -CPPLLAMA_VERSION?=e6b7801bd189d102d901d3e72035611a25456ef1 +CPPLLAMA_VERSION?=feff4aa8461da7c432d144c11da4802e41fef3cf # go-rwkv version RWKV_REPO?=https://github.com/donomii/go-rwkv.cpp From 3d3db1d74fe037c08c6fe1296e32687ba6eaa80a Mon Sep 17 00:00:00 2001 From: "LocalAI [bot]" <139863280+localai-bot@users.noreply.github.com> Date: Sun, 15 Sep 2024 23:40:51 +0200 Subject: [PATCH 0103/1530] chore: :arrow_up: Update ggerganov/whisper.cpp to `049b3a0e53c8a8e4c4576c06a1a4fccf0063a73f` (#3548) :arrow_up: Update ggerganov/whisper.cpp Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com> Co-authored-by: mudler <2420543+mudler@users.noreply.github.com> --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index 9ba109b0..d5288dc4 100644 --- a/Makefile +++ b/Makefile @@ -16,7 +16,7 @@ RWKV_VERSION?=661e7ae26d442f5cfebd2a0881b44e8c55949ec6 # whisper.cpp version WHISPER_REPO?=https://github.com/ggerganov/whisper.cpp -WHISPER_CPP_VERSION?=a551933542d956ae84634937acd2942eb40efaaf +WHISPER_CPP_VERSION?=049b3a0e53c8a8e4c4576c06a1a4fccf0063a73f # bert.cpp version BERT_REPO?=https://github.com/go-skynet/go-bert.cpp From 25deb4ba95e68f40c5ee6bd4c40a2eaa91cc8af3 Mon Sep 17 00:00:00 2001 From: Ettore Di Giacinto Date: Mon, 16 Sep 2024 10:29:20 +0200 Subject: [PATCH 0104/1530] chore(deps): update llama.cpp to 6262d13e0b2da91f230129a93a996609a2fa2f2 (#3549) chore(deps): update llama.cpp to 6262d13e0b2da91f230129a93a996609a2f5a2f2 Signed-off-by: Ettore Di Giacinto --- Makefile | 2 +- backend/cpp/llama/grpc-server.cpp | 61 ++++++++++++++++--------------- 2 files changed, 33 insertions(+), 30 deletions(-) diff --git a/Makefile b/Makefile index d5288dc4..e4d5b22c 100644 --- a/Makefile +++ b/Makefile @@ -8,7 +8,7 @@ DETECT_LIBS?=true # llama.cpp versions GOLLAMA_REPO?=https://github.com/go-skynet/go-llama.cpp 
GOLLAMA_VERSION?=2b57a8ae43e4699d3dc5d1496a1ccd42922993be -CPPLLAMA_VERSION?=feff4aa8461da7c432d144c11da4802e41fef3cf +CPPLLAMA_VERSION?=6262d13e0b2da91f230129a93a996609a2f5a2f2 # go-rwkv version RWKV_REPO?=https://github.com/donomii/go-rwkv.cpp diff --git a/backend/cpp/llama/grpc-server.cpp b/backend/cpp/llama/grpc-server.cpp index a46b4ee0..56d59d21 100644 --- a/backend/cpp/llama/grpc-server.cpp +++ b/backend/cpp/llama/grpc-server.cpp @@ -13,6 +13,7 @@ #include #include "clip.h" #include "llava.h" +#include "log.h" #include "stb_image.h" #include "common.h" #include "json.hpp" @@ -448,7 +449,7 @@ struct llama_server_context LOG_INFO("Multi Modal Mode Enabled", {}); clp_ctx = clip_model_load(params.mmproj.c_str(), /*verbosity=*/ 1); if(clp_ctx == nullptr) { - LOG_ERROR("unable to load clip model", {{"model", params.mmproj}}); + LOG_ERR("unable to load clip model: %s", params.mmproj.c_str()); return false; } @@ -462,7 +463,7 @@ struct llama_server_context ctx = llama_init.context; if (model == nullptr) { - LOG_ERROR("unable to load model", {{"model", params.model}}); + LOG_ERR("unable to load model: %s", params.model.c_str()); return false; } @@ -470,7 +471,7 @@ struct llama_server_context const int n_embd_clip = clip_n_mmproj_embd(clp_ctx); const int n_embd_llm = llama_n_embd(model); if (n_embd_clip != n_embd_llm) { - LOG_TEE("%s: embedding dim of the multimodal projector (%d) is not equal to that of LLaMA (%d). Make sure that you use the correct mmproj file.\n", __func__, n_embd_clip, n_embd_llm); + LOG("%s: embedding dim of the multimodal projector (%d) is not equal to that of LLaMA (%d). 
Make sure that you use the correct mmproj file.\n", __func__, n_embd_clip, n_embd_llm); llama_free(ctx); llama_free_model(model); return false; @@ -489,7 +490,7 @@ struct llama_server_context std::vector buf(1); int res = llama_chat_apply_template(model, nullptr, chat, 1, true, buf.data(), buf.size()); if (res < 0) { - LOG_ERROR("The chat template comes with this model is not yet supported, falling back to chatml. This may cause the model to output suboptimal responses", {}); + LOG_ERR("The chat template comes with this model is not yet supported, falling back to chatml. This may cause the model to output suboptimal responses", __func__); sparams.chat_template = "<|im_start|>"; // llama_chat_apply_template only checks if <|im_start|> exist in the template } } @@ -812,10 +813,11 @@ struct llama_server_context img_sl.img_data = clip_image_u8_init(); if (!clip_image_load_from_bytes(image_buffer.data(), image_buffer.size(), img_sl.img_data)) { - LOG_ERROR("failed to load image", { - {"slot_id", slot->id}, - {"img_sl_id", img_sl.id} - }); + LOG_ERR("%s: failed to load image, slot_id: %d, img_sl_id: %d", + __func__, + slot->id, + img_sl.id + ); return false; } LOG_VERBOSE("image loaded", { @@ -853,12 +855,12 @@ struct llama_server_context } } if (!found) { - LOG_TEE("ERROR: Image with id: %i, not found.\n", img_id); + LOG("ERROR: Image with id: %i, not found.\n", img_id); slot->images.clear(); return false; } } catch (const std::invalid_argument& e) { - LOG_TEE("Invalid image number id in prompt\n"); + LOG("Invalid image number id in prompt\n"); slot->images.clear(); return false; } @@ -886,7 +888,7 @@ struct llama_server_context {"task_id", slot->task_id}, }); - // LOG_TEE("sampling: \n%s\n", llama_sampling_print(slot->sparams).c_str()); + // LOG("sampling: \n%s\n", llama_sampling_print(slot->sparams).c_str()); return true; } @@ -926,7 +928,7 @@ struct llama_server_context }; if (llama_decode(ctx, batch_view) != 0) { - LOG_TEE("%s: llama_decode() failed\n", __func__); + 
LOG("%s: llama_decode() failed\n", __func__); return; } } @@ -938,7 +940,7 @@ struct llama_server_context } } - LOG_TEE("system prompt updated\n"); + LOG("system prompt updated\n"); system_need_update = false; } @@ -1120,7 +1122,7 @@ struct llama_server_context } if (!llava_image_embed_make_with_clip_img(clp_ctx, params.cpuparams.n_threads, img.img_data, &img.image_embedding, &img.image_tokens)) { - LOG_TEE("Error processing the given image"); + LOG("Error processing the given image"); return false; } @@ -1132,7 +1134,7 @@ struct llama_server_context void send_error(task_server& task, const std::string &error) { - LOG_TEE("task %i - error: %s\n", task.id, error.c_str()); + LOG("task %i - error: %s\n", task.id, error.c_str()); task_result res; res.id = task.id; res.multitask_id = task.multitask_id; @@ -1371,7 +1373,7 @@ struct llama_server_context }; if (llama_decode(ctx, batch_view)) { - LOG_TEE("%s : failed to eval\n", __func__); + LOG("%s : failed to eval\n", __func__); return false; } } @@ -1389,7 +1391,7 @@ struct llama_server_context llama_batch batch_img = { n_eval, nullptr, (img.image_embedding + i * n_embd), nullptr, nullptr, nullptr, nullptr, slot.n_past, 1, 0, }; if (llama_decode(ctx, batch_img)) { - LOG_TEE("%s : failed to eval image\n", __func__); + LOG("%s : failed to eval image\n", __func__); return false; } slot.n_past += n_eval; @@ -1572,7 +1574,7 @@ struct llama_server_context slot.n_past = 0; slot.truncated = false; slot.has_next_token = true; - LOG_TEE("Context exhausted. Slot %d released (%d tokens in cache)\n", slot.id, (int) slot.cache_tokens.size()); + LOG("Context exhausted. 
Slot %d released (%d tokens in cache)\n", slot.id, (int) slot.cache_tokens.size()); continue; // END LOCALAI changes @@ -1820,10 +1822,11 @@ struct llama_server_context if (has_images && !ingest_images(slot, n_batch)) { - LOG_ERROR("failed processing images", { - "slot_id", slot.id, - "task_id", slot.task_id, - }); + LOG_ERR("%s: failed processing images Slot id : %d, Task id: %d", + __func__, + slot.id, + slot.task_id + ); // FIXME @phymbert: to be properly tested // early returning without changing the slot state will block the slot for ever // no one at the moment is checking the return value @@ -1863,10 +1866,10 @@ struct llama_server_context const int bd = (slot.ga_w / slot.ga_n) * (slot.ga_n - 1); const int dd = (slot.ga_w / slot.ga_n) - ib * bd - slot.ga_w; - LOG_TEE("\n"); - LOG_TEE("shift: [%6d, %6d] + %6d -> [%6d, %6d]\n", slot.ga_i, slot.n_past_se, ib * bd, slot.ga_i + ib * bd, slot.n_past_se + ib * bd); - LOG_TEE("div: [%6d, %6d] / %6d -> [%6d, %6d]\n", slot.ga_i + ib * bd, slot.ga_i + ib * bd + slot.ga_w, slot.ga_n, (slot.ga_i + ib * bd) / slot.ga_n, (slot.ga_i + ib * bd + slot.ga_w) / slot.ga_n); - LOG_TEE("shift: [%6d, %6d] + %6d -> [%6d, %6d]\n", slot.ga_i + ib * bd + slot.ga_w, slot.n_past_se + ib * bd, dd, slot.ga_i + ib * bd + slot.ga_w + dd, slot.n_past_se + ib * bd + dd); + LOG("\n"); + LOG("shift: [%6d, %6d] + %6d -> [%6d, %6d]\n", slot.ga_i, slot.n_past_se, ib * bd, slot.ga_i + ib * bd, slot.n_past_se + ib * bd); + LOG("div: [%6d, %6d] / %6d -> [%6d, %6d]\n", slot.ga_i + ib * bd, slot.ga_i + ib * bd + slot.ga_w, slot.ga_n, (slot.ga_i + ib * bd) / slot.ga_n, (slot.ga_i + ib * bd + slot.ga_w) / slot.ga_n); + LOG("shift: [%6d, %6d] + %6d -> [%6d, %6d]\n", slot.ga_i + ib * bd + slot.ga_w, slot.n_past_se + ib * bd, dd, slot.ga_i + ib * bd + slot.ga_w + dd, slot.n_past_se + ib * bd + dd); llama_kv_cache_seq_add(ctx, slot.id, slot.ga_i, slot.n_past_se, ib * bd); llama_kv_cache_seq_div(ctx, slot.id, slot.ga_i + ib * bd, slot.ga_i + ib * bd + 
slot.ga_w,slot.ga_n); @@ -1876,7 +1879,7 @@ struct llama_server_context slot.ga_i += slot.ga_w / slot.ga_n; - LOG_TEE("\nn_past_old = %d, n_past = %d, ga_i = %d\n\n", slot.n_past_se + bd, slot.n_past_se, slot.ga_i); + LOG("\nn_past_old = %d, n_past = %d, ga_i = %d\n\n", slot.n_past_se + bd, slot.n_past_se, slot.ga_i); } slot.n_past_se += n_tokens; } @@ -1901,11 +1904,11 @@ struct llama_server_context if (n_batch == 1 || ret < 0) { // if you get here, it means the KV cache is full - try increasing it via the context size - LOG_TEE("%s : failed to decode the batch, n_batch = %d, ret = %d\n", __func__, n_batch, ret); + LOG("%s : failed to decode the batch, n_batch = %d, ret = %d\n", __func__, n_batch, ret); return false; } - LOG_TEE("%s : failed to find free space in the KV cache, retrying with smaller n_batch = %d\n", __func__, n_batch / 2); + LOG("%s : failed to find free space in the KV cache, retrying with smaller n_batch = %d\n", __func__, n_batch / 2); // retry with half the batch size to try to find a free slot in the KV cache n_batch /= 2; From a8003f2b7ca013773f3bf2e5eee4b74a94f4b4cd Mon Sep 17 00:00:00 2001 From: Ettore Di Giacinto Date: Mon, 16 Sep 2024 13:06:07 +0200 Subject: [PATCH 0105/1530] models(gallery): add l3.1-8b-niitama-v1.1-iq-imatrix (#3550) Signed-off-by: Ettore Di Giacinto --- gallery/index.yaml | 23 +++++++++++++++++++++++ 1 file changed, 23 insertions(+) diff --git a/gallery/index.yaml b/gallery/index.yaml index 188576b2..bdc11cf4 100644 --- a/gallery/index.yaml +++ b/gallery/index.yaml @@ -675,6 +675,29 @@ - filename: Azure_Dusk-v0.2-Q4_K_M-imat.gguf sha256: c03a670c00976d14c267a0322374ed488b2a5f4790eb509136ca4e75cbc10cf4 uri: huggingface://Lewdiculous/Azure_Dusk-v0.2-GGUF-IQ-Imatrix/Azure_Dusk-v0.2-Q4_K_M-imat.gguf +- !!merge <<: *llama31 + name: "l3.1-8b-niitama-v1.1-iq-imatrix" + icon: https://cdn-uploads.huggingface.co/production/uploads/65d4cf2693a0a3744a27536c/2Q5ky8TvP0vLS1ulMXnrn.png + urls: + - 
https://huggingface.co/Sao10K/L3.1-8B-Niitama-v1.1 + - https://huggingface.co/Lewdiculous/L3.1-8B-Niitama-v1.1-GGUF-IQ-Imatrix + description: | + GGUF-IQ-Imatrix quants for Sao10K/L3.1-8B-Niitama-v1.1 + Here's the subjectively superior L3 version: L3-8B-Niitama-v1 + An experimental model using experimental methods. + + More detail on it: + + Tamamo and Niitama are made from the same data. Literally. The only thing that's changed is how theyre shuffled and formatted. Yet, I get wildly different results. + + Interesting, eh? Feels kinda not as good compared to the l3 version, but it's aight. + overrides: + parameters: + model: L3.1-8B-Niitama-v1.1-Q4_K_M-imat.gguf + files: + - filename: L3.1-8B-Niitama-v1.1-Q4_K_M-imat.gguf + sha256: 524163bd0f1d43c9284b09118abcc192f3250b13dd3bb79d60c28321108b6748 + uri: huggingface://Lewdiculous/L3.1-8B-Niitama-v1.1-GGUF-IQ-Imatrix/L3.1-8B-Niitama-v1.1-Q4_K_M-imat.gguf - &deepseek ## Deepseek url: "github:mudler/LocalAI/gallery/deepseek.yaml@master" From 9ca5ef339a277d1c1d1adc4427ae2d99f0cf0ea0 Mon Sep 17 00:00:00 2001 From: Ettore Di Giacinto Date: Mon, 16 Sep 2024 16:44:00 +0200 Subject: [PATCH 0106/1530] models(gallery): add llama-3.1-8b-stheno-v3.4-iq-imatrix (#3551) Signed-off-by: Ettore Di Giacinto --- gallery/index.yaml | 43 +++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 43 insertions(+) diff --git a/gallery/index.yaml b/gallery/index.yaml index bdc11cf4..5e47d31c 100644 --- a/gallery/index.yaml +++ b/gallery/index.yaml @@ -698,6 +698,49 @@ - filename: L3.1-8B-Niitama-v1.1-Q4_K_M-imat.gguf sha256: 524163bd0f1d43c9284b09118abcc192f3250b13dd3bb79d60c28321108b6748 uri: huggingface://Lewdiculous/L3.1-8B-Niitama-v1.1-GGUF-IQ-Imatrix/L3.1-8B-Niitama-v1.1-Q4_K_M-imat.gguf +- !!merge <<: *llama31 + name: "llama-3.1-8b-stheno-v3.4-iq-imatrix" + icon: https://huggingface.co/Sao10K/Llama-3.1-8B-Stheno-v3.4/resolve/main/meneno.jpg + urls: + - https://huggingface.co/Sao10K/Llama-3.1-8B-Stheno-v3.4 + - 
https://huggingface.co/Lewdiculous/Llama-3.1-8B-Stheno-v3.4-GGUF-IQ-Imatrix + description: | + This model has went through a multi-stage finetuning process. + + - 1st, over a multi-turn Conversational-Instruct + - 2nd, over a Creative Writing / Roleplay along with some Creative-based Instruct Datasets. + - - Dataset consists of a mixture of Human and Claude Data. + + Prompting Format: + + - Use the L3 Instruct Formatting - Euryale 2.1 Preset Works Well + - Temperature + min_p as per usual, I recommend 1.4 Temp + 0.2 min_p. + - Has a different vibe to previous versions. Tinker around. + + Changes since previous Stheno Datasets: + + - Included Multi-turn Conversation-based Instruct Datasets to boost multi-turn coherency. # This is a seperate set, not the ones made by Kalomaze and Nopm, that are used in Magnum. They're completely different data. + - Replaced Single-Turn Instruct with Better Prompts and Answers by Claude 3.5 Sonnet and Claude 3 Opus. + - Removed c2 Samples -> Underway of re-filtering and masking to use with custom prefills. TBD + - Included 55% more Roleplaying Examples based of [Gryphe's](https://huggingface.co/datasets/Gryphe/Sonnet3.5-Charcard-Roleplay) Charcard RP Sets. Further filtered and cleaned on. + - Included 40% More Creative Writing Examples. + - Included Datasets Targeting System Prompt Adherence. + - Included Datasets targeting Reasoning / Spatial Awareness. + - Filtered for the usual errors, slop and stuff at the end. Some may have slipped through, but I removed nearly all of it. + + Personal Opinions: + + - Llama3.1 was more disappointing, in the Instruct Tune? It felt overbaked, atleast. Likely due to the DPO being done after their SFT Stage. + - Tuning on L3.1 base did not give good results, unlike when I tested with Nemo base. unfortunate. + - Still though, I think I did an okay job. It does feel a bit more distinctive. + - It took a lot of tinkering, like a LOT to wrangle this. 
+ overrides: + parameters: + model: Llama-3.1-8B-Stheno-v3.4-Q4_K_M-imat.gguf + files: + - filename: Llama-3.1-8B-Stheno-v3.4-Q4_K_M-imat.gguf + sha256: 830d4858aa11a654f82f69fa40dee819edf9ecf54213057648304eb84b8dd5eb + uri: huggingface://Lewdiculous/Llama-3.1-8B-Stheno-v3.4-GGUF-IQ-Imatrix/Llama-3.1-8B-Stheno-v3.4-Q4_K_M-imat.gguf - &deepseek ## Deepseek url: "github:mudler/LocalAI/gallery/deepseek.yaml@master" From fec01d9e6955a7347cd05c317bd24fd047da0cc6 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 16 Sep 2024 21:00:35 +0000 Subject: [PATCH 0107/1530] chore(deps): Bump docs/themes/hugo-theme-relearn from `f696f60` to `d5a0ee0` (#3558) chore(deps): Bump docs/themes/hugo-theme-relearn Bumps [docs/themes/hugo-theme-relearn](https://github.com/McShelby/hugo-theme-relearn) from `f696f60` to `d5a0ee0`. - [Release notes](https://github.com/McShelby/hugo-theme-relearn/releases) - [Commits](https://github.com/McShelby/hugo-theme-relearn/compare/f696f60f4e44e18a34512b895a7b65a72c801bd8...d5a0ee04ad986394d6d2f1e1a57f2334d24bf317) --- updated-dependencies: - dependency-name: docs/themes/hugo-theme-relearn dependency-type: direct:production ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- docs/themes/hugo-theme-relearn | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/themes/hugo-theme-relearn b/docs/themes/hugo-theme-relearn index f696f60f..d5a0ee04 160000 --- a/docs/themes/hugo-theme-relearn +++ b/docs/themes/hugo-theme-relearn @@ -1 +1 @@ -Subproject commit f696f60f4e44e18a34512b895a7b65a72c801bd8 +Subproject commit d5a0ee04ad986394d6d2f1e1a57f2334d24bf317 From 2edc732c3398599b1d86a8930286ccc9fd3762e3 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 16 Sep 2024 21:23:06 +0000 Subject: [PATCH 0108/1530] chore(deps): Bump setuptools from 72.1.0 to 75.1.0 in /backend/python/coqui (#3554) chore(deps): Bump setuptools in /backend/python/coqui Bumps [setuptools](https://github.com/pypa/setuptools) from 72.1.0 to 75.1.0. - [Release notes](https://github.com/pypa/setuptools/releases) - [Changelog](https://github.com/pypa/setuptools/blob/main/NEWS.rst) - [Commits](https://github.com/pypa/setuptools/compare/v72.1.0...v75.1.0) --- updated-dependencies: - dependency-name: setuptools dependency-type: direct:production update-type: version-update:semver-major ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- backend/python/coqui/requirements-intel.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/backend/python/coqui/requirements-intel.txt b/backend/python/coqui/requirements-intel.txt index 002a55c3..c0e4dcaa 100644 --- a/backend/python/coqui/requirements-intel.txt +++ b/backend/python/coqui/requirements-intel.txt @@ -3,6 +3,6 @@ intel-extension-for-pytorch torch torchaudio optimum[openvino] -setuptools==72.1.0 # https://github.com/mudler/LocalAI/issues/2406 +setuptools==75.1.0 # https://github.com/mudler/LocalAI/issues/2406 transformers accelerate \ No newline at end of file From a5ce987bdbd98b6c8659a92dfbcc9d99bbf52f5f Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 16 Sep 2024 21:35:10 +0000 Subject: [PATCH 0109/1530] chore(deps): Bump langchain from 0.2.16 to 0.3.0 in /examples/functions (#3559) Bumps [langchain](https://github.com/langchain-ai/langchain) from 0.2.16 to 0.3.0. - [Release notes](https://github.com/langchain-ai/langchain/releases) - [Commits](https://github.com/langchain-ai/langchain/compare/langchain==0.2.16...langchain==0.3.0) --- updated-dependencies: - dependency-name: langchain dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- examples/functions/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/functions/requirements.txt b/examples/functions/requirements.txt index 8258885a..9dd6818f 100644 --- a/examples/functions/requirements.txt +++ b/examples/functions/requirements.txt @@ -1,2 +1,2 @@ -langchain==0.2.16 +langchain==0.3.0 openai==1.44.0 From 149cc1eb13d3bd9af76ed13d72bff02cc685e601 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 16 Sep 2024 21:44:34 +0000 Subject: [PATCH 0110/1530] chore(deps): Bump openai from 1.44.1 to 1.45.1 in /examples/langchain-chroma (#3556) chore(deps): Bump openai in /examples/langchain-chroma Bumps [openai](https://github.com/openai/openai-python) from 1.44.1 to 1.45.1. - [Release notes](https://github.com/openai/openai-python/releases) - [Changelog](https://github.com/openai/openai-python/blob/main/CHANGELOG.md) - [Commits](https://github.com/openai/openai-python/compare/v1.44.1...v1.45.1) --- updated-dependencies: - dependency-name: openai dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- examples/langchain-chroma/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/langchain-chroma/requirements.txt b/examples/langchain-chroma/requirements.txt index c9bce6e9..3edb570c 100644 --- a/examples/langchain-chroma/requirements.txt +++ b/examples/langchain-chroma/requirements.txt @@ -1,4 +1,4 @@ langchain==0.2.16 -openai==1.44.1 +openai==1.45.1 chromadb==0.5.5 llama-index==0.11.7 \ No newline at end of file From 09c7d8d4587f9e09cd6e50534b2677abc85db1ee Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 16 Sep 2024 21:46:26 +0000 Subject: [PATCH 0111/1530] chore(deps): Bump setuptools from 72.1.0 to 75.1.0 in /backend/python/autogptq (#3553) chore(deps): Bump setuptools in /backend/python/autogptq Bumps [setuptools](https://github.com/pypa/setuptools) from 72.1.0 to 75.1.0. - [Release notes](https://github.com/pypa/setuptools/releases) - [Changelog](https://github.com/pypa/setuptools/blob/main/NEWS.rst) - [Commits](https://github.com/pypa/setuptools/compare/v72.1.0...v75.1.0) --- updated-dependencies: - dependency-name: setuptools dependency-type: direct:production update-type: version-update:semver-major ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- backend/python/autogptq/requirements-intel.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/backend/python/autogptq/requirements-intel.txt b/backend/python/autogptq/requirements-intel.txt index 755e19d8..d5e0173e 100644 --- a/backend/python/autogptq/requirements-intel.txt +++ b/backend/python/autogptq/requirements-intel.txt @@ -2,4 +2,4 @@ intel-extension-for-pytorch torch optimum[openvino] -setuptools==72.1.0 # https://github.com/mudler/LocalAI/issues/2406 \ No newline at end of file +setuptools==75.1.0 # https://github.com/mudler/LocalAI/issues/2406 \ No newline at end of file From 12a8d0e46fbd03f8d550dc41ea6325d07d66cd00 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 16 Sep 2024 21:57:16 +0000 Subject: [PATCH 0112/1530] chore(deps): Bump securego/gosec from 2.21.0 to 2.21.2 (#3561) Bumps [securego/gosec](https://github.com/securego/gosec) from 2.21.0 to 2.21.2. - [Release notes](https://github.com/securego/gosec/releases) - [Changelog](https://github.com/securego/gosec/blob/master/.goreleaser.yml) - [Commits](https://github.com/securego/gosec/compare/v2.21.0...v2.21.2) --- updated-dependencies: - dependency-name: securego/gosec dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/secscan.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/secscan.yaml b/.github/workflows/secscan.yaml index db9db586..08d7dfc6 100644 --- a/.github/workflows/secscan.yaml +++ b/.github/workflows/secscan.yaml @@ -18,7 +18,7 @@ jobs: if: ${{ github.actor != 'dependabot[bot]' }} - name: Run Gosec Security Scanner if: ${{ github.actor != 'dependabot[bot]' }} - uses: securego/gosec@v2.21.0 + uses: securego/gosec@v2.21.2 with: # we let the report trigger content trigger a failure using the GitHub Security features. args: '-no-fail -fmt sarif -out results.sarif ./...' From afb5bbc1b88f71454a8b6081f8f8d46ad0eb9b35 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 16 Sep 2024 23:03:06 +0000 Subject: [PATCH 0113/1530] chore(deps): Bump setuptools from 69.5.1 to 75.1.0 in /backend/python/transformers-musicgen (#3564) chore(deps): Bump setuptools in /backend/python/transformers-musicgen Bumps [setuptools](https://github.com/pypa/setuptools) from 69.5.1 to 75.1.0. - [Release notes](https://github.com/pypa/setuptools/releases) - [Changelog](https://github.com/pypa/setuptools/blob/main/NEWS.rst) - [Commits](https://github.com/pypa/setuptools/compare/v69.5.1...v75.1.0) --- updated-dependencies: - dependency-name: setuptools dependency-type: direct:production update-type: version-update:semver-major ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- backend/python/transformers-musicgen/requirements-intel.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/backend/python/transformers-musicgen/requirements-intel.txt b/backend/python/transformers-musicgen/requirements-intel.txt index 89bfa6a2..608d6939 100644 --- a/backend/python/transformers-musicgen/requirements-intel.txt +++ b/backend/python/transformers-musicgen/requirements-intel.txt @@ -4,4 +4,4 @@ transformers accelerate torch optimum[openvino] -setuptools==69.5.1 # https://github.com/mudler/LocalAI/issues/2406 \ No newline at end of file +setuptools==75.1.0 # https://github.com/mudler/LocalAI/issues/2406 \ No newline at end of file From 30fe16310035d3942368745f17d1673c889a4ddc Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 16 Sep 2024 23:13:09 +0000 Subject: [PATCH 0114/1530] chore(deps): Bump setuptools from 72.1.0 to 75.1.0 in /backend/python/parler-tts (#3565) chore(deps): Bump setuptools in /backend/python/parler-tts Bumps [setuptools](https://github.com/pypa/setuptools) from 72.1.0 to 75.1.0. - [Release notes](https://github.com/pypa/setuptools/releases) - [Changelog](https://github.com/pypa/setuptools/blob/main/NEWS.rst) - [Commits](https://github.com/pypa/setuptools/compare/v72.1.0...v75.1.0) --- updated-dependencies: - dependency-name: setuptools dependency-type: direct:production update-type: version-update:semver-major ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- backend/python/parler-tts/requirements-intel.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/backend/python/parler-tts/requirements-intel.txt b/backend/python/parler-tts/requirements-intel.txt index 002a55c3..c0e4dcaa 100644 --- a/backend/python/parler-tts/requirements-intel.txt +++ b/backend/python/parler-tts/requirements-intel.txt @@ -3,6 +3,6 @@ intel-extension-for-pytorch torch torchaudio optimum[openvino] -setuptools==72.1.0 # https://github.com/mudler/LocalAI/issues/2406 +setuptools==75.1.0 # https://github.com/mudler/LocalAI/issues/2406 transformers accelerate \ No newline at end of file From 5356b81b7f112c57dcc8a215b1f14c86e7ee3f40 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 16 Sep 2024 23:40:39 +0000 Subject: [PATCH 0115/1530] chore(deps): Bump sentence-transformers from 3.0.1 to 3.1.0 in /backend/python/sentencetransformers (#3566) chore(deps): Bump sentence-transformers Bumps [sentence-transformers](https://github.com/UKPLab/sentence-transformers) from 3.0.1 to 3.1.0. - [Release notes](https://github.com/UKPLab/sentence-transformers/releases) - [Commits](https://github.com/UKPLab/sentence-transformers/compare/v3.0.1...v3.1.0) --- updated-dependencies: - dependency-name: sentence-transformers dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- backend/python/sentencetransformers/requirements-cpu.txt | 2 +- backend/python/sentencetransformers/requirements-cublas11.txt | 2 +- backend/python/sentencetransformers/requirements-cublas12.txt | 2 +- backend/python/sentencetransformers/requirements-hipblas.txt | 2 +- backend/python/sentencetransformers/requirements-intel.txt | 2 +- 5 files changed, 5 insertions(+), 5 deletions(-) diff --git a/backend/python/sentencetransformers/requirements-cpu.txt b/backend/python/sentencetransformers/requirements-cpu.txt index cd9924ef..f88de1e4 100644 --- a/backend/python/sentencetransformers/requirements-cpu.txt +++ b/backend/python/sentencetransformers/requirements-cpu.txt @@ -2,5 +2,5 @@ torch accelerate transformers bitsandbytes -sentence-transformers==3.0.1 +sentence-transformers==3.1.0 transformers \ No newline at end of file diff --git a/backend/python/sentencetransformers/requirements-cublas11.txt b/backend/python/sentencetransformers/requirements-cublas11.txt index 1131f066..57caf1a1 100644 --- a/backend/python/sentencetransformers/requirements-cublas11.txt +++ b/backend/python/sentencetransformers/requirements-cublas11.txt @@ -1,5 +1,5 @@ --extra-index-url https://download.pytorch.org/whl/cu118 torch accelerate -sentence-transformers==3.0.1 +sentence-transformers==3.1.0 transformers \ No newline at end of file diff --git a/backend/python/sentencetransformers/requirements-cublas12.txt b/backend/python/sentencetransformers/requirements-cublas12.txt index 2936e17b..834fa6a4 100644 --- a/backend/python/sentencetransformers/requirements-cublas12.txt +++ b/backend/python/sentencetransformers/requirements-cublas12.txt @@ -1,4 +1,4 @@ torch accelerate -sentence-transformers==3.0.1 +sentence-transformers==3.1.0 transformers \ No newline at end of file diff --git a/backend/python/sentencetransformers/requirements-hipblas.txt 
b/backend/python/sentencetransformers/requirements-hipblas.txt index 3b187c68..98a0a41b 100644 --- a/backend/python/sentencetransformers/requirements-hipblas.txt +++ b/backend/python/sentencetransformers/requirements-hipblas.txt @@ -1,5 +1,5 @@ --extra-index-url https://download.pytorch.org/whl/rocm6.0 torch accelerate -sentence-transformers==3.0.1 +sentence-transformers==3.1.0 transformers \ No newline at end of file diff --git a/backend/python/sentencetransformers/requirements-intel.txt b/backend/python/sentencetransformers/requirements-intel.txt index 806e3d47..5948910d 100644 --- a/backend/python/sentencetransformers/requirements-intel.txt +++ b/backend/python/sentencetransformers/requirements-intel.txt @@ -4,5 +4,5 @@ torch optimum[openvino] setuptools==69.5.1 # https://github.com/mudler/LocalAI/issues/2406 accelerate -sentence-transformers==3.0.1 +sentence-transformers==3.1.0 transformers \ No newline at end of file From c866b77586f25340d98a9fbb2ad16e22d5e4d577 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 17 Sep 2024 00:02:42 +0000 Subject: [PATCH 0116/1530] chore(deps): Bump llama-index from 0.11.7 to 0.11.9 in /examples/chainlit (#3567) chore(deps): Bump llama-index in /examples/chainlit Bumps [llama-index](https://github.com/run-llama/llama_index) from 0.11.7 to 0.11.9. - [Release notes](https://github.com/run-llama/llama_index/releases) - [Changelog](https://github.com/run-llama/llama_index/blob/main/CHANGELOG.md) - [Commits](https://github.com/run-llama/llama_index/compare/v0.11.7...v0.11.9) --- updated-dependencies: - dependency-name: llama-index dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- examples/chainlit/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/chainlit/requirements.txt b/examples/chainlit/requirements.txt index 69212e28..df8bea7f 100644 --- a/examples/chainlit/requirements.txt +++ b/examples/chainlit/requirements.txt @@ -1,4 +1,4 @@ -llama_index==0.11.7 +llama_index==0.11.9 requests==2.32.3 weaviate_client==4.6.7 transformers From 42d6b9e0ccc75fd3ecfb6275b0fe50236fdfc9f1 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 17 Sep 2024 00:11:15 +0000 Subject: [PATCH 0117/1530] chore(deps): Bump weaviate-client from 4.6.7 to 4.8.1 in /examples/chainlit (#3568) chore(deps): Bump weaviate-client in /examples/chainlit Bumps [weaviate-client](https://github.com/weaviate/weaviate-python-client) from 4.6.7 to 4.8.1. - [Release notes](https://github.com/weaviate/weaviate-python-client/releases) - [Changelog](https://github.com/weaviate/weaviate-python-client/blob/main/docs/changelog.rst) - [Commits](https://github.com/weaviate/weaviate-python-client/compare/v4.6.7...v4.8.1) --- updated-dependencies: - dependency-name: weaviate-client dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- examples/chainlit/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/chainlit/requirements.txt b/examples/chainlit/requirements.txt index df8bea7f..1fe9356a 100644 --- a/examples/chainlit/requirements.txt +++ b/examples/chainlit/requirements.txt @@ -1,6 +1,6 @@ llama_index==0.11.9 requests==2.32.3 -weaviate_client==4.6.7 +weaviate_client==4.8.1 transformers torch chainlit From abc27e0dc49dfba0ef5436c08acf5c5959f354ea Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 17 Sep 2024 00:51:55 +0000 Subject: [PATCH 0118/1530] chore(deps): Bump setuptools from 72.1.0 to 75.1.0 in /backend/python/vall-e-x (#3570) chore(deps): Bump setuptools in /backend/python/vall-e-x Bumps [setuptools](https://github.com/pypa/setuptools) from 72.1.0 to 75.1.0. - [Release notes](https://github.com/pypa/setuptools/releases) - [Changelog](https://github.com/pypa/setuptools/blob/main/NEWS.rst) - [Commits](https://github.com/pypa/setuptools/compare/v72.1.0...v75.1.0) --- updated-dependencies: - dependency-name: setuptools dependency-type: direct:production update-type: version-update:semver-major ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- backend/python/vall-e-x/requirements-intel.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/backend/python/vall-e-x/requirements-intel.txt b/backend/python/vall-e-x/requirements-intel.txt index 6185314f..adbabeac 100644 --- a/backend/python/vall-e-x/requirements-intel.txt +++ b/backend/python/vall-e-x/requirements-intel.txt @@ -4,4 +4,4 @@ accelerate torch torchaudio optimum[openvino] -setuptools==72.1.0 # https://github.com/mudler/LocalAI/issues/2406 \ No newline at end of file +setuptools==75.1.0 # https://github.com/mudler/LocalAI/issues/2406 \ No newline at end of file From 36e19928eb2ad4f4976454873f101112d131b564 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 17 Sep 2024 01:14:39 +0000 Subject: [PATCH 0119/1530] chore(deps): Bump greenlet from 3.0.3 to 3.1.0 in /examples/langchain/langchainpy-localai-example (#3571) chore(deps): Bump greenlet Bumps [greenlet](https://github.com/python-greenlet/greenlet) from 3.0.3 to 3.1.0. - [Changelog](https://github.com/python-greenlet/greenlet/blob/master/CHANGES.rst) - [Commits](https://github.com/python-greenlet/greenlet/compare/3.0.3...3.1.0) --- updated-dependencies: - dependency-name: greenlet dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- examples/langchain/langchainpy-localai-example/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/langchain/langchainpy-localai-example/requirements.txt b/examples/langchain/langchainpy-localai-example/requirements.txt index 75323005..1bd6b841 100644 --- a/examples/langchain/langchainpy-localai-example/requirements.txt +++ b/examples/langchain/langchainpy-localai-example/requirements.txt @@ -8,7 +8,7 @@ colorama==0.4.6 dataclasses-json==0.6.7 debugpy==1.8.2 frozenlist==1.4.1 -greenlet==3.0.3 +greenlet==3.1.0 idna==3.8 langchain==0.2.16 langchain-community==0.2.16 From 2394f7833fab174663231b722e5de964446d2cbf Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 17 Sep 2024 02:28:05 +0000 Subject: [PATCH 0120/1530] chore(deps): Bump setuptools from 70.3.0 to 75.1.0 in /backend/python/diffusers (#3575) chore(deps): Bump setuptools in /backend/python/diffusers Bumps [setuptools](https://github.com/pypa/setuptools) from 70.3.0 to 75.1.0. - [Release notes](https://github.com/pypa/setuptools/releases) - [Changelog](https://github.com/pypa/setuptools/blob/main/NEWS.rst) - [Commits](https://github.com/pypa/setuptools/compare/v70.3.0...v75.1.0) --- updated-dependencies: - dependency-name: setuptools dependency-type: direct:production update-type: version-update:semver-major ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- backend/python/diffusers/requirements-intel.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/backend/python/diffusers/requirements-intel.txt b/backend/python/diffusers/requirements-intel.txt index 1cc2e2a2..566278a8 100644 --- a/backend/python/diffusers/requirements-intel.txt +++ b/backend/python/diffusers/requirements-intel.txt @@ -3,7 +3,7 @@ intel-extension-for-pytorch torch torchvision optimum[openvino] -setuptools==70.3.0 # https://github.com/mudler/LocalAI/issues/2406 +setuptools==75.1.0 # https://github.com/mudler/LocalAI/issues/2406 diffusers opencv-python transformers From 06c83398624549fba12e5ec975c2c25a0e7e649a Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 17 Sep 2024 02:32:33 +0000 Subject: [PATCH 0121/1530] chore(deps): Bump setuptools from 70.3.0 to 75.1.0 in /backend/python/bark (#3574) chore(deps): Bump setuptools in /backend/python/bark Bumps [setuptools](https://github.com/pypa/setuptools) from 70.3.0 to 75.1.0. - [Release notes](https://github.com/pypa/setuptools/releases) - [Changelog](https://github.com/pypa/setuptools/blob/main/NEWS.rst) - [Commits](https://github.com/pypa/setuptools/compare/v70.3.0...v75.1.0) --- updated-dependencies: - dependency-name: setuptools dependency-type: direct:production update-type: version-update:semver-major ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- backend/python/bark/requirements-intel.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/backend/python/bark/requirements-intel.txt b/backend/python/bark/requirements-intel.txt index 9feb6eef..c0e4dcaa 100644 --- a/backend/python/bark/requirements-intel.txt +++ b/backend/python/bark/requirements-intel.txt @@ -3,6 +3,6 @@ intel-extension-for-pytorch torch torchaudio optimum[openvino] -setuptools==70.3.0 # https://github.com/mudler/LocalAI/issues/2406 +setuptools==75.1.0 # https://github.com/mudler/LocalAI/issues/2406 transformers accelerate \ No newline at end of file From a9a3a07c3bf22b2a3741471f6122876c65d8909a Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 17 Sep 2024 03:24:30 +0000 Subject: [PATCH 0122/1530] chore(deps): Bump setuptools from 72.1.0 to 75.1.0 in /backend/python/rerankers (#3578) chore(deps): Bump setuptools in /backend/python/rerankers Bumps [setuptools](https://github.com/pypa/setuptools) from 72.1.0 to 75.1.0. - [Release notes](https://github.com/pypa/setuptools/releases) - [Changelog](https://github.com/pypa/setuptools/blob/main/NEWS.rst) - [Commits](https://github.com/pypa/setuptools/compare/v72.1.0...v75.1.0) --- updated-dependencies: - dependency-name: setuptools dependency-type: direct:production update-type: version-update:semver-major ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- backend/python/rerankers/requirements-intel.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/backend/python/rerankers/requirements-intel.txt b/backend/python/rerankers/requirements-intel.txt index 1a39cf4f..e6bb4cc7 100644 --- a/backend/python/rerankers/requirements-intel.txt +++ b/backend/python/rerankers/requirements-intel.txt @@ -5,4 +5,4 @@ accelerate torch rerankers[transformers] optimum[openvino] -setuptools==72.1.0 # https://github.com/mudler/LocalAI/issues/2406 \ No newline at end of file +setuptools==75.1.0 # https://github.com/mudler/LocalAI/issues/2406 \ No newline at end of file From db1159b6511e8fa09e594f9db0fec6ab4e142468 Mon Sep 17 00:00:00 2001 From: Dave Date: Mon, 16 Sep 2024 23:29:07 -0400 Subject: [PATCH 0123/1530] feat: auth v2 - supersedes #2894 (#3476) feat: auth v2 - supercedes #2894, metrics to follow later Signed-off-by: Dave Lee --- core/cli/run.go | 56 ++++++++++--------- core/config/application_config.go | 40 +++++++++++-- core/http/app.go | 49 +++++----------- core/http/middleware/auth.go | 93 +++++++++++++++++++++++++++++++ core/http/routes/elevenlabs.go | 7 +-- core/http/routes/jina.go | 3 +- core/http/routes/localai.go | 41 +++++++------- core/http/routes/openai.go | 89 +++++++++++++++-------------- core/http/routes/ui.go | 41 +++++++------- go.mod | 1 + go.sum | 2 + 11 files changed, 264 insertions(+), 158 deletions(-) create mode 100644 core/http/middleware/auth.go diff --git a/core/cli/run.go b/core/cli/run.go index 55ae0fd5..afb7204c 100644 --- a/core/cli/run.go +++ b/core/cli/run.go @@ -41,31 +41,34 @@ type RunCMD struct { Threads int `env:"LOCALAI_THREADS,THREADS" short:"t" help:"Number of threads used for parallel computation. 
Usage of the number of physical cores in the system is suggested" group:"performance"` ContextSize int `env:"LOCALAI_CONTEXT_SIZE,CONTEXT_SIZE" default:"512" help:"Default context size for models" group:"performance"` - Address string `env:"LOCALAI_ADDRESS,ADDRESS" default:":8080" help:"Bind address for the API server" group:"api"` - CORS bool `env:"LOCALAI_CORS,CORS" help:"" group:"api"` - CORSAllowOrigins string `env:"LOCALAI_CORS_ALLOW_ORIGINS,CORS_ALLOW_ORIGINS" group:"api"` - LibraryPath string `env:"LOCALAI_LIBRARY_PATH,LIBRARY_PATH" help:"Path to the library directory (for e.g. external libraries used by backends)" default:"/usr/share/local-ai/libs" group:"backends"` - CSRF bool `env:"LOCALAI_CSRF" help:"Enables fiber CSRF middleware" group:"api"` - UploadLimit int `env:"LOCALAI_UPLOAD_LIMIT,UPLOAD_LIMIT" default:"15" help:"Default upload-limit in MB" group:"api"` - APIKeys []string `env:"LOCALAI_API_KEY,API_KEY" help:"List of API Keys to enable API authentication. When this is set, all the requests must be authenticated with one of these API keys" group:"api"` - DisableWebUI bool `env:"LOCALAI_DISABLE_WEBUI,DISABLE_WEBUI" default:"false" help:"Disable webui" group:"api"` - DisablePredownloadScan bool `env:"LOCALAI_DISABLE_PREDOWNLOAD_SCAN" help:"If true, disables the best-effort security scanner before downloading any files." group:"hardening" default:"false"` - OpaqueErrors bool `env:"LOCALAI_OPAQUE_ERRORS" default:"false" help:"If true, all error responses are replaced with blank 500 errors. This is intended only for hardening against information leaks and is normally not recommended." 
group:"hardening"` - Peer2Peer bool `env:"LOCALAI_P2P,P2P" name:"p2p" default:"false" help:"Enable P2P mode" group:"p2p"` - Peer2PeerDHTInterval int `env:"LOCALAI_P2P_DHT_INTERVAL,P2P_DHT_INTERVAL" default:"360" name:"p2p-dht-interval" help:"Interval for DHT refresh (used during token generation)" group:"p2p"` - Peer2PeerOTPInterval int `env:"LOCALAI_P2P_OTP_INTERVAL,P2P_OTP_INTERVAL" default:"9000" name:"p2p-otp-interval" help:"Interval for OTP refresh (used during token generation)" group:"p2p"` - Peer2PeerToken string `env:"LOCALAI_P2P_TOKEN,P2P_TOKEN,TOKEN" name:"p2ptoken" help:"Token for P2P mode (optional)" group:"p2p"` - Peer2PeerNetworkID string `env:"LOCALAI_P2P_NETWORK_ID,P2P_NETWORK_ID" help:"Network ID for P2P mode, can be set arbitrarly by the user for grouping a set of instances" group:"p2p"` - ParallelRequests bool `env:"LOCALAI_PARALLEL_REQUESTS,PARALLEL_REQUESTS" help:"Enable backends to handle multiple requests in parallel if they support it (e.g.: llama.cpp or vllm)" group:"backends"` - SingleActiveBackend bool `env:"LOCALAI_SINGLE_ACTIVE_BACKEND,SINGLE_ACTIVE_BACKEND" help:"Allow only one backend to be run at a time" group:"backends"` - PreloadBackendOnly bool `env:"LOCALAI_PRELOAD_BACKEND_ONLY,PRELOAD_BACKEND_ONLY" default:"false" help:"Do not launch the API services, only the preloaded models / backends are started (useful for multi-node setups)" group:"backends"` - ExternalGRPCBackends []string `env:"LOCALAI_EXTERNAL_GRPC_BACKENDS,EXTERNAL_GRPC_BACKENDS" help:"A list of external grpc backends" group:"backends"` - EnableWatchdogIdle bool `env:"LOCALAI_WATCHDOG_IDLE,WATCHDOG_IDLE" default:"false" help:"Enable watchdog for stopping backends that are idle longer than the watchdog-idle-timeout" group:"backends"` - WatchdogIdleTimeout string `env:"LOCALAI_WATCHDOG_IDLE_TIMEOUT,WATCHDOG_IDLE_TIMEOUT" default:"15m" help:"Threshold beyond which an idle backend should be stopped" group:"backends"` - EnableWatchdogBusy bool 
`env:"LOCALAI_WATCHDOG_BUSY,WATCHDOG_BUSY" default:"false" help:"Enable watchdog for stopping backends that are busy longer than the watchdog-busy-timeout" group:"backends"` - WatchdogBusyTimeout string `env:"LOCALAI_WATCHDOG_BUSY_TIMEOUT,WATCHDOG_BUSY_TIMEOUT" default:"5m" help:"Threshold beyond which a busy backend should be stopped" group:"backends"` - Federated bool `env:"LOCALAI_FEDERATED,FEDERATED" help:"Enable federated instance" group:"federated"` - DisableGalleryEndpoint bool `env:"LOCALAI_DISABLE_GALLERY_ENDPOINT,DISABLE_GALLERY_ENDPOINT" help:"Disable the gallery endpoints" group:"api"` + Address string `env:"LOCALAI_ADDRESS,ADDRESS" default:":8080" help:"Bind address for the API server" group:"api"` + CORS bool `env:"LOCALAI_CORS,CORS" help:"" group:"api"` + CORSAllowOrigins string `env:"LOCALAI_CORS_ALLOW_ORIGINS,CORS_ALLOW_ORIGINS" group:"api"` + LibraryPath string `env:"LOCALAI_LIBRARY_PATH,LIBRARY_PATH" help:"Path to the library directory (for e.g. external libraries used by backends)" default:"/usr/share/local-ai/libs" group:"backends"` + CSRF bool `env:"LOCALAI_CSRF" help:"Enables fiber CSRF middleware" group:"api"` + UploadLimit int `env:"LOCALAI_UPLOAD_LIMIT,UPLOAD_LIMIT" default:"15" help:"Default upload-limit in MB" group:"api"` + APIKeys []string `env:"LOCALAI_API_KEY,API_KEY" help:"List of API Keys to enable API authentication. When this is set, all the requests must be authenticated with one of these API keys" group:"api"` + DisableWebUI bool `env:"LOCALAI_DISABLE_WEBUI,DISABLE_WEBUI" default:"false" help:"Disable webui" group:"api"` + DisablePredownloadScan bool `env:"LOCALAI_DISABLE_PREDOWNLOAD_SCAN" help:"If true, disables the best-effort security scanner before downloading any files." group:"hardening" default:"false"` + OpaqueErrors bool `env:"LOCALAI_OPAQUE_ERRORS" default:"false" help:"If true, all error responses are replaced with blank 500 errors. 
This is intended only for hardening against information leaks and is normally not recommended." group:"hardening"` + UseSubtleKeyComparison bool `env:"LOCALAI_SUBTLE_KEY_COMPARISON" default:"false" help:"If true, API Key validation comparisons will be performed using constant-time comparisons rather than simple equality. This trades off performance on each request for resiliancy against timing attacks." group:"hardening"` + DisableApiKeyRequirementForHttpGet bool `env:"LOCALAI_DISABLE_API_KEY_REQUIREMENT_FOR_HTTP_GET" default:"false" help:"If true, a valid API key is not required to issue GET requests to portions of the web ui. This should only be enabled in secure testing environments" group:"hardening"` + HttpGetExemptedEndpoints []string `env:"LOCALAI_HTTP_GET_EXEMPTED_ENDPOINTS" default:"^/$,^/browse/?$,^/talk/?$,^/p2p/?$,^/chat/?$,^/text2image/?$,^/tts/?$,^/static/.*$,^/swagger.*$" help:"If LOCALAI_DISABLE_API_KEY_REQUIREMENT_FOR_HTTP_GET is overriden to true, this is the list of endpoints to exempt. 
Only adjust this in case of a security incident or as a result of a personal security posture review" group:"hardening"` + Peer2Peer bool `env:"LOCALAI_P2P,P2P" name:"p2p" default:"false" help:"Enable P2P mode" group:"p2p"` + Peer2PeerDHTInterval int `env:"LOCALAI_P2P_DHT_INTERVAL,P2P_DHT_INTERVAL" default:"360" name:"p2p-dht-interval" help:"Interval for DHT refresh (used during token generation)" group:"p2p"` + Peer2PeerOTPInterval int `env:"LOCALAI_P2P_OTP_INTERVAL,P2P_OTP_INTERVAL" default:"9000" name:"p2p-otp-interval" help:"Interval for OTP refresh (used during token generation)" group:"p2p"` + Peer2PeerToken string `env:"LOCALAI_P2P_TOKEN,P2P_TOKEN,TOKEN" name:"p2ptoken" help:"Token for P2P mode (optional)" group:"p2p"` + Peer2PeerNetworkID string `env:"LOCALAI_P2P_NETWORK_ID,P2P_NETWORK_ID" help:"Network ID for P2P mode, can be set arbitrarly by the user for grouping a set of instances" group:"p2p"` + ParallelRequests bool `env:"LOCALAI_PARALLEL_REQUESTS,PARALLEL_REQUESTS" help:"Enable backends to handle multiple requests in parallel if they support it (e.g.: llama.cpp or vllm)" group:"backends"` + SingleActiveBackend bool `env:"LOCALAI_SINGLE_ACTIVE_BACKEND,SINGLE_ACTIVE_BACKEND" help:"Allow only one backend to be run at a time" group:"backends"` + PreloadBackendOnly bool `env:"LOCALAI_PRELOAD_BACKEND_ONLY,PRELOAD_BACKEND_ONLY" default:"false" help:"Do not launch the API services, only the preloaded models / backends are started (useful for multi-node setups)" group:"backends"` + ExternalGRPCBackends []string `env:"LOCALAI_EXTERNAL_GRPC_BACKENDS,EXTERNAL_GRPC_BACKENDS" help:"A list of external grpc backends" group:"backends"` + EnableWatchdogIdle bool `env:"LOCALAI_WATCHDOG_IDLE,WATCHDOG_IDLE" default:"false" help:"Enable watchdog for stopping backends that are idle longer than the watchdog-idle-timeout" group:"backends"` + WatchdogIdleTimeout string `env:"LOCALAI_WATCHDOG_IDLE_TIMEOUT,WATCHDOG_IDLE_TIMEOUT" default:"15m" help:"Threshold beyond which an 
idle backend should be stopped" group:"backends"` + EnableWatchdogBusy bool `env:"LOCALAI_WATCHDOG_BUSY,WATCHDOG_BUSY" default:"false" help:"Enable watchdog for stopping backends that are busy longer than the watchdog-busy-timeout" group:"backends"` + WatchdogBusyTimeout string `env:"LOCALAI_WATCHDOG_BUSY_TIMEOUT,WATCHDOG_BUSY_TIMEOUT" default:"5m" help:"Threshold beyond which a busy backend should be stopped" group:"backends"` + Federated bool `env:"LOCALAI_FEDERATED,FEDERATED" help:"Enable federated instance" group:"federated"` + DisableGalleryEndpoint bool `env:"LOCALAI_DISABLE_GALLERY_ENDPOINT,DISABLE_GALLERY_ENDPOINT" help:"Disable the gallery endpoints" group:"api"` } func (r *RunCMD) Run(ctx *cliContext.Context) error { @@ -97,6 +100,9 @@ func (r *RunCMD) Run(ctx *cliContext.Context) error { config.WithModelsURL(append(r.Models, r.ModelArgs...)...), config.WithOpaqueErrors(r.OpaqueErrors), config.WithEnforcedPredownloadScans(!r.DisablePredownloadScan), + config.WithSubtleKeyComparison(r.UseSubtleKeyComparison), + config.WithDisableApiKeyRequirementForHttpGet(r.DisableApiKeyRequirementForHttpGet), + config.WithHttpGetExemptedEndpoints(r.HttpGetExemptedEndpoints), config.WithP2PNetworkID(r.Peer2PeerNetworkID), } diff --git a/core/config/application_config.go b/core/config/application_config.go index 947c4f13..afbf325f 100644 --- a/core/config/application_config.go +++ b/core/config/application_config.go @@ -4,6 +4,7 @@ import ( "context" "embed" "encoding/json" + "regexp" "time" "github.com/mudler/LocalAI/pkg/xsysinfo" @@ -16,7 +17,6 @@ type ApplicationConfig struct { ModelPath string LibPath string UploadLimitMB, Threads, ContextSize int - DisableWebUI bool F16 bool Debug bool ImageDir string @@ -31,11 +31,17 @@ type ApplicationConfig struct { PreloadModelsFromPath string CORSAllowOrigins string ApiKeys []string - EnforcePredownloadScans bool - OpaqueErrors bool P2PToken string P2PNetworkID string + DisableWebUI bool + EnforcePredownloadScans bool + 
OpaqueErrors bool + UseSubtleKeyComparison bool + DisableApiKeyRequirementForHttpGet bool + HttpGetExemptedEndpoints []*regexp.Regexp + DisableGalleryEndpoint bool + ModelLibraryURL string Galleries []Gallery @@ -57,8 +63,6 @@ type ApplicationConfig struct { ModelsURL []string WatchDogBusyTimeout, WatchDogIdleTimeout time.Duration - - DisableGalleryEndpoint bool } type AppOption func(*ApplicationConfig) @@ -327,6 +331,32 @@ func WithOpaqueErrors(opaque bool) AppOption { } } +func WithSubtleKeyComparison(subtle bool) AppOption { + return func(o *ApplicationConfig) { + o.UseSubtleKeyComparison = subtle + } +} + +func WithDisableApiKeyRequirementForHttpGet(required bool) AppOption { + return func(o *ApplicationConfig) { + o.DisableApiKeyRequirementForHttpGet = required + } +} + +func WithHttpGetExemptedEndpoints(endpoints []string) AppOption { + return func(o *ApplicationConfig) { + o.HttpGetExemptedEndpoints = []*regexp.Regexp{} + for _, epr := range endpoints { + r, err := regexp.Compile(epr) + if err == nil && r != nil { + o.HttpGetExemptedEndpoints = append(o.HttpGetExemptedEndpoints, r) + } else { + log.Warn().Err(err).Str("regex", epr).Msg("Error while compiling HTTP Get Exemption regex, skipping this entry.") + } + } + } +} + // ToConfigLoaderOptions returns a slice of ConfigLoader Option. // Some options defined at the application level are going to be passed as defaults for // all the configuration for the models. 
diff --git a/core/http/app.go b/core/http/app.go index 6eb9c956..fa9cd866 100644 --- a/core/http/app.go +++ b/core/http/app.go @@ -3,13 +3,15 @@ package http import ( "embed" "errors" + "fmt" "net/http" - "strings" + "github.com/dave-gray101/v2keyauth" "github.com/mudler/LocalAI/pkg/utils" "github.com/mudler/LocalAI/core/http/endpoints/localai" "github.com/mudler/LocalAI/core/http/endpoints/openai" + "github.com/mudler/LocalAI/core/http/middleware" "github.com/mudler/LocalAI/core/http/routes" "github.com/mudler/LocalAI/core/config" @@ -137,37 +139,14 @@ func App(cl *config.BackendConfigLoader, ml *model.ModelLoader, appConfig *confi }) } - // Auth middleware checking if API key is valid. If no API key is set, no auth is required. - auth := func(c *fiber.Ctx) error { - if len(appConfig.ApiKeys) == 0 { - return c.Next() - } - - if len(appConfig.ApiKeys) == 0 { - return c.Next() - } - - authHeader := readAuthHeader(c) - if authHeader == "" { - return c.Status(fiber.StatusUnauthorized).JSON(fiber.Map{"message": "Authorization header missing"}) - } - - // If it's a bearer token - authHeaderParts := strings.Split(authHeader, " ") - if len(authHeaderParts) != 2 || authHeaderParts[0] != "Bearer" { - return c.Status(fiber.StatusUnauthorized).JSON(fiber.Map{"message": "Invalid Authorization header format"}) - } - - apiKey := authHeaderParts[1] - for _, key := range appConfig.ApiKeys { - if apiKey == key { - return c.Next() - } - } - - return c.Status(fiber.StatusUnauthorized).JSON(fiber.Map{"message": "Invalid API key"}) + kaConfig, err := middleware.GetKeyAuthConfig(appConfig) + if err != nil || kaConfig == nil { + return nil, fmt.Errorf("failed to create key auth config: %w", err) } + // Auth is applied to _all_ endpoints. No exceptions. 
Filtering out endpoints to bypass is the role of the Filter property of the KeyAuth Configuration + app.Use(v2keyauth.New(*kaConfig)) + if appConfig.CORS { var c func(ctx *fiber.Ctx) error if appConfig.CORSAllowOrigins == "" { @@ -192,13 +171,13 @@ func App(cl *config.BackendConfigLoader, ml *model.ModelLoader, appConfig *confi galleryService := services.NewGalleryService(appConfig) galleryService.Start(appConfig.Context, cl) - routes.RegisterElevenLabsRoutes(app, cl, ml, appConfig, auth) - routes.RegisterLocalAIRoutes(app, cl, ml, appConfig, galleryService, auth) - routes.RegisterOpenAIRoutes(app, cl, ml, appConfig, auth) + routes.RegisterElevenLabsRoutes(app, cl, ml, appConfig) + routes.RegisterLocalAIRoutes(app, cl, ml, appConfig, galleryService) + routes.RegisterOpenAIRoutes(app, cl, ml, appConfig) if !appConfig.DisableWebUI { - routes.RegisterUIRoutes(app, cl, ml, appConfig, galleryService, auth) + routes.RegisterUIRoutes(app, cl, ml, appConfig, galleryService) } - routes.RegisterJINARoutes(app, cl, ml, appConfig, auth) + routes.RegisterJINARoutes(app, cl, ml, appConfig) httpFS := http.FS(embedDirStatic) diff --git a/core/http/middleware/auth.go b/core/http/middleware/auth.go new file mode 100644 index 00000000..bc8bcf80 --- /dev/null +++ b/core/http/middleware/auth.go @@ -0,0 +1,93 @@ +package middleware + +import ( + "crypto/subtle" + "errors" + + "github.com/dave-gray101/v2keyauth" + "github.com/gofiber/fiber/v2" + "github.com/gofiber/fiber/v2/middleware/keyauth" + "github.com/mudler/LocalAI/core/config" +) + +// This file contains the configuration generators and handler functions that are used along with the fiber/keyauth middleware +// Currently this requires an upstream patch - and feature patches are no longer accepted to v2 +// Therefore `dave-gray101/v2keyauth` contains the v2 backport of the middleware until v3 stabilizes and we migrate. 
+ +func GetKeyAuthConfig(applicationConfig *config.ApplicationConfig) (*v2keyauth.Config, error) { + customLookup, err := v2keyauth.MultipleKeySourceLookup([]string{"header:Authorization", "header:x-api-key", "header:xi-api-key"}, keyauth.ConfigDefault.AuthScheme) + if err != nil { + return nil, err + } + + return &v2keyauth.Config{ + CustomKeyLookup: customLookup, + Next: getApiKeyRequiredFilterFunction(applicationConfig), + Validator: getApiKeyValidationFunction(applicationConfig), + ErrorHandler: getApiKeyErrorHandler(applicationConfig), + AuthScheme: "Bearer", + }, nil +} + +func getApiKeyErrorHandler(applicationConfig *config.ApplicationConfig) fiber.ErrorHandler { + return func(ctx *fiber.Ctx, err error) error { + if errors.Is(err, v2keyauth.ErrMissingOrMalformedAPIKey) { + if len(applicationConfig.ApiKeys) == 0 { + return ctx.Next() // if no keys are set up, any error we get here is not an error. + } + if applicationConfig.OpaqueErrors { + return ctx.SendStatus(403) + } + } + if applicationConfig.OpaqueErrors { + return ctx.SendStatus(500) + } + return err + } +} + +func getApiKeyValidationFunction(applicationConfig *config.ApplicationConfig) func(*fiber.Ctx, string) (bool, error) { + + if applicationConfig.UseSubtleKeyComparison { + return func(ctx *fiber.Ctx, apiKey string) (bool, error) { + if len(applicationConfig.ApiKeys) == 0 { + return true, nil // If no keys are setup, accept everything + } + for _, validKey := range applicationConfig.ApiKeys { + if subtle.ConstantTimeCompare([]byte(apiKey), []byte(validKey)) == 1 { + return true, nil + } + } + return false, v2keyauth.ErrMissingOrMalformedAPIKey + } + } + + return func(ctx *fiber.Ctx, apiKey string) (bool, error) { + if len(applicationConfig.ApiKeys) == 0 { + return true, nil // If no keys are setup, accept everything + } + for _, validKey := range applicationConfig.ApiKeys { + if apiKey == validKey { + return true, nil + } + } + return false, v2keyauth.ErrMissingOrMalformedAPIKey + } +} + +func 
getApiKeyRequiredFilterFunction(applicationConfig *config.ApplicationConfig) func(*fiber.Ctx) bool { + if applicationConfig.DisableApiKeyRequirementForHttpGet { + return func(c *fiber.Ctx) bool { + if c.Method() != "GET" { + return false + } + for _, rx := range applicationConfig.HttpGetExemptedEndpoints { + if rx.MatchString(c.Path()) { + return true + } + } + return false + } + } + return func(c *fiber.Ctx) bool { return false } +} \ No newline at end of file diff --git a/core/http/routes/elevenlabs.go b/core/http/routes/elevenlabs.go index b20dec75..73387c7b 100644 --- a/core/http/routes/elevenlabs.go +++ b/core/http/routes/elevenlabs.go @@ -10,12 +10,11 @@ import ( func RegisterElevenLabsRoutes(app *fiber.App, cl *config.BackendConfigLoader, ml *model.ModelLoader, - appConfig *config.ApplicationConfig, - auth func(*fiber.Ctx) error) { + appConfig *config.ApplicationConfig) { // Elevenlabs - app.Post("/v1/text-to-speech/:voice-id", auth, elevenlabs.TTSEndpoint(cl, ml, appConfig)) + app.Post("/v1/text-to-speech/:voice-id", elevenlabs.TTSEndpoint(cl, ml, appConfig)) - app.Post("/v1/sound-generation", auth, elevenlabs.SoundGenerationEndpoint(cl, ml, appConfig)) + app.Post("/v1/sound-generation", elevenlabs.SoundGenerationEndpoint(cl, ml, appConfig)) } diff --git a/core/http/routes/jina.go b/core/http/routes/jina.go index 92f29224..93125e6c 100644 --- a/core/http/routes/jina.go +++ b/core/http/routes/jina.go @@ -11,8 +11,7 @@ import ( func RegisterJINARoutes(app *fiber.App, cl *config.BackendConfigLoader, ml *model.ModelLoader, - appConfig *config.ApplicationConfig, - auth func(*fiber.Ctx) error) { + appConfig *config.ApplicationConfig) { // POST endpoint to mimic the reranking app.Post("/v1/rerank", jina.JINARerankEndpoint(cl, ml, appConfig)) diff --git a/core/http/routes/localai.go b/core/http/routes/localai.go index f85fa807..29fef378 100644 --- a/core/http/routes/localai.go +++ b/core/http/routes/localai.go @@ -15,33 +15,32 @@ func RegisterLocalAIRoutes(app 
*fiber.App, cl *config.BackendConfigLoader, ml *model.ModelLoader, appConfig *config.ApplicationConfig, - galleryService *services.GalleryService, - auth func(*fiber.Ctx) error) { + galleryService *services.GalleryService) { app.Get("/swagger/*", swagger.HandlerDefault) // default // LocalAI API endpoints if !appConfig.DisableGalleryEndpoint { modelGalleryEndpointService := localai.CreateModelGalleryEndpointService(appConfig.Galleries, appConfig.ModelPath, galleryService) - app.Post("/models/apply", auth, modelGalleryEndpointService.ApplyModelGalleryEndpoint()) - app.Post("/models/delete/:name", auth, modelGalleryEndpointService.DeleteModelGalleryEndpoint()) + app.Post("/models/apply", modelGalleryEndpointService.ApplyModelGalleryEndpoint()) + app.Post("/models/delete/:name", modelGalleryEndpointService.DeleteModelGalleryEndpoint()) - app.Get("/models/available", auth, modelGalleryEndpointService.ListModelFromGalleryEndpoint()) - app.Get("/models/galleries", auth, modelGalleryEndpointService.ListModelGalleriesEndpoint()) - app.Post("/models/galleries", auth, modelGalleryEndpointService.AddModelGalleryEndpoint()) - app.Delete("/models/galleries", auth, modelGalleryEndpointService.RemoveModelGalleryEndpoint()) - app.Get("/models/jobs/:uuid", auth, modelGalleryEndpointService.GetOpStatusEndpoint()) - app.Get("/models/jobs", auth, modelGalleryEndpointService.GetAllStatusEndpoint()) + app.Get("/models/available", modelGalleryEndpointService.ListModelFromGalleryEndpoint()) + app.Get("/models/galleries", modelGalleryEndpointService.ListModelGalleriesEndpoint()) + app.Post("/models/galleries", modelGalleryEndpointService.AddModelGalleryEndpoint()) + app.Delete("/models/galleries", modelGalleryEndpointService.RemoveModelGalleryEndpoint()) + app.Get("/models/jobs/:uuid", modelGalleryEndpointService.GetOpStatusEndpoint()) + app.Get("/models/jobs", modelGalleryEndpointService.GetAllStatusEndpoint()) } - app.Post("/tts", auth, localai.TTSEndpoint(cl, ml, appConfig)) + 
app.Post("/tts", localai.TTSEndpoint(cl, ml, appConfig)) // Stores sl := model.NewModelLoader("") - app.Post("/stores/set", auth, localai.StoresSetEndpoint(sl, appConfig)) - app.Post("/stores/delete", auth, localai.StoresDeleteEndpoint(sl, appConfig)) - app.Post("/stores/get", auth, localai.StoresGetEndpoint(sl, appConfig)) - app.Post("/stores/find", auth, localai.StoresFindEndpoint(sl, appConfig)) + app.Post("/stores/set", localai.StoresSetEndpoint(sl, appConfig)) + app.Post("/stores/delete", localai.StoresDeleteEndpoint(sl, appConfig)) + app.Post("/stores/get", localai.StoresGetEndpoint(sl, appConfig)) + app.Post("/stores/find", localai.StoresFindEndpoint(sl, appConfig)) // Kubernetes health checks ok := func(c *fiber.Ctx) error { @@ -51,20 +50,20 @@ func RegisterLocalAIRoutes(app *fiber.App, app.Get("/healthz", ok) app.Get("/readyz", ok) - app.Get("/metrics", auth, localai.LocalAIMetricsEndpoint()) + app.Get("/metrics", localai.LocalAIMetricsEndpoint()) // Experimental Backend Statistics Module backendMonitorService := services.NewBackendMonitorService(ml, cl, appConfig) // Split out for now - app.Get("/backend/monitor", auth, localai.BackendMonitorEndpoint(backendMonitorService)) - app.Post("/backend/shutdown", auth, localai.BackendShutdownEndpoint(backendMonitorService)) + app.Get("/backend/monitor", localai.BackendMonitorEndpoint(backendMonitorService)) + app.Post("/backend/shutdown", localai.BackendShutdownEndpoint(backendMonitorService)) // p2p if p2p.IsP2PEnabled() { - app.Get("/api/p2p", auth, localai.ShowP2PNodes(appConfig)) - app.Get("/api/p2p/token", auth, localai.ShowP2PToken(appConfig)) + app.Get("/api/p2p", localai.ShowP2PNodes(appConfig)) + app.Get("/api/p2p/token", localai.ShowP2PToken(appConfig)) } - app.Get("/version", auth, func(c *fiber.Ctx) error { + app.Get("/version", func(c *fiber.Ctx) error { return c.JSON(struct { Version string `json:"version"` }{Version: internal.PrintableVersion()}) diff --git a/core/http/routes/openai.go 
b/core/http/routes/openai.go index e190bc6d..081daf70 100644 --- a/core/http/routes/openai.go +++ b/core/http/routes/openai.go @@ -11,66 +11,65 @@ import ( func RegisterOpenAIRoutes(app *fiber.App, cl *config.BackendConfigLoader, ml *model.ModelLoader, - appConfig *config.ApplicationConfig, - auth func(*fiber.Ctx) error) { + appConfig *config.ApplicationConfig) { // openAI compatible API endpoint // chat - app.Post("/v1/chat/completions", auth, openai.ChatEndpoint(cl, ml, appConfig)) - app.Post("/chat/completions", auth, openai.ChatEndpoint(cl, ml, appConfig)) + app.Post("/v1/chat/completions", openai.ChatEndpoint(cl, ml, appConfig)) + app.Post("/chat/completions", openai.ChatEndpoint(cl, ml, appConfig)) // edit - app.Post("/v1/edits", auth, openai.EditEndpoint(cl, ml, appConfig)) - app.Post("/edits", auth, openai.EditEndpoint(cl, ml, appConfig)) + app.Post("/v1/edits", openai.EditEndpoint(cl, ml, appConfig)) + app.Post("/edits", openai.EditEndpoint(cl, ml, appConfig)) // assistant - app.Get("/v1/assistants", auth, openai.ListAssistantsEndpoint(cl, ml, appConfig)) - app.Get("/assistants", auth, openai.ListAssistantsEndpoint(cl, ml, appConfig)) - app.Post("/v1/assistants", auth, openai.CreateAssistantEndpoint(cl, ml, appConfig)) - app.Post("/assistants", auth, openai.CreateAssistantEndpoint(cl, ml, appConfig)) - app.Delete("/v1/assistants/:assistant_id", auth, openai.DeleteAssistantEndpoint(cl, ml, appConfig)) - app.Delete("/assistants/:assistant_id", auth, openai.DeleteAssistantEndpoint(cl, ml, appConfig)) - app.Get("/v1/assistants/:assistant_id", auth, openai.GetAssistantEndpoint(cl, ml, appConfig)) - app.Get("/assistants/:assistant_id", auth, openai.GetAssistantEndpoint(cl, ml, appConfig)) - app.Post("/v1/assistants/:assistant_id", auth, openai.ModifyAssistantEndpoint(cl, ml, appConfig)) - app.Post("/assistants/:assistant_id", auth, openai.ModifyAssistantEndpoint(cl, ml, appConfig)) - app.Get("/v1/assistants/:assistant_id/files", auth, 
openai.ListAssistantFilesEndpoint(cl, ml, appConfig)) - app.Get("/assistants/:assistant_id/files", auth, openai.ListAssistantFilesEndpoint(cl, ml, appConfig)) - app.Post("/v1/assistants/:assistant_id/files", auth, openai.CreateAssistantFileEndpoint(cl, ml, appConfig)) - app.Post("/assistants/:assistant_id/files", auth, openai.CreateAssistantFileEndpoint(cl, ml, appConfig)) - app.Delete("/v1/assistants/:assistant_id/files/:file_id", auth, openai.DeleteAssistantFileEndpoint(cl, ml, appConfig)) - app.Delete("/assistants/:assistant_id/files/:file_id", auth, openai.DeleteAssistantFileEndpoint(cl, ml, appConfig)) - app.Get("/v1/assistants/:assistant_id/files/:file_id", auth, openai.GetAssistantFileEndpoint(cl, ml, appConfig)) - app.Get("/assistants/:assistant_id/files/:file_id", auth, openai.GetAssistantFileEndpoint(cl, ml, appConfig)) + app.Get("/v1/assistants", openai.ListAssistantsEndpoint(cl, ml, appConfig)) + app.Get("/assistants", openai.ListAssistantsEndpoint(cl, ml, appConfig)) + app.Post("/v1/assistants", openai.CreateAssistantEndpoint(cl, ml, appConfig)) + app.Post("/assistants", openai.CreateAssistantEndpoint(cl, ml, appConfig)) + app.Delete("/v1/assistants/:assistant_id", openai.DeleteAssistantEndpoint(cl, ml, appConfig)) + app.Delete("/assistants/:assistant_id", openai.DeleteAssistantEndpoint(cl, ml, appConfig)) + app.Get("/v1/assistants/:assistant_id", openai.GetAssistantEndpoint(cl, ml, appConfig)) + app.Get("/assistants/:assistant_id", openai.GetAssistantEndpoint(cl, ml, appConfig)) + app.Post("/v1/assistants/:assistant_id", openai.ModifyAssistantEndpoint(cl, ml, appConfig)) + app.Post("/assistants/:assistant_id", openai.ModifyAssistantEndpoint(cl, ml, appConfig)) + app.Get("/v1/assistants/:assistant_id/files", openai.ListAssistantFilesEndpoint(cl, ml, appConfig)) + app.Get("/assistants/:assistant_id/files", openai.ListAssistantFilesEndpoint(cl, ml, appConfig)) + app.Post("/v1/assistants/:assistant_id/files", openai.CreateAssistantFileEndpoint(cl, ml, 
appConfig)) + app.Post("/assistants/:assistant_id/files", openai.CreateAssistantFileEndpoint(cl, ml, appConfig)) + app.Delete("/v1/assistants/:assistant_id/files/:file_id", openai.DeleteAssistantFileEndpoint(cl, ml, appConfig)) + app.Delete("/assistants/:assistant_id/files/:file_id", openai.DeleteAssistantFileEndpoint(cl, ml, appConfig)) + app.Get("/v1/assistants/:assistant_id/files/:file_id", openai.GetAssistantFileEndpoint(cl, ml, appConfig)) + app.Get("/assistants/:assistant_id/files/:file_id", openai.GetAssistantFileEndpoint(cl, ml, appConfig)) // files - app.Post("/v1/files", auth, openai.UploadFilesEndpoint(cl, appConfig)) - app.Post("/files", auth, openai.UploadFilesEndpoint(cl, appConfig)) - app.Get("/v1/files", auth, openai.ListFilesEndpoint(cl, appConfig)) - app.Get("/files", auth, openai.ListFilesEndpoint(cl, appConfig)) - app.Get("/v1/files/:file_id", auth, openai.GetFilesEndpoint(cl, appConfig)) - app.Get("/files/:file_id", auth, openai.GetFilesEndpoint(cl, appConfig)) - app.Delete("/v1/files/:file_id", auth, openai.DeleteFilesEndpoint(cl, appConfig)) - app.Delete("/files/:file_id", auth, openai.DeleteFilesEndpoint(cl, appConfig)) - app.Get("/v1/files/:file_id/content", auth, openai.GetFilesContentsEndpoint(cl, appConfig)) - app.Get("/files/:file_id/content", auth, openai.GetFilesContentsEndpoint(cl, appConfig)) + app.Post("/v1/files", openai.UploadFilesEndpoint(cl, appConfig)) + app.Post("/files", openai.UploadFilesEndpoint(cl, appConfig)) + app.Get("/v1/files", openai.ListFilesEndpoint(cl, appConfig)) + app.Get("/files", openai.ListFilesEndpoint(cl, appConfig)) + app.Get("/v1/files/:file_id", openai.GetFilesEndpoint(cl, appConfig)) + app.Get("/files/:file_id", openai.GetFilesEndpoint(cl, appConfig)) + app.Delete("/v1/files/:file_id", openai.DeleteFilesEndpoint(cl, appConfig)) + app.Delete("/files/:file_id", openai.DeleteFilesEndpoint(cl, appConfig)) + app.Get("/v1/files/:file_id/content", openai.GetFilesContentsEndpoint(cl, appConfig)) + 
app.Get("/files/:file_id/content", openai.GetFilesContentsEndpoint(cl, appConfig)) // completion - app.Post("/v1/completions", auth, openai.CompletionEndpoint(cl, ml, appConfig)) - app.Post("/completions", auth, openai.CompletionEndpoint(cl, ml, appConfig)) - app.Post("/v1/engines/:model/completions", auth, openai.CompletionEndpoint(cl, ml, appConfig)) + app.Post("/v1/completions", openai.CompletionEndpoint(cl, ml, appConfig)) + app.Post("/completions", openai.CompletionEndpoint(cl, ml, appConfig)) + app.Post("/v1/engines/:model/completions", openai.CompletionEndpoint(cl, ml, appConfig)) // embeddings - app.Post("/v1/embeddings", auth, openai.EmbeddingsEndpoint(cl, ml, appConfig)) - app.Post("/embeddings", auth, openai.EmbeddingsEndpoint(cl, ml, appConfig)) - app.Post("/v1/engines/:model/embeddings", auth, openai.EmbeddingsEndpoint(cl, ml, appConfig)) + app.Post("/v1/embeddings", openai.EmbeddingsEndpoint(cl, ml, appConfig)) + app.Post("/embeddings", openai.EmbeddingsEndpoint(cl, ml, appConfig)) + app.Post("/v1/engines/:model/embeddings", openai.EmbeddingsEndpoint(cl, ml, appConfig)) // audio - app.Post("/v1/audio/transcriptions", auth, openai.TranscriptEndpoint(cl, ml, appConfig)) - app.Post("/v1/audio/speech", auth, localai.TTSEndpoint(cl, ml, appConfig)) + app.Post("/v1/audio/transcriptions", openai.TranscriptEndpoint(cl, ml, appConfig)) + app.Post("/v1/audio/speech", localai.TTSEndpoint(cl, ml, appConfig)) // images - app.Post("/v1/images/generations", auth, openai.ImageEndpoint(cl, ml, appConfig)) + app.Post("/v1/images/generations", openai.ImageEndpoint(cl, ml, appConfig)) if appConfig.ImageDir != "" { app.Static("/generated-images", appConfig.ImageDir) @@ -81,6 +80,6 @@ func RegisterOpenAIRoutes(app *fiber.App, } // List models - app.Get("/v1/models", auth, openai.ListModelsEndpoint(cl, ml)) - app.Get("/models", auth, openai.ListModelsEndpoint(cl, ml)) + app.Get("/v1/models", openai.ListModelsEndpoint(cl, ml)) + app.Get("/models", 
openai.ListModelsEndpoint(cl, ml)) } diff --git a/core/http/routes/ui.go b/core/http/routes/ui.go index 6dfb3f43..7b2c6ae7 100644 --- a/core/http/routes/ui.go +++ b/core/http/routes/ui.go @@ -59,8 +59,7 @@ func RegisterUIRoutes(app *fiber.App, cl *config.BackendConfigLoader, ml *model.ModelLoader, appConfig *config.ApplicationConfig, - galleryService *services.GalleryService, - auth func(*fiber.Ctx) error) { + galleryService *services.GalleryService) { // keeps the state of models that are being installed from the UI var processingModels = NewModelOpCache() @@ -85,10 +84,10 @@ func RegisterUIRoutes(app *fiber.App, return processingModelsData, taskTypes } - app.Get("/", auth, localai.WelcomeEndpoint(appConfig, cl, ml, modelStatus)) + app.Get("/", localai.WelcomeEndpoint(appConfig, cl, ml, modelStatus)) if p2p.IsP2PEnabled() { - app.Get("/p2p", auth, func(c *fiber.Ctx) error { + app.Get("/p2p", func(c *fiber.Ctx) error { summary := fiber.Map{ "Title": "LocalAI - P2P dashboard", "Version": internal.PrintableVersion(), @@ -104,17 +103,17 @@ func RegisterUIRoutes(app *fiber.App, }) /* show nodes live! 
*/ - app.Get("/p2p/ui/workers", auth, func(c *fiber.Ctx) error { + app.Get("/p2p/ui/workers", func(c *fiber.Ctx) error { return c.SendString(elements.P2PNodeBoxes(p2p.GetAvailableNodes(p2p.NetworkID(appConfig.P2PNetworkID, p2p.WorkerID)))) }) - app.Get("/p2p/ui/workers-federation", auth, func(c *fiber.Ctx) error { + app.Get("/p2p/ui/workers-federation", func(c *fiber.Ctx) error { return c.SendString(elements.P2PNodeBoxes(p2p.GetAvailableNodes(p2p.NetworkID(appConfig.P2PNetworkID, p2p.FederatedID)))) }) - app.Get("/p2p/ui/workers-stats", auth, func(c *fiber.Ctx) error { + app.Get("/p2p/ui/workers-stats", func(c *fiber.Ctx) error { return c.SendString(elements.P2PNodeStats(p2p.GetAvailableNodes(p2p.NetworkID(appConfig.P2PNetworkID, p2p.WorkerID)))) }) - app.Get("/p2p/ui/workers-federation-stats", auth, func(c *fiber.Ctx) error { + app.Get("/p2p/ui/workers-federation-stats", func(c *fiber.Ctx) error { return c.SendString(elements.P2PNodeStats(p2p.GetAvailableNodes(p2p.NetworkID(appConfig.P2PNetworkID, p2p.FederatedID)))) }) } @@ -122,7 +121,7 @@ func RegisterUIRoutes(app *fiber.App, if !appConfig.DisableGalleryEndpoint { // Show the Models page (all models) - app.Get("/browse", auth, func(c *fiber.Ctx) error { + app.Get("/browse", func(c *fiber.Ctx) error { term := c.Query("term") models, _ := gallery.AvailableGalleryModels(appConfig.Galleries, appConfig.ModelPath) @@ -167,7 +166,7 @@ func RegisterUIRoutes(app *fiber.App, // Show the models, filtered from the user input // https://htmx.org/examples/active-search/ - app.Post("/browse/search/models", auth, func(c *fiber.Ctx) error { + app.Post("/browse/search/models", func(c *fiber.Ctx) error { form := struct { Search string `form:"search"` }{} @@ -188,7 +187,7 @@ func RegisterUIRoutes(app *fiber.App, // This route is used when the "Install" button is pressed, we submit here a new job to the gallery service // https://htmx.org/examples/progress-bar/ - app.Post("/browse/install/model/:id", auth, func(c *fiber.Ctx) error 
{ + app.Post("/browse/install/model/:id", func(c *fiber.Ctx) error { galleryID := strings.Clone(c.Params("id")) // note: strings.Clone is required for multiple requests! log.Debug().Msgf("UI job submitted to install : %+v\n", galleryID) @@ -215,7 +214,7 @@ func RegisterUIRoutes(app *fiber.App, // This route is used when the "Install" button is pressed, we submit here a new job to the gallery service // https://htmx.org/examples/progress-bar/ - app.Post("/browse/delete/model/:id", auth, func(c *fiber.Ctx) error { + app.Post("/browse/delete/model/:id", func(c *fiber.Ctx) error { galleryID := strings.Clone(c.Params("id")) // note: strings.Clone is required for multiple requests! log.Debug().Msgf("UI job submitted to delete : %+v\n", galleryID) var galleryName = galleryID @@ -255,7 +254,7 @@ func RegisterUIRoutes(app *fiber.App, // Display the job current progress status // If the job is done, we trigger the /browse/job/:uid route // https://htmx.org/examples/progress-bar/ - app.Get("/browse/job/progress/:uid", auth, func(c *fiber.Ctx) error { + app.Get("/browse/job/progress/:uid", func(c *fiber.Ctx) error { jobUID := strings.Clone(c.Params("uid")) // note: strings.Clone is required for multiple requests! status := galleryService.GetStatus(jobUID) @@ -279,7 +278,7 @@ func RegisterUIRoutes(app *fiber.App, // this route is hit when the job is done, and we display the // final state (for now just displays "Installation completed") - app.Get("/browse/job/:uid", auth, func(c *fiber.Ctx) error { + app.Get("/browse/job/:uid", func(c *fiber.Ctx) error { jobUID := strings.Clone(c.Params("uid")) // note: strings.Clone is required for multiple requests! 
status := galleryService.GetStatus(jobUID) @@ -303,7 +302,7 @@ func RegisterUIRoutes(app *fiber.App, } // Show the Chat page - app.Get("/chat/:model", auth, func(c *fiber.Ctx) error { + app.Get("/chat/:model", func(c *fiber.Ctx) error { backendConfigs, _ := services.ListModels(cl, ml, "", true) summary := fiber.Map{ @@ -318,7 +317,7 @@ func RegisterUIRoutes(app *fiber.App, return c.Render("views/chat", summary) }) - app.Get("/talk/", auth, func(c *fiber.Ctx) error { + app.Get("/talk/", func(c *fiber.Ctx) error { backendConfigs, _ := services.ListModels(cl, ml, "", true) if len(backendConfigs) == 0 { @@ -338,7 +337,7 @@ func RegisterUIRoutes(app *fiber.App, return c.Render("views/talk", summary) }) - app.Get("/chat/", auth, func(c *fiber.Ctx) error { + app.Get("/chat/", func(c *fiber.Ctx) error { backendConfigs, _ := services.ListModels(cl, ml, "", true) @@ -359,7 +358,7 @@ func RegisterUIRoutes(app *fiber.App, return c.Render("views/chat", summary) }) - app.Get("/text2image/:model", auth, func(c *fiber.Ctx) error { + app.Get("/text2image/:model", func(c *fiber.Ctx) error { backendConfigs := cl.GetAllBackendConfigs() summary := fiber.Map{ @@ -374,7 +373,7 @@ func RegisterUIRoutes(app *fiber.App, return c.Render("views/text2image", summary) }) - app.Get("/text2image/", auth, func(c *fiber.Ctx) error { + app.Get("/text2image/", func(c *fiber.Ctx) error { backendConfigs := cl.GetAllBackendConfigs() @@ -395,7 +394,7 @@ func RegisterUIRoutes(app *fiber.App, return c.Render("views/text2image", summary) }) - app.Get("/tts/:model", auth, func(c *fiber.Ctx) error { + app.Get("/tts/:model", func(c *fiber.Ctx) error { backendConfigs := cl.GetAllBackendConfigs() summary := fiber.Map{ @@ -410,7 +409,7 @@ func RegisterUIRoutes(app *fiber.App, return c.Render("views/tts", summary) }) - app.Get("/tts/", auth, func(c *fiber.Ctx) error { + app.Get("/tts/", func(c *fiber.Ctx) error { backendConfigs := cl.GetAllBackendConfigs() diff --git a/go.mod b/go.mod index 57202ad2..a3359abf 
100644 --- a/go.mod +++ b/go.mod @@ -74,6 +74,7 @@ require ( cloud.google.com/go/auth/oauth2adapt v0.2.2 // indirect cloud.google.com/go/compute/metadata v0.3.0 // indirect github.com/cpuguy83/go-md2man/v2 v2.0.4 // indirect + github.com/dave-gray101/v2keyauth v0.0.0-20240624150259-c45d584d25e2 // indirect github.com/envoyproxy/protoc-gen-validate v1.0.4 // indirect github.com/felixge/httpsnoop v1.0.4 // indirect github.com/go-task/slim-sprig/v3 v3.0.0 // indirect diff --git a/go.sum b/go.sum index ab64b84a..1dd44a5b 100644 --- a/go.sum +++ b/go.sum @@ -110,6 +110,8 @@ github.com/creachadair/otp v0.4.2 h1:ngNMaD6Tzd7UUNRFyed7ykZFn/Wr5sSs5ffqZWm9pu8 github.com/creachadair/otp v0.4.2/go.mod h1:DqV9hJyUbcUme0pooYfiFvvMe72Aua5sfhNzwfZvk40= github.com/creack/pty v1.1.18 h1:n56/Zwd5o6whRC5PMGretI4IdRLlmBXYNjScPaBgsbY= github.com/creack/pty v1.1.18/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4= +github.com/dave-gray101/v2keyauth v0.0.0-20240624150259-c45d584d25e2 h1:flLYmnQFZNo04x2NPehMbf30m7Pli57xwZ0NFqR/hb0= +github.com/dave-gray101/v2keyauth v0.0.0-20240624150259-c45d584d25e2/go.mod h1:NtWqRzAp/1tw+twkW8uuBenEVVYndEAZACWU3F3xdoQ= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= From e95cb8eaacdac6426c085197ec5acf790206c042 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 17 Sep 2024 03:33:52 +0000 Subject: [PATCH 0124/1530] chore(deps): Bump setuptools from 69.5.1 to 75.1.0 in /backend/python/transformers (#3579) chore(deps): Bump setuptools in /backend/python/transformers Bumps [setuptools](https://github.com/pypa/setuptools) from 69.5.1 to 75.1.0. 
- [Release notes](https://github.com/pypa/setuptools/releases) - [Changelog](https://github.com/pypa/setuptools/blob/main/NEWS.rst) - [Commits](https://github.com/pypa/setuptools/compare/v69.5.1...v75.1.0) --- updated-dependencies: - dependency-name: setuptools dependency-type: direct:production update-type: version-update:semver-major ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- backend/python/transformers/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/backend/python/transformers/requirements.txt b/backend/python/transformers/requirements.txt index b19c59c0..1b7ebda5 100644 --- a/backend/python/transformers/requirements.txt +++ b/backend/python/transformers/requirements.txt @@ -1,4 +1,4 @@ grpcio==1.66.1 protobuf certifi -setuptools==69.5.1 # https://github.com/mudler/LocalAI/issues/2406 \ No newline at end of file +setuptools==75.1.0 # https://github.com/mudler/LocalAI/issues/2406 \ No newline at end of file From f4b1bd8f6d70365e99320e52119cb7ed577b63c9 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 17 Sep 2024 03:41:01 +0000 Subject: [PATCH 0125/1530] chore(deps): Bump setuptools from 70.3.0 to 75.1.0 in /backend/python/vllm (#3580) chore(deps): Bump setuptools in /backend/python/vllm Bumps [setuptools](https://github.com/pypa/setuptools) from 70.3.0 to 75.1.0. - [Release notes](https://github.com/pypa/setuptools/releases) - [Changelog](https://github.com/pypa/setuptools/blob/main/NEWS.rst) - [Commits](https://github.com/pypa/setuptools/compare/v70.3.0...v75.1.0) --- updated-dependencies: - dependency-name: setuptools dependency-type: direct:production update-type: version-update:semver-major ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- backend/python/vllm/requirements-intel.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/backend/python/vllm/requirements-intel.txt b/backend/python/vllm/requirements-intel.txt index 7903282e..1f82c46e 100644 --- a/backend/python/vllm/requirements-intel.txt +++ b/backend/python/vllm/requirements-intel.txt @@ -4,4 +4,4 @@ accelerate torch transformers optimum[openvino] -setuptools==70.3.0 # https://github.com/mudler/LocalAI/issues/2406 \ No newline at end of file +setuptools==75.1.0 # https://github.com/mudler/LocalAI/issues/2406 \ No newline at end of file From 0e4e101101e92cd6b2451cf71a2f85a880468183 Mon Sep 17 00:00:00 2001 From: "LocalAI [bot]" <139863280+localai-bot@users.noreply.github.com> Date: Tue, 17 Sep 2024 05:52:15 +0200 Subject: [PATCH 0126/1530] chore: :arrow_up: Update ggerganov/llama.cpp to `23e0d70bacaaca1429d365a44aa9e7434f17823b` (#3581) :arrow_up: Update ggerganov/llama.cpp Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com> Co-authored-by: mudler <2420543+mudler@users.noreply.github.com> --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index e4d5b22c..f9fa5476 100644 --- a/Makefile +++ b/Makefile @@ -8,7 +8,7 @@ DETECT_LIBS?=true # llama.cpp versions GOLLAMA_REPO?=https://github.com/go-skynet/go-llama.cpp GOLLAMA_VERSION?=2b57a8ae43e4699d3dc5d1496a1ccd42922993be -CPPLLAMA_VERSION?=6262d13e0b2da91f230129a93a996609a2f5a2f2 +CPPLLAMA_VERSION?=23e0d70bacaaca1429d365a44aa9e7434f17823b # go-rwkv version RWKV_REPO?=https://github.com/donomii/go-rwkv.cpp From d0f2bf318103f631686c648d6bb6a299bca15976 Mon Sep 17 00:00:00 2001 From: Ettore Di Giacinto Date: Tue, 17 Sep 2024 06:50:57 +0200 Subject: [PATCH 0127/1530] fix(shutdown): do not shutdown immediately busy backends (#3543) * fix(shutdown): do not shutdown immediately busy 
backends Signed-off-by: Ettore Di Giacinto * chore(refactor): avoid duplicate functions Signed-off-by: Ettore Di Giacinto * fix: multiplicative backoff for shutdown (#3547) * multiplicative backoff for shutdown Rather than always retry every two seconds, back off the shutdown attempt rate? Signed-off-by: Dave * Update loader.go Signed-off-by: Dave * add clamp of 2 minutes Signed-off-by: Dave Lee --------- Signed-off-by: Dave Signed-off-by: Dave Lee --------- Signed-off-by: Ettore Di Giacinto Signed-off-by: Dave Signed-off-by: Dave Lee Co-authored-by: Dave --- pkg/model/loader.go | 24 +++++++++++++++++------- pkg/model/process.go | 17 +++++++++-------- 2 files changed, 26 insertions(+), 15 deletions(-) diff --git a/pkg/model/loader.go b/pkg/model/loader.go index 90fda35f..b9865f73 100644 --- a/pkg/model/loader.go +++ b/pkg/model/loader.go @@ -69,6 +69,8 @@ var knownModelsNameSuffixToSkip []string = []string{ ".tar.gz", } +const retryTimeout = time.Duration(2 * time.Minute) + func (ml *ModelLoader) ListFilesInModelPath() ([]string, error) { files, err := os.ReadDir(ml.ModelPath) if err != nil { @@ -146,15 +148,23 @@ func (ml *ModelLoader) ShutdownModel(modelName string) error { ml.mu.Lock() defer ml.mu.Unlock() - return ml.stopModel(modelName) -} - -func (ml *ModelLoader) stopModel(modelName string) error { - defer ml.deleteProcess(modelName) - if _, ok := ml.models[modelName]; !ok { + _, ok := ml.models[modelName] + if !ok { return fmt.Errorf("model %s not found", modelName) } - return nil + + retries := 1 + for ml.models[modelName].GRPC(false, ml.wd).IsBusy() { + log.Debug().Msgf("%s busy. 
Waiting.", modelName) + dur := time.Duration(retries*2) * time.Second + if dur > retryTimeout { + dur = retryTimeout + } + time.Sleep(dur) + retries++ + } + + return ml.deleteProcess(modelName) } func (ml *ModelLoader) CheckIsLoaded(s string) *Model { diff --git a/pkg/model/process.go b/pkg/model/process.go index 5b751de8..50afbb1c 100644 --- a/pkg/model/process.go +++ b/pkg/model/process.go @@ -18,15 +18,16 @@ import ( func (ml *ModelLoader) StopAllExcept(s string) error { return ml.StopGRPC(func(id string, p *process.Process) bool { - if id != s { - for ml.models[id].GRPC(false, ml.wd).IsBusy() { - log.Debug().Msgf("%s busy. Waiting.", id) - time.Sleep(2 * time.Second) - } - log.Debug().Msgf("[single-backend] Stopping %s", id) - return true + if id == s { + return false } - return false + + for ml.models[id].GRPC(false, ml.wd).IsBusy() { + log.Debug().Msgf("%s busy. Waiting.", id) + time.Sleep(2 * time.Second) + } + log.Debug().Msgf("[single-backend] Stopping %s", id) + return true }) } From 22247ad92c65818d6fb751a2f9998b565190db7f Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 17 Sep 2024 05:50:31 +0000 Subject: [PATCH 0128/1530] chore(deps): Bump langchain from 0.2.16 to 0.3.0 in /examples/langchain-chroma (#3557) chore(deps): Bump langchain in /examples/langchain-chroma Bumps [langchain](https://github.com/langchain-ai/langchain) from 0.2.16 to 0.3.0. - [Release notes](https://github.com/langchain-ai/langchain/releases) - [Commits](https://github.com/langchain-ai/langchain/compare/langchain==0.2.16...langchain==0.3.0) --- updated-dependencies: - dependency-name: langchain dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- examples/langchain-chroma/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/langchain-chroma/requirements.txt b/examples/langchain-chroma/requirements.txt index 3edb570c..4884d4aa 100644 --- a/examples/langchain-chroma/requirements.txt +++ b/examples/langchain-chroma/requirements.txt @@ -1,4 +1,4 @@ -langchain==0.2.16 +langchain==0.3.0 openai==1.45.1 chromadb==0.5.5 llama-index==0.11.7 \ No newline at end of file From 4a4e44bf5559f2eac49df2c1135f39ad6d70300f Mon Sep 17 00:00:00 2001 From: Alexander Izotov <93216976+Nyralei@users.noreply.github.com> Date: Tue, 17 Sep 2024 08:52:37 +0300 Subject: [PATCH 0129/1530] feat: allow setting trust_remote_code for sentencetransformers backend (#3552) Allow setting trust_remote_code for SentenceTransformers backend Signed-off-by: Nyralei <93216976+Nyralei@users.noreply.github.com> --- backend/python/sentencetransformers/backend.py | 2 +- backend/python/sentencetransformers/requirements.txt | 4 +++- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/backend/python/sentencetransformers/backend.py b/backend/python/sentencetransformers/backend.py index 905015e1..2a20bf60 100755 --- a/backend/python/sentencetransformers/backend.py +++ b/backend/python/sentencetransformers/backend.py @@ -55,7 +55,7 @@ class BackendServicer(backend_pb2_grpc.BackendServicer): """ model_name = request.Model try: - self.model = SentenceTransformer(model_name) + self.model = SentenceTransformer(model_name, trust_remote_code=request.TrustRemoteCode) except Exception as err: return backend_pb2.Result(success=False, message=f"Unexpected {err=}, {type(err)=}") diff --git a/backend/python/sentencetransformers/requirements.txt b/backend/python/sentencetransformers/requirements.txt index 8e1b0195..b9cb6061 100644 --- a/backend/python/sentencetransformers/requirements.txt +++ 
b/backend/python/sentencetransformers/requirements.txt @@ -1,3 +1,5 @@ grpcio==1.66.1 protobuf -certifi \ No newline at end of file +certifi +datasets +einops \ No newline at end of file From 46fd4ff6db3aedaec3579872aa35d47973417b0a Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 17 Sep 2024 06:19:52 +0000 Subject: [PATCH 0130/1530] chore(deps): Bump openai from 1.44.0 to 1.45.1 in /examples/functions (#3560) Bumps [openai](https://github.com/openai/openai-python) from 1.44.0 to 1.45.1. - [Release notes](https://github.com/openai/openai-python/releases) - [Changelog](https://github.com/openai/openai-python/blob/main/CHANGELOG.md) - [Commits](https://github.com/openai/openai-python/compare/v1.44.0...v1.45.1) --- updated-dependencies: - dependency-name: openai dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- examples/functions/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/functions/requirements.txt b/examples/functions/requirements.txt index 9dd6818f..670090d3 100644 --- a/examples/functions/requirements.txt +++ b/examples/functions/requirements.txt @@ -1,2 +1,2 @@ langchain==0.3.0 -openai==1.44.0 +openai==1.45.1 From 075e5015c0ff0ca1010d5bba11a774c1564a8795 Mon Sep 17 00:00:00 2001 From: Ettore Di Giacinto Date: Tue, 17 Sep 2024 09:06:07 +0200 Subject: [PATCH 0131/1530] Revert "chore(deps): Bump setuptools from 69.5.1 to 75.1.0 in /backend/python/transformers" (#3586) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Revert "chore(deps): Bump setuptools from 69.5.1 to 75.1.0 in /backend/python…" This reverts commit e95cb8eaacdac6426c085197ec5acf790206c042. 
--- backend/python/transformers/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/backend/python/transformers/requirements.txt b/backend/python/transformers/requirements.txt index 1b7ebda5..b19c59c0 100644 --- a/backend/python/transformers/requirements.txt +++ b/backend/python/transformers/requirements.txt @@ -1,4 +1,4 @@ grpcio==1.66.1 protobuf certifi -setuptools==75.1.0 # https://github.com/mudler/LocalAI/issues/2406 \ No newline at end of file +setuptools==69.5.1 # https://github.com/mudler/LocalAI/issues/2406 \ No newline at end of file From 92136a5d342993bdc8e0a26d5498b2e65ce9d26e Mon Sep 17 00:00:00 2001 From: Dave Date: Tue, 17 Sep 2024 03:23:58 -0400 Subject: [PATCH 0132/1530] fix: `gallery/index.yaml` comment spacing (#3585) extremely minor fix: add a space to index.yaml for the scanner Signed-off-by: Dave Lee --- gallery/index.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/gallery/index.yaml b/gallery/index.yaml index 5e47d31c..229697bb 100644 --- a/gallery/index.yaml +++ b/gallery/index.yaml @@ -1281,7 +1281,7 @@ - !!merge <<: *mistral03 name: "mn-12b-lyra-v4-iq-imatrix" icon: https://cdn-uploads.huggingface.co/production/uploads/65d4cf2693a0a3744a27536c/dVoru83WOpwVjMlgZ_xhA.png - #chatml + # chatml url: "github:mudler/LocalAI/gallery/chatml.yaml@master" urls: - https://huggingface.co/Lewdiculous/MN-12B-Lyra-v4-GGUF-IQ-Imatrix From 504962938127a04590e2e2383b2d5933ef3b48fd Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 17 Sep 2024 10:24:01 +0200 Subject: [PATCH 0133/1530] chore(deps): Bump langchain from 0.2.16 to 0.3.0 in /examples/langchain/langchainpy-localai-example (#3577) chore(deps): Bump langchain Bumps [langchain](https://github.com/langchain-ai/langchain) from 0.2.16 to 0.3.0. 
- [Release notes](https://github.com/langchain-ai/langchain/releases) - [Commits](https://github.com/langchain-ai/langchain/compare/langchain==0.2.16...langchain==0.3.0) --- updated-dependencies: - dependency-name: langchain dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- examples/langchain/langchainpy-localai-example/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/langchain/langchainpy-localai-example/requirements.txt b/examples/langchain/langchainpy-localai-example/requirements.txt index 1bd6b841..213b4e2f 100644 --- a/examples/langchain/langchainpy-localai-example/requirements.txt +++ b/examples/langchain/langchainpy-localai-example/requirements.txt @@ -10,7 +10,7 @@ debugpy==1.8.2 frozenlist==1.4.1 greenlet==3.1.0 idna==3.8 -langchain==0.2.16 +langchain==0.3.0 langchain-community==0.2.16 marshmallow==3.22.0 marshmallow-enum==1.5.1 From 8826ca93b3b23d2d9333856b136bc606e92710ae Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 17 Sep 2024 10:24:14 +0200 Subject: [PATCH 0134/1530] chore(deps): Bump openai from 1.44.0 to 1.45.1 in /examples/langchain/langchainpy-localai-example (#3573) chore(deps): Bump openai Bumps [openai](https://github.com/openai/openai-python) from 1.44.0 to 1.45.1. - [Release notes](https://github.com/openai/openai-python/releases) - [Changelog](https://github.com/openai/openai-python/blob/main/CHANGELOG.md) - [Commits](https://github.com/openai/openai-python/compare/v1.44.0...v1.45.1) --- updated-dependencies: - dependency-name: openai dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- examples/langchain/langchainpy-localai-example/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/langchain/langchainpy-localai-example/requirements.txt b/examples/langchain/langchainpy-localai-example/requirements.txt index 213b4e2f..98325db3 100644 --- a/examples/langchain/langchainpy-localai-example/requirements.txt +++ b/examples/langchain/langchainpy-localai-example/requirements.txt @@ -18,7 +18,7 @@ multidict==6.0.5 mypy-extensions==1.0.0 numexpr==2.10.1 numpy==2.1.1 -openai==1.44.0 +openai==1.45.1 openapi-schema-pydantic==1.2.4 packaging>=23.2 pydantic==2.8.2 From eee1fb2c75171fc4a236bf224eda5c0df3d1fa3f Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 17 Sep 2024 10:24:34 +0200 Subject: [PATCH 0135/1530] chore(deps): Bump pypinyin from 0.50.0 to 0.53.0 in /backend/python/openvoice (#3562) chore(deps): Bump pypinyin in /backend/python/openvoice Bumps [pypinyin](https://github.com/mozillazg/python-pinyin) from 0.50.0 to 0.53.0. - [Release notes](https://github.com/mozillazg/python-pinyin/releases) - [Changelog](https://github.com/mozillazg/python-pinyin/blob/master/CHANGELOG.rst) - [Commits](https://github.com/mozillazg/python-pinyin/compare/v0.50.0...v0.53.0) --- updated-dependencies: - dependency-name: pypinyin dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- backend/python/openvoice/requirements-intel.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/backend/python/openvoice/requirements-intel.txt b/backend/python/openvoice/requirements-intel.txt index a9a4cc20..cea7de0b 100644 --- a/backend/python/openvoice/requirements-intel.txt +++ b/backend/python/openvoice/requirements-intel.txt @@ -15,7 +15,7 @@ unidecode==1.3.7 whisper-timestamped==1.15.4 openai python-dotenv -pypinyin==0.50.0 +pypinyin==0.53.0 cn2an==0.5.22 jieba==0.42.1 gradio==4.38.1 From a53392f91953bf53c77041a8cd25282cd65eb71a Mon Sep 17 00:00:00 2001 From: Ettore Di Giacinto Date: Tue, 17 Sep 2024 16:51:40 +0200 Subject: [PATCH 0136/1530] chore(refactor): drop duplicated shutdown logics (#3589) * chore(refactor): drop duplicated shutdown logics - Handle locking in Shutdown and CheckModelIsLoaded in a more go-idiomatic way - Drop duplicated code and re-organize shutdown code Signed-off-by: Ettore Di Giacinto * fix: drop leftover Signed-off-by: Ettore Di Giacinto * chore: improve logging and add missing locks Signed-off-by: Ettore Di Giacinto --------- Signed-off-by: Ettore Di Giacinto --- core/http/routes/localai.go | 2 +- pkg/model/filters.go | 17 +++++++++++++++++ pkg/model/initializers.go | 16 ++++++---------- pkg/model/loader.go | 7 ++++--- pkg/model/process.go | 28 ++++------------------------ 5 files changed, 32 insertions(+), 38 deletions(-) create mode 100644 pkg/model/filters.go diff --git a/core/http/routes/localai.go b/core/http/routes/localai.go index 29fef378..247596c0 100644 --- a/core/http/routes/localai.go +++ b/core/http/routes/localai.go @@ -69,6 +69,6 @@ func RegisterLocalAIRoutes(app *fiber.App, }{Version: internal.PrintableVersion()}) }) - app.Get("/system", auth, localai.SystemInformations(ml, appConfig)) + app.Get("/system", localai.SystemInformations(ml, appConfig)) } diff --git 
a/pkg/model/filters.go b/pkg/model/filters.go new file mode 100644 index 00000000..79b72d5b --- /dev/null +++ b/pkg/model/filters.go @@ -0,0 +1,17 @@ +package model + +import ( + process "github.com/mudler/go-processmanager" +) + +type GRPCProcessFilter = func(id string, p *process.Process) bool + +func all(_ string, _ *process.Process) bool { + return true +} + +func allExcept(s string) GRPCProcessFilter { + return func(id string, p *process.Process) bool { + return id != s + } +} diff --git a/pkg/model/initializers.go b/pkg/model/initializers.go index 3d2255cc..7099bf33 100644 --- a/pkg/model/initializers.go +++ b/pkg/model/initializers.go @@ -320,7 +320,7 @@ func (ml *ModelLoader) grpcModel(backend string, o *Options) func(string, string } else { grpcProcess := backendPath(o.assetDir, backend) if err := utils.VerifyPath(grpcProcess, o.assetDir); err != nil { - return nil, fmt.Errorf("grpc process not found in assetdir: %s", err.Error()) + return nil, fmt.Errorf("refering to a backend not in asset dir: %s", err.Error()) } if autoDetect { @@ -332,7 +332,7 @@ func (ml *ModelLoader) grpcModel(backend string, o *Options) func(string, string // Check if the file exists if _, err := os.Stat(grpcProcess); os.IsNotExist(err) { - return nil, fmt.Errorf("grpc process not found: %s. 
some backends(stablediffusion, tts) require LocalAI compiled with GO_TAGS", grpcProcess) + return nil, fmt.Errorf("backend not found: %s", grpcProcess) } serverAddress, err := getFreeAddress() @@ -355,6 +355,8 @@ func (ml *ModelLoader) grpcModel(backend string, o *Options) func(string, string client = NewModel(serverAddress) } + log.Debug().Msgf("Wait for the service to start up") + // Wait for the service to start up ready := false for i := 0; i < o.grpcAttempts; i++ { @@ -413,10 +415,8 @@ func (ml *ModelLoader) BackendLoader(opts ...Option) (client grpc.Backend, err e } if o.singleActiveBackend { - ml.mu.Lock() log.Debug().Msgf("Stopping all backends except '%s'", o.model) - err := ml.StopAllExcept(o.model) - ml.mu.Unlock() + err := ml.StopGRPC(allExcept(o.model)) if err != nil { log.Error().Err(err).Str("keptModel", o.model).Msg("error while shutting down all backends except for the keptModel") return nil, err @@ -444,13 +444,10 @@ func (ml *ModelLoader) BackendLoader(opts ...Option) (client grpc.Backend, err e func (ml *ModelLoader) GreedyLoader(opts ...Option) (grpc.Backend, error) { o := NewOptions(opts...) 
- ml.mu.Lock() - // Return earlier if we have a model already loaded // (avoid looping through all the backends) if m := ml.CheckIsLoaded(o.model); m != nil { log.Debug().Msgf("Model '%s' already loaded", o.model) - ml.mu.Unlock() return m.GRPC(o.parallelRequests, ml.wd), nil } @@ -458,12 +455,11 @@ func (ml *ModelLoader) GreedyLoader(opts ...Option) (grpc.Backend, error) { // If we can have only one backend active, kill all the others (except external backends) if o.singleActiveBackend { log.Debug().Msgf("Stopping all backends except '%s'", o.model) - err := ml.StopAllExcept(o.model) + err := ml.StopGRPC(allExcept(o.model)) if err != nil { log.Error().Err(err).Str("keptModel", o.model).Msg("error while shutting down all backends except for the keptModel - greedyloader continuing") } } - ml.mu.Unlock() var err error diff --git a/pkg/model/loader.go b/pkg/model/loader.go index b9865f73..f70d2cea 100644 --- a/pkg/model/loader.go +++ b/pkg/model/loader.go @@ -118,9 +118,6 @@ func (ml *ModelLoader) ListModels() []*Model { } func (ml *ModelLoader) LoadModel(modelName string, loader func(string, string) (*Model, error)) (*Model, error) { - ml.mu.Lock() - defer ml.mu.Unlock() - // Check if we already have a loaded model if model := ml.CheckIsLoaded(modelName); model != nil { return model, nil @@ -139,6 +136,8 @@ func (ml *ModelLoader) LoadModel(modelName string, loader func(string, string) ( return nil, fmt.Errorf("loader didn't return a model") } + ml.mu.Lock() + defer ml.mu.Unlock() ml.models[modelName] = model return model, nil @@ -168,6 +167,8 @@ func (ml *ModelLoader) ShutdownModel(modelName string) error { } func (ml *ModelLoader) CheckIsLoaded(s string) *Model { + ml.mu.Lock() + defer ml.mu.Unlock() m, ok := ml.models[s] if !ok { return nil diff --git a/pkg/model/process.go b/pkg/model/process.go index 50afbb1c..bcd1fccb 100644 --- a/pkg/model/process.go +++ b/pkg/model/process.go @@ -9,28 +9,12 @@ import ( "strconv" "strings" "syscall" - "time" 
"github.com/hpcloud/tail" process "github.com/mudler/go-processmanager" "github.com/rs/zerolog/log" ) -func (ml *ModelLoader) StopAllExcept(s string) error { - return ml.StopGRPC(func(id string, p *process.Process) bool { - if id == s { - return false - } - - for ml.models[id].GRPC(false, ml.wd).IsBusy() { - log.Debug().Msgf("%s busy. Waiting.", id) - time.Sleep(2 * time.Second) - } - log.Debug().Msgf("[single-backend] Stopping %s", id) - return true - }) -} - func (ml *ModelLoader) deleteProcess(s string) error { if _, exists := ml.grpcProcesses[s]; exists { if err := ml.grpcProcesses[s].Stop(); err != nil { @@ -42,17 +26,11 @@ func (ml *ModelLoader) deleteProcess(s string) error { return nil } -type GRPCProcessFilter = func(id string, p *process.Process) bool - -func includeAllProcesses(_ string, _ *process.Process) bool { - return true -} - func (ml *ModelLoader) StopGRPC(filter GRPCProcessFilter) error { var err error = nil for k, p := range ml.grpcProcesses { if filter(k, p) { - e := ml.deleteProcess(k) + e := ml.ShutdownModel(k) err = errors.Join(err, e) } } @@ -60,10 +38,12 @@ func (ml *ModelLoader) StopGRPC(filter GRPCProcessFilter) error { } func (ml *ModelLoader) StopAllGRPC() error { - return ml.StopGRPC(includeAllProcesses) + return ml.StopGRPC(all) } func (ml *ModelLoader) GetGRPCPID(id string) (int, error) { + ml.mu.Lock() + defer ml.mu.Unlock() p, exists := ml.grpcProcesses[id] if !exists { return -1, fmt.Errorf("no grpc backend found for %s", id) From acf119828f940083451f8faa3095a5d3804ebd78 Mon Sep 17 00:00:00 2001 From: Ettore Di Giacinto Date: Tue, 17 Sep 2024 17:22:56 +0200 Subject: [PATCH 0137/1530] Revert "chore(deps): Bump securego/gosec from 2.21.0 to 2.21.2" (#3590) Revert "chore(deps): Bump securego/gosec from 2.21.0 to 2.21.2 (#3561)" This reverts commit 12a8d0e46fbd03f8d550dc41ea6325d07d66cd00. 
--- .github/workflows/secscan.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/secscan.yaml b/.github/workflows/secscan.yaml index 08d7dfc6..db9db586 100644 --- a/.github/workflows/secscan.yaml +++ b/.github/workflows/secscan.yaml @@ -18,7 +18,7 @@ jobs: if: ${{ github.actor != 'dependabot[bot]' }} - name: Run Gosec Security Scanner if: ${{ github.actor != 'dependabot[bot]' }} - uses: securego/gosec@v2.21.2 + uses: securego/gosec@v2.21.0 with: # we let the report trigger content trigger a failure using the GitHub Security features. args: '-no-fail -fmt sarif -out results.sarif ./...' From dc98b2ea4474c62fbf834b421663239d6b93f534 Mon Sep 17 00:00:00 2001 From: "LocalAI [bot]" <139863280+localai-bot@users.noreply.github.com> Date: Tue, 17 Sep 2024 23:51:41 +0200 Subject: [PATCH 0138/1530] chore: :arrow_up: Update ggerganov/llama.cpp to `8b836ae731bbb2c5640bc47df5b0a78ffcb129cb` (#3591) :arrow_up: Update ggerganov/llama.cpp Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com> Co-authored-by: mudler <2420543+mudler@users.noreply.github.com> --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index f9fa5476..4493404e 100644 --- a/Makefile +++ b/Makefile @@ -8,7 +8,7 @@ DETECT_LIBS?=true # llama.cpp versions GOLLAMA_REPO?=https://github.com/go-skynet/go-llama.cpp GOLLAMA_VERSION?=2b57a8ae43e4699d3dc5d1496a1ccd42922993be -CPPLLAMA_VERSION?=23e0d70bacaaca1429d365a44aa9e7434f17823b +CPPLLAMA_VERSION?=8b836ae731bbb2c5640bc47df5b0a78ffcb129cb # go-rwkv version RWKV_REPO?=https://github.com/donomii/go-rwkv.cpp From e5bd74878e79b2dd819c58d9811f9573bb3c9594 Mon Sep 17 00:00:00 2001 From: "LocalAI [bot]" <139863280+localai-bot@users.noreply.github.com> Date: Wed, 18 Sep 2024 00:02:02 +0200 Subject: [PATCH 0139/1530] chore: :arrow_up: Update ggerganov/whisper.cpp to `5b1ce40fa882e9cb8630b48032067a1ed2f1534f` (#3592) :arrow_up: Update ggerganov/whisper.cpp 
Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com> Co-authored-by: mudler <2420543+mudler@users.noreply.github.com> --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index 4493404e..54ae7b73 100644 --- a/Makefile +++ b/Makefile @@ -16,7 +16,7 @@ RWKV_VERSION?=661e7ae26d442f5cfebd2a0881b44e8c55949ec6 # whisper.cpp version WHISPER_REPO?=https://github.com/ggerganov/whisper.cpp -WHISPER_CPP_VERSION?=049b3a0e53c8a8e4c4576c06a1a4fccf0063a73f +WHISPER_CPP_VERSION?=5b1ce40fa882e9cb8630b48032067a1ed2f1534f # bert.cpp version BERT_REPO?=https://github.com/go-skynet/go-bert.cpp From a50cde69a258405ad765d3f6adf6a03aaaa6776a Mon Sep 17 00:00:00 2001 From: Ettore Di Giacinto Date: Wed, 18 Sep 2024 15:55:46 +0200 Subject: [PATCH 0140/1530] chore(aio): rename gpt-4-vision-preview to gpt-4o (#3597) Fixes: 3596 Signed-off-by: Ettore Di Giacinto --- aio/cpu/vision.yaml | 2 +- aio/gpu-8g/vision.yaml | 2 +- aio/intel/vision.yaml | 2 +- tests/e2e-aio/e2e_test.go | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/aio/cpu/vision.yaml b/aio/cpu/vision.yaml index 3b466d37..4052fa39 100644 --- a/aio/cpu/vision.yaml +++ b/aio/cpu/vision.yaml @@ -2,7 +2,7 @@ backend: llama-cpp context_size: 4096 f16: true mmap: true -name: gpt-4-vision-preview +name: gpt-4o roles: user: "USER:" diff --git a/aio/gpu-8g/vision.yaml b/aio/gpu-8g/vision.yaml index db039279..4f5e10b3 100644 --- a/aio/gpu-8g/vision.yaml +++ b/aio/gpu-8g/vision.yaml @@ -2,7 +2,7 @@ backend: llama-cpp context_size: 4096 f16: true mmap: true -name: gpt-4-vision-preview +name: gpt-4o roles: user: "USER:" diff --git a/aio/intel/vision.yaml b/aio/intel/vision.yaml index 52843162..37067362 100644 --- a/aio/intel/vision.yaml +++ b/aio/intel/vision.yaml @@ -2,7 +2,7 @@ backend: llama-cpp context_size: 4096 mmap: false f16: false -name: gpt-4-vision-preview +name: gpt-4o roles: user: "USER:" diff --git a/tests/e2e-aio/e2e_test.go 
b/tests/e2e-aio/e2e_test.go index f3f7b106..36d127d2 100644 --- a/tests/e2e-aio/e2e_test.go +++ b/tests/e2e-aio/e2e_test.go @@ -171,7 +171,7 @@ var _ = Describe("E2E test", func() { }) Context("vision", func() { It("correctly", func() { - model := "gpt-4-vision-preview" + model := "gpt-4o" resp, err := client.CreateChatCompletion(context.TODO(), openai.ChatCompletionRequest{ Model: model, Messages: []openai.ChatCompletionMessage{ From c6a819e92fc7e687f6fe9c8a29f5b56b62820163 Mon Sep 17 00:00:00 2001 From: "LocalAI [bot]" <139863280+localai-bot@users.noreply.github.com> Date: Wed, 18 Sep 2024 23:41:59 +0200 Subject: [PATCH 0141/1530] chore: :arrow_up: Update ggerganov/llama.cpp to `64c6af3195c3cd4aa3328a1282d29cd2635c34c9` (#3598) :arrow_up: Update ggerganov/llama.cpp Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com> Co-authored-by: mudler <2420543+mudler@users.noreply.github.com> --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index 54ae7b73..286f4b5a 100644 --- a/Makefile +++ b/Makefile @@ -8,7 +8,7 @@ DETECT_LIBS?=true # llama.cpp versions GOLLAMA_REPO?=https://github.com/go-skynet/go-llama.cpp GOLLAMA_VERSION?=2b57a8ae43e4699d3dc5d1496a1ccd42922993be -CPPLLAMA_VERSION?=8b836ae731bbb2c5640bc47df5b0a78ffcb129cb +CPPLLAMA_VERSION?=64c6af3195c3cd4aa3328a1282d29cd2635c34c9 # go-rwkv version RWKV_REPO?=https://github.com/donomii/go-rwkv.cpp From fbb9facda40eb9442ef0819b5a2de13500019229 Mon Sep 17 00:00:00 2001 From: Ettore Di Giacinto Date: Thu, 19 Sep 2024 11:21:59 +0200 Subject: [PATCH 0142/1530] feat(api): allow to pass videos to backends (#3601) This prepares the API to receive videos as well for video understanding. 
It works similarly to images, where the request should be in the form: { "type": "video_url", "video_url": { "url": "url or base64 data" } } Signed-off-by: Ettore Di Giacinto --- backend/backend.proto | 1 + core/backend/llm.go | 3 +- core/http/endpoints/openai/chat.go | 6 +++- core/http/endpoints/openai/inference.go | 6 +++- core/http/endpoints/openai/request.go | 38 +++++++++++++++++-------- core/schema/openai.go | 2 ++ pkg/utils/base64.go | 10 ++----- pkg/utils/base64_test.go | 8 +++--- 8 files changed, 47 insertions(+), 27 deletions(-) diff --git a/backend/backend.proto b/backend/backend.proto index 4a8f31a9..6ef83567 100644 --- a/backend/backend.proto +++ b/backend/backend.proto @@ -134,6 +134,7 @@ message PredictOptions { repeated string Images = 42; bool UseTokenizerTemplate = 43; repeated Message Messages = 44; + repeated string Videos = 45; } // The response message containing the result diff --git a/core/backend/llm.go b/core/backend/llm.go index 2b4564a8..fa4c0709 100644 --- a/core/backend/llm.go +++ b/core/backend/llm.go @@ -31,7 +31,7 @@ type TokenUsage struct { Completion int } -func ModelInference(ctx context.Context, s string, messages []schema.Message, images []string, loader *model.ModelLoader, c config.BackendConfig, o *config.ApplicationConfig, tokenCallback func(string, TokenUsage) bool) (func() (LLMResponse, error), error) { +func ModelInference(ctx context.Context, s string, messages []schema.Message, images, videos []string, loader *model.ModelLoader, c config.BackendConfig, o *config.ApplicationConfig, tokenCallback func(string, TokenUsage) bool) (func() (LLMResponse, error), error) { modelFile := c.Model threads := c.Threads if *threads == 0 && o.Threads != 0 { @@ -101,6 +101,7 @@ func ModelInference(ctx context.Context, s string, messages []schema.Message, im opts.Messages = protoMessages opts.UseTokenizerTemplate = c.TemplateConfig.UseTokenizerTemplate opts.Images = images + opts.Videos = videos tokenUsage := TokenUsage{} diff --git 
a/core/http/endpoints/openai/chat.go b/core/http/endpoints/openai/chat.go index 8144bdcd..742a4add 100644 --- a/core/http/endpoints/openai/chat.go +++ b/core/http/endpoints/openai/chat.go @@ -640,8 +640,12 @@ func handleQuestion(config *config.BackendConfig, input *schema.OpenAIRequest, m for _, m := range input.Messages { images = append(images, m.StringImages...) } + videos := []string{} + for _, m := range input.Messages { + videos = append(videos, m.StringVideos...) + } - predFunc, err := backend.ModelInference(input.Context, prompt, input.Messages, images, ml, *config, o, nil) + predFunc, err := backend.ModelInference(input.Context, prompt, input.Messages, images, videos, ml, *config, o, nil) if err != nil { log.Error().Err(err).Msg("model inference failed") return "", err diff --git a/core/http/endpoints/openai/inference.go b/core/http/endpoints/openai/inference.go index 4950ce20..4008ba3d 100644 --- a/core/http/endpoints/openai/inference.go +++ b/core/http/endpoints/openai/inference.go @@ -27,9 +27,13 @@ func ComputeChoices( for _, m := range req.Messages { images = append(images, m.StringImages...) } + videos := []string{} + for _, m := range req.Messages { + videos = append(videos, m.StringVideos...) 
+ } // get the model function to call for the result - predFunc, err := backend.ModelInference(req.Context, predInput, req.Messages, images, loader, *config, o, tokenCallback) + predFunc, err := backend.ModelInference(req.Context, predInput, req.Messages, images, videos, loader, *config, o, tokenCallback) if err != nil { return result, backend.TokenUsage{}, err } diff --git a/core/http/endpoints/openai/request.go b/core/http/endpoints/openai/request.go index a99ebea2..456a1e0c 100644 --- a/core/http/endpoints/openai/request.go +++ b/core/http/endpoints/openai/request.go @@ -135,7 +135,7 @@ func updateRequestConfig(config *config.BackendConfig, input *schema.OpenAIReque } // Decode each request's message content - index := 0 + imgIndex, vidIndex := 0, 0 for i, m := range input.Messages { switch content := m.Content.(type) { case string: @@ -144,20 +144,34 @@ func updateRequestConfig(config *config.BackendConfig, input *schema.OpenAIReque dat, _ := json.Marshal(content) c := []schema.Content{} json.Unmarshal(dat, &c) + CONTENT: for _, pp := range c { - if pp.Type == "text" { + switch pp.Type { + case "text": input.Messages[i].StringContent = pp.Text - } else if pp.Type == "image_url" { - // Detect if pp.ImageURL is an URL, if it is download the image and encode it in base64: - base64, err := utils.GetImageURLAsBase64(pp.ImageURL.URL) - if err == nil { - input.Messages[i].StringImages = append(input.Messages[i].StringImages, base64) // TODO: make sure that we only return base64 stuff - // set a placeholder for each image - input.Messages[i].StringContent = fmt.Sprintf("[img-%d]", index) + input.Messages[i].StringContent - index++ - } else { - log.Error().Msgf("Failed encoding image: %s", err) + case "video", "video_url": + // Decode content as base64 either if it's an URL or base64 text + base64, err := utils.GetContentURIAsBase64(pp.VideoURL.URL) + if err != nil { + log.Error().Msgf("Failed encoding video: %s", err) + continue CONTENT } + 
input.Messages[i].StringVideos = append(input.Messages[i].StringVideos, base64) // TODO: make sure that we only return base64 stuff + // set a placeholder for each image + input.Messages[i].StringContent = fmt.Sprintf("[vid-%d]", vidIndex) + input.Messages[i].StringContent + vidIndex++ + case "image_url", "image": + // Decode content as base64 either if it's an URL or base64 text + + base64, err := utils.GetContentURIAsBase64(pp.ImageURL.URL) + if err != nil { + log.Error().Msgf("Failed encoding image: %s", err) + continue CONTENT + } + input.Messages[i].StringImages = append(input.Messages[i].StringImages, base64) // TODO: make sure that we only return base64 stuff + // set a placeholder for each image + input.Messages[i].StringContent = fmt.Sprintf("[img-%d]", imgIndex) + input.Messages[i].StringContent + imgIndex++ } } } diff --git a/core/schema/openai.go b/core/schema/openai.go index fe4745bf..32ed716b 100644 --- a/core/schema/openai.go +++ b/core/schema/openai.go @@ -58,6 +58,7 @@ type Content struct { Type string `json:"type" yaml:"type"` Text string `json:"text" yaml:"text"` ImageURL ContentURL `json:"image_url" yaml:"image_url"` + VideoURL ContentURL `json:"video_url" yaml:"video_url"` } type ContentURL struct { @@ -76,6 +77,7 @@ type Message struct { StringContent string `json:"string_content,omitempty" yaml:"string_content,omitempty"` StringImages []string `json:"string_images,omitempty" yaml:"string_images,omitempty"` + StringVideos []string `json:"string_videos,omitempty" yaml:"string_videos,omitempty"` // A result of a function call FunctionCall interface{} `json:"function_call,omitempty" yaml:"function_call,omitempty"` diff --git a/pkg/utils/base64.go b/pkg/utils/base64.go index 3fbb405b..50109eaa 100644 --- a/pkg/utils/base64.go +++ b/pkg/utils/base64.go @@ -13,14 +13,8 @@ var base64DownloadClient http.Client = http.Client{ Timeout: 30 * time.Second, } -// this function check if the string is an URL, if it's an URL downloads the image in memory -// 
encodes it in base64 and returns the base64 string - -// This may look weird down in pkg/utils while it is currently only used in core/config -// -// but I believe it may be useful for MQTT as well in the near future, so I'm -// extracting it while I'm thinking of it. -func GetImageURLAsBase64(s string) (string, error) { +// GetContentURIAsBase64 checks if the string is an URL, if it's an URL downloads the content in memory encodes it in base64 and returns the base64 string, otherwise returns the string by stripping base64 data headers +func GetContentURIAsBase64(s string) (string, error) { if strings.HasPrefix(s, "http") { // download the image resp, err := base64DownloadClient.Get(s) diff --git a/pkg/utils/base64_test.go b/pkg/utils/base64_test.go index 3b3dc9fb..1f0d1352 100644 --- a/pkg/utils/base64_test.go +++ b/pkg/utils/base64_test.go @@ -10,20 +10,20 @@ var _ = Describe("utils/base64 tests", func() { It("GetImageURLAsBase64 can strip jpeg data url prefixes", func() { // This one doesn't actually _care_ that it's base64, so feed "bad" data in this test in order to catch a change in that behavior for informational purposes. input := "data:image/jpeg;base64,FOO" - b64, err := GetImageURLAsBase64(input) + b64, err := GetContentURIAsBase64(input) Expect(err).To(BeNil()) Expect(b64).To(Equal("FOO")) }) It("GetImageURLAsBase64 can strip png data url prefixes", func() { // This one doesn't actually _care_ that it's base64, so feed "bad" data in this test in order to catch a change in that behavior for informational purposes. 
input := "data:image/png;base64,BAR" - b64, err := GetImageURLAsBase64(input) + b64, err := GetContentURIAsBase64(input) Expect(err).To(BeNil()) Expect(b64).To(Equal("BAR")) }) It("GetImageURLAsBase64 returns an error for bogus data", func() { input := "FOO" - b64, err := GetImageURLAsBase64(input) + b64, err := GetContentURIAsBase64(input) Expect(b64).To(Equal("")) Expect(err).ToNot(BeNil()) Expect(err).To(MatchError("not valid string")) @@ -31,7 +31,7 @@ var _ = Describe("utils/base64 tests", func() { It("GetImageURLAsBase64 can actually download images and calculates something", func() { // This test doesn't actually _check_ the results at this time, which is bad, but there wasn't a test at all before... input := "https://upload.wikimedia.org/wikipedia/en/2/29/Wargames.jpg" - b64, err := GetImageURLAsBase64(input) + b64, err := GetContentURIAsBase64(input) Expect(err).To(BeNil()) Expect(b64).ToNot(BeNil()) }) From 191bc2e50a721bd3164ad4700bcbb5d723ed7d03 Mon Sep 17 00:00:00 2001 From: Ettore Di Giacinto Date: Thu, 19 Sep 2024 12:26:53 +0200 Subject: [PATCH 0143/1530] feat(api): allow to pass audios to backends (#3603) Signed-off-by: Ettore Di Giacinto --- backend/backend.proto | 1 + core/backend/llm.go | 3 ++- core/http/endpoints/openai/chat.go | 6 +++++- core/http/endpoints/openai/inference.go | 6 +++++- core/http/endpoints/openai/request.go | 14 ++++++++++++-- core/schema/openai.go | 2 ++ 6 files changed, 27 insertions(+), 5 deletions(-) diff --git a/backend/backend.proto b/backend/backend.proto index 6ef83567..31bd63e5 100644 --- a/backend/backend.proto +++ b/backend/backend.proto @@ -135,6 +135,7 @@ message PredictOptions { bool UseTokenizerTemplate = 43; repeated Message Messages = 44; repeated string Videos = 45; + repeated string Audios = 46; } // The response message containing the result diff --git a/core/backend/llm.go b/core/backend/llm.go index fa4c0709..f74071ba 100644 --- a/core/backend/llm.go +++ b/core/backend/llm.go @@ -31,7 +31,7 @@ type 
TokenUsage struct { Completion int } -func ModelInference(ctx context.Context, s string, messages []schema.Message, images, videos []string, loader *model.ModelLoader, c config.BackendConfig, o *config.ApplicationConfig, tokenCallback func(string, TokenUsage) bool) (func() (LLMResponse, error), error) { +func ModelInference(ctx context.Context, s string, messages []schema.Message, images, videos, audios []string, loader *model.ModelLoader, c config.BackendConfig, o *config.ApplicationConfig, tokenCallback func(string, TokenUsage) bool) (func() (LLMResponse, error), error) { modelFile := c.Model threads := c.Threads if *threads == 0 && o.Threads != 0 { @@ -102,6 +102,7 @@ func ModelInference(ctx context.Context, s string, messages []schema.Message, im opts.UseTokenizerTemplate = c.TemplateConfig.UseTokenizerTemplate opts.Images = images opts.Videos = videos + opts.Audios = audios tokenUsage := TokenUsage{} diff --git a/core/http/endpoints/openai/chat.go b/core/http/endpoints/openai/chat.go index 742a4add..b937120a 100644 --- a/core/http/endpoints/openai/chat.go +++ b/core/http/endpoints/openai/chat.go @@ -644,8 +644,12 @@ func handleQuestion(config *config.BackendConfig, input *schema.OpenAIRequest, m for _, m := range input.Messages { videos = append(videos, m.StringVideos...) } + audios := []string{} + for _, m := range input.Messages { + audios = append(audios, m.StringAudios...) 
+ } - predFunc, err := backend.ModelInference(input.Context, prompt, input.Messages, images, videos, ml, *config, o, nil) + predFunc, err := backend.ModelInference(input.Context, prompt, input.Messages, images, videos, audios, ml, *config, o, nil) if err != nil { log.Error().Err(err).Msg("model inference failed") return "", err diff --git a/core/http/endpoints/openai/inference.go b/core/http/endpoints/openai/inference.go index 4008ba3d..da75d3a1 100644 --- a/core/http/endpoints/openai/inference.go +++ b/core/http/endpoints/openai/inference.go @@ -31,9 +31,13 @@ func ComputeChoices( for _, m := range req.Messages { videos = append(videos, m.StringVideos...) } + audios := []string{} + for _, m := range req.Messages { + audios = append(audios, m.StringAudios...) + } // get the model function to call for the result - predFunc, err := backend.ModelInference(req.Context, predInput, req.Messages, images, videos, loader, *config, o, tokenCallback) + predFunc, err := backend.ModelInference(req.Context, predInput, req.Messages, images, videos, audios, loader, *config, o, tokenCallback) if err != nil { return result, backend.TokenUsage{}, err } diff --git a/core/http/endpoints/openai/request.go b/core/http/endpoints/openai/request.go index 456a1e0c..e24dd28f 100644 --- a/core/http/endpoints/openai/request.go +++ b/core/http/endpoints/openai/request.go @@ -135,7 +135,7 @@ func updateRequestConfig(config *config.BackendConfig, input *schema.OpenAIReque } // Decode each request's message content - imgIndex, vidIndex := 0, 0 + imgIndex, vidIndex, audioIndex := 0, 0, 0 for i, m := range input.Messages { switch content := m.Content.(type) { case string: @@ -160,9 +160,19 @@ func updateRequestConfig(config *config.BackendConfig, input *schema.OpenAIReque // set a placeholder for each image input.Messages[i].StringContent = fmt.Sprintf("[vid-%d]", vidIndex) + input.Messages[i].StringContent vidIndex++ + case "audio_url", "audio": + // Decode content as base64 either if it's an URL or 
base64 text + base64, err := utils.GetContentURIAsBase64(pp.AudioURL.URL) + if err != nil { + log.Error().Msgf("Failed encoding image: %s", err) + continue CONTENT + } + input.Messages[i].StringAudios = append(input.Messages[i].StringAudios, base64) // TODO: make sure that we only return base64 stuff + // set a placeholder for each image + input.Messages[i].StringContent = fmt.Sprintf("[audio-%d]", audioIndex) + input.Messages[i].StringContent + audioIndex++ case "image_url", "image": // Decode content as base64 either if it's an URL or base64 text - base64, err := utils.GetContentURIAsBase64(pp.ImageURL.URL) if err != nil { log.Error().Msgf("Failed encoding image: %s", err) diff --git a/core/schema/openai.go b/core/schema/openai.go index 32ed716b..15bcd13d 100644 --- a/core/schema/openai.go +++ b/core/schema/openai.go @@ -58,6 +58,7 @@ type Content struct { Type string `json:"type" yaml:"type"` Text string `json:"text" yaml:"text"` ImageURL ContentURL `json:"image_url" yaml:"image_url"` + AudioURL ContentURL `json:"audio_url" yaml:"audio_url"` VideoURL ContentURL `json:"video_url" yaml:"video_url"` } @@ -78,6 +79,7 @@ type Message struct { StringContent string `json:"string_content,omitempty" yaml:"string_content,omitempty"` StringImages []string `json:"string_images,omitempty" yaml:"string_images,omitempty"` StringVideos []string `json:"string_videos,omitempty" yaml:"string_videos,omitempty"` + StringAudios []string `json:"string_audios,omitempty" yaml:"string_audios,omitempty"` // A result of a function call FunctionCall interface{} `json:"function_call,omitempty" yaml:"function_call,omitempty"` From 5c9d26e39bdff8c3e836c686a83d1aba3c239893 Mon Sep 17 00:00:00 2001 From: "LocalAI [bot]" <139863280+localai-bot@users.noreply.github.com> Date: Fri, 20 Sep 2024 10:49:32 +0200 Subject: [PATCH 0144/1530] feat(swagger): update swagger (#3604) Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com> Co-authored-by: mudler 
<2420543+mudler@users.noreply.github.com> --- swagger/docs.go | 12 ++++++++++++ swagger/swagger.json | 12 ++++++++++++ swagger/swagger.yaml | 8 ++++++++ 3 files changed, 32 insertions(+) diff --git a/swagger/docs.go b/swagger/docs.go index 44da7cf2..ffb2ba03 100644 --- a/swagger/docs.go +++ b/swagger/docs.go @@ -1394,6 +1394,12 @@ const docTemplate = `{ "description": "The message role", "type": "string" }, + "string_audios": { + "type": "array", + "items": { + "type": "string" + } + }, "string_content": { "type": "string" }, @@ -1403,6 +1409,12 @@ const docTemplate = `{ "type": "string" } }, + "string_videos": { + "type": "array", + "items": { + "type": "string" + } + }, "tool_calls": { "type": "array", "items": { diff --git a/swagger/swagger.json b/swagger/swagger.json index eaddf451..e3aebe43 100644 --- a/swagger/swagger.json +++ b/swagger/swagger.json @@ -1387,6 +1387,12 @@ "description": "The message role", "type": "string" }, + "string_audios": { + "type": "array", + "items": { + "type": "string" + } + }, "string_content": { "type": "string" }, @@ -1396,6 +1402,12 @@ "type": "string" } }, + "string_videos": { + "type": "array", + "items": { + "type": "string" + } + }, "tool_calls": { "type": "array", "items": { diff --git a/swagger/swagger.yaml b/swagger/swagger.yaml index c98e0ef4..649b86e4 100644 --- a/swagger/swagger.yaml +++ b/swagger/swagger.yaml @@ -453,12 +453,20 @@ definitions: role: description: The message role type: string + string_audios: + items: + type: string + type: array string_content: type: string string_images: items: type: string type: array + string_videos: + items: + type: string + type: array tool_calls: items: $ref: '#/definitions/schema.ToolCall' From 2fcea486eb72d0a0bd77513244d66c74a3ec8a47 Mon Sep 17 00:00:00 2001 From: "LocalAI [bot]" <139863280+localai-bot@users.noreply.github.com> Date: Fri, 20 Sep 2024 10:50:14 +0200 Subject: [PATCH 0145/1530] chore: :arrow_up: Update ggerganov/llama.cpp to 
`6026da52d6942b253df835070619775d849d0258` (#3605) :arrow_up: Update ggerganov/llama.cpp Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com> Co-authored-by: mudler <2420543+mudler@users.noreply.github.com> --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index 286f4b5a..53def128 100644 --- a/Makefile +++ b/Makefile @@ -8,7 +8,7 @@ DETECT_LIBS?=true # llama.cpp versions GOLLAMA_REPO?=https://github.com/go-skynet/go-llama.cpp GOLLAMA_VERSION?=2b57a8ae43e4699d3dc5d1496a1ccd42922993be -CPPLLAMA_VERSION?=64c6af3195c3cd4aa3328a1282d29cd2635c34c9 +CPPLLAMA_VERSION?=6026da52d6942b253df835070619775d849d0258 # go-rwkv version RWKV_REPO?=https://github.com/donomii/go-rwkv.cpp From a2a63460e92b042f274d0a4e126ef927ef78e25a Mon Sep 17 00:00:00 2001 From: Ettore Di Giacinto Date: Fri, 20 Sep 2024 10:59:29 +0200 Subject: [PATCH 0146/1530] models(gallery): add qwen2.5-14b-instruct (#3607) Signed-off-by: Ettore Di Giacinto --- gallery/index.yaml | 23 +++++++++++++++++++++++ 1 file changed, 23 insertions(+) diff --git a/gallery/index.yaml b/gallery/index.yaml index 229697bb..4fe495fc 100644 --- a/gallery/index.yaml +++ b/gallery/index.yaml @@ -1,4 +1,27 @@ --- +## Qwen2.5 +- &qwen25 + name: "qwen2.5-14b-instruct" + url: "github:mudler/LocalAI/gallery/chatml.yaml@master" + license: apache-2.0 + description: | + Qwen2.5 is the latest series of Qwen large language models. For Qwen2.5, we release a number of base language models and instruction-tuned language models ranging from 0.5 to 72 billion parameters. 
+ tags: + - llm + - gguf + - gpu + - qwen + - cpu + urls: + - https://huggingface.co/bartowski/Qwen2.5-14B-Instruct-GGUF + - https://huggingface.co/Qwen/Qwen2.5-7B-Instruct + overrides: + parameters: + model: Qwen2.5-14B-Instruct-Q4_K_M.gguf + files: + - filename: Qwen2.5-14B-Instruct-Q4_K_M.gguf + sha256: e47ad95dad6ff848b431053b375adb5d39321290ea2c638682577dafca87c008 + uri: huggingface://bartowski/Qwen2.5-14B-Instruct-GGUF/Qwen2.5-14B-Instruct-Q4_K_M.gguf ## SmolLM - &smollm url: "github:mudler/LocalAI/gallery/chatml.yaml@master" From c15f506fd511dc3208846753e1fded4d0a4191f0 Mon Sep 17 00:00:00 2001 From: Ettore Di Giacinto Date: Fri, 20 Sep 2024 11:18:49 +0200 Subject: [PATCH 0147/1530] models(gallery): add qwen2.5-math-7b-instruct (#3609) Signed-off-by: Ettore Di Giacinto --- gallery/index.yaml | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git a/gallery/index.yaml b/gallery/index.yaml index 4fe495fc..8dc742ca 100644 --- a/gallery/index.yaml +++ b/gallery/index.yaml @@ -22,6 +22,24 @@ - filename: Qwen2.5-14B-Instruct-Q4_K_M.gguf sha256: e47ad95dad6ff848b431053b375adb5d39321290ea2c638682577dafca87c008 uri: huggingface://bartowski/Qwen2.5-14B-Instruct-GGUF/Qwen2.5-14B-Instruct-Q4_K_M.gguf +- !!merge <<: *qwen25 + name: "qwen2.5-math-7b-instruct" + urls: + - https://huggingface.co/bartowski/Qwen2.5-Math-7B-Instruct-GGUF + - https://huggingface.co/Qwen/Qwen2.5-Math-7B-Instruct + description: | + In August 2024, we released the first series of mathematical LLMs - Qwen2-Math - of our Qwen family. A month later, we have upgraded it and open-sourced Qwen2.5-Math series, including base models Qwen2.5-Math-1.5B/7B/72B, instruction-tuned models Qwen2.5-Math-1.5B/7B/72B-Instruct, and mathematical reward model Qwen2.5-Math-RM-72B. 
+ + Unlike Qwen2-Math series which only supports using Chain-of-Thught (CoT) to solve English math problems, Qwen2.5-Math series is expanded to support using both CoT and Tool-integrated Reasoning (TIR) to solve math problems in both Chinese and English. The Qwen2.5-Math series models have achieved significant performance improvements compared to the Qwen2-Math series models on the Chinese and English mathematics benchmarks with CoT. + + The base models of Qwen2-Math are initialized with Qwen2-1.5B/7B/72B, and then pretrained on a meticulously designed Mathematics-specific Corpus. This corpus contains large-scale high-quality mathematical web texts, books, codes, exam questions, and mathematical pre-training data synthesized by Qwen2. + overrides: + parameters: + model: Qwen2.5-Math-7B-Instruct-Q4_K_M.gguf + files: + - filename: Qwen2.5-Math-7B-Instruct-Q4_K_M.gguf + sha256: 7e03cee8c65b9ebf9ca14ddb010aca27b6b18e6c70f2779e94e7451d9529c091 + uri: huggingface://bartowski/Qwen2.5-Math-7B-Instruct-GGUF/Qwen2.5-Math-7B-Instruct-Q4_K_M.gguf ## SmolLM - &smollm url: "github:mudler/LocalAI/gallery/chatml.yaml@master" From a5b08f43ff5a3f485264dd0b8bd6335b0bf4ce24 Mon Sep 17 00:00:00 2001 From: Ettore Di Giacinto Date: Fri, 20 Sep 2024 11:22:53 +0200 Subject: [PATCH 0148/1530] models(gallery): add qwen2.5-14b_uncencored (#3610) Signed-off-by: Ettore Di Giacinto --- gallery/index.yaml | 26 ++++++++++++++++++++++++++ 1 file changed, 26 insertions(+) diff --git a/gallery/index.yaml b/gallery/index.yaml index 8dc742ca..77c5c107 100644 --- a/gallery/index.yaml +++ b/gallery/index.yaml @@ -11,6 +11,7 @@ - gguf - gpu - qwen + - qwen2.5 - cpu urls: - https://huggingface.co/bartowski/Qwen2.5-14B-Instruct-GGUF @@ -40,6 +41,31 @@ - filename: Qwen2.5-Math-7B-Instruct-Q4_K_M.gguf sha256: 7e03cee8c65b9ebf9ca14ddb010aca27b6b18e6c70f2779e94e7451d9529c091 uri: huggingface://bartowski/Qwen2.5-Math-7B-Instruct-GGUF/Qwen2.5-Math-7B-Instruct-Q4_K_M.gguf +- !!merge <<: *qwen25 + name: 
"qwen2.5-14b_uncencored" + icon: https://huggingface.co/SicariusSicariiStuff/Phi-3.5-mini-instruct_Uncensored/resolve/main/Misc/Uncensored.png + urls: + - https://huggingface.co/SicariusSicariiStuff/Qwen2.5-14B_Uncencored + - https://huggingface.co/bartowski/Qwen2.5-14B_Uncencored-GGUF + description: | + Qwen2.5 is the latest series of Qwen large language models. For Qwen2.5, we release a number of base language models and instruction-tuned language models ranging from 0.5 to 72 billion parameters. + + Uncensored qwen2.5 + tags: + - llm + - gguf + - gpu + - qwen + - qwen2.5 + - cpu + - uncensored + overrides: + parameters: + model: Qwen2.5-14B_Uncencored-Q4_K_M.gguf + files: + - filename: Qwen2.5-14B_Uncencored-Q4_K_M.gguf + sha256: 066b9341b67e0fd0956de3576a3b7988574a5b9a0028aef2b9c8edeadd6dbbd1 + uri: huggingface://bartowski/Qwen2.5-14B_Uncencored-GGUF/Qwen2.5-14B_Uncencored-Q4_K_M.gguf ## SmolLM - &smollm url: "github:mudler/LocalAI/gallery/chatml.yaml@master" From b6af4f4467724bd9d59e6f7f573f513f927fc8e2 Mon Sep 17 00:00:00 2001 From: Ettore Di Giacinto Date: Fri, 20 Sep 2024 15:08:57 +0200 Subject: [PATCH 0149/1530] models(gallery): add qwen2.5-coder-7b-instruct (#3611) Signed-off-by: Ettore Di Giacinto --- gallery/index.yaml | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git a/gallery/index.yaml b/gallery/index.yaml index 77c5c107..1f52fec8 100644 --- a/gallery/index.yaml +++ b/gallery/index.yaml @@ -66,6 +66,24 @@ - filename: Qwen2.5-14B_Uncencored-Q4_K_M.gguf sha256: 066b9341b67e0fd0956de3576a3b7988574a5b9a0028aef2b9c8edeadd6dbbd1 uri: huggingface://bartowski/Qwen2.5-14B_Uncencored-GGUF/Qwen2.5-14B_Uncencored-Q4_K_M.gguf +- !!merge <<: *qwen25 + name: "qwen2.5-coder-7b-instruct" + urls: + - https://huggingface.co/Qwen/Qwen2.5-Coder-7B-Instruct + - https://huggingface.co/bartowski/Qwen2.5-Coder-7B-Instruct-GGUF + description: | + Qwen2.5-Coder is the latest series of Code-Specific Qwen large language models (formerly known as CodeQwen). 
For Qwen2.5-Coder, we release three base language models and instruction-tuned language models, 1.5, 7 and 32 (coming soon) billion parameters. Qwen2.5-Coder brings the following improvements upon CodeQwen1.5: + + Significantly improvements in code generation, code reasoning and code fixing. Base on the strong Qwen2.5, we scale up the training tokens into 5.5 trillion including source code, text-code grounding, Synthetic data, etc. + A more comprehensive foundation for real-world applications such as Code Agents. Not only enhancing coding capabilities but also maintaining its strengths in mathematics and general competencies. + Long-context Support up to 128K tokens. + overrides: + parameters: + model: Qwen2.5-Coder-7B-Instruct-Q4_K_M.gguf + files: + - filename: Qwen2.5-Coder-7B-Instruct-Q4_K_M.gguf + sha256: 1664fccab734674a50763490a8c6931b70e3f2f8ec10031b54806d30e5f956b6 + uri: huggingface://bartowski/Qwen2.5-Coder-7B-Instruct-GGUF/Qwen2.5-Coder-7B-Instruct-Q4_K_M.gguf ## SmolLM - &smollm url: "github:mudler/LocalAI/gallery/chatml.yaml@master" From 56d8f5163c427eb0e0d3b9483aa4e585f571a0bf Mon Sep 17 00:00:00 2001 From: Ettore Di Giacinto Date: Fri, 20 Sep 2024 15:12:35 +0200 Subject: [PATCH 0150/1530] models(gallery): add qwen2.5-math-72b-instruct (#3612) Signed-off-by: Ettore Di Giacinto --- gallery/index.yaml | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) diff --git a/gallery/index.yaml b/gallery/index.yaml index 1f52fec8..945c45b9 100644 --- a/gallery/index.yaml +++ b/gallery/index.yaml @@ -84,6 +84,23 @@ - filename: Qwen2.5-Coder-7B-Instruct-Q4_K_M.gguf sha256: 1664fccab734674a50763490a8c6931b70e3f2f8ec10031b54806d30e5f956b6 uri: huggingface://bartowski/Qwen2.5-Coder-7B-Instruct-GGUF/Qwen2.5-Coder-7B-Instruct-Q4_K_M.gguf +- !!merge <<: *qwen25 + name: "qwen2.5-math-72b-instruct" + icon: http://qianwen-res.oss-accelerate-overseas.aliyuncs.com/Qwen2.5/qwen2.5-math-pipeline.jpeg + urls: + - https://huggingface.co/Qwen/Qwen2.5-Math-72B-Instruct + - 
https://huggingface.co/bartowski/Qwen2.5-Math-72B-Instruct-GGUF + description: | + In August 2024, we released the first series of mathematical LLMs - Qwen2-Math - of our Qwen family. A month later, we have upgraded it and open-sourced Qwen2.5-Math series, including base models Qwen2.5-Math-1.5B/7B/72B, instruction-tuned models Qwen2.5-Math-1.5B/7B/72B-Instruct, and mathematical reward model Qwen2.5-Math-RM-72B. + + Unlike Qwen2-Math series which only supports using Chain-of-Thught (CoT) to solve English math problems, Qwen2.5-Math series is expanded to support using both CoT and Tool-integrated Reasoning (TIR) to solve math problems in both Chinese and English. The Qwen2.5-Math series models have achieved significant performance improvements compared to the Qwen2-Math series models on the Chinese and English mathematics benchmarks with CoT + overrides: + parameters: + model: Qwen2.5-Math-72B-Instruct-Q4_K_M.gguf + files: + - filename: Qwen2.5-Math-72B-Instruct-Q4_K_M.gguf + sha256: 5dee8a6e21d555577712b4f65565a3c3737a0d5d92f5a82970728c6d8e237f17 + uri: huggingface://bartowski/Qwen2.5-Math-72B-Instruct-GGUF/Qwen2.5-Math-72B-Instruct-Q4_K_M.gguf ## SmolLM - &smollm url: "github:mudler/LocalAI/gallery/chatml.yaml@master" From 052af98dcd3a5d50cd1c7f2f0920b77e508ada5e Mon Sep 17 00:00:00 2001 From: Ettore Di Giacinto Date: Fri, 20 Sep 2024 15:45:23 +0200 Subject: [PATCH 0151/1530] models(gallery): add qwen2.5-0.5b-instruct, qwen2.5-1.5b-instruct (#3613) Signed-off-by: Ettore Di Giacinto --- gallery/index.yaml | 24 ++++++++++++++++++++++++ 1 file changed, 24 insertions(+) diff --git a/gallery/index.yaml b/gallery/index.yaml index 945c45b9..adac3e51 100644 --- a/gallery/index.yaml +++ b/gallery/index.yaml @@ -101,6 +101,30 @@ - filename: Qwen2.5-Math-72B-Instruct-Q4_K_M.gguf sha256: 5dee8a6e21d555577712b4f65565a3c3737a0d5d92f5a82970728c6d8e237f17 uri: huggingface://bartowski/Qwen2.5-Math-72B-Instruct-GGUF/Qwen2.5-Math-72B-Instruct-Q4_K_M.gguf +- !!merge <<: *qwen25 + 
name: "qwen2.5-0.5b-instruct" + urls: + - https://huggingface.co/Qwen/Qwen2.5-0.5B-Instruct + - https://huggingface.co/bartowski/Qwen2.5-0.5B-Instruct-GGUF + overrides: + parameters: + model: Qwen2.5-0.5B-Instruct-Q4_K_M.gguf + files: + - filename: Qwen2.5-0.5B-Instruct-Q4_K_M.gguf + sha256: 6eb923e7d26e9cea28811e1a8e852009b21242fb157b26149d3b188f3a8c8653 + uri: huggingface://bartowski/Qwen2.5-0.5B-Instruct-GGUF/Qwen2.5-0.5B-Instruct-Q4_K_M.gguf +- !!merge <<: *qwen25 + name: "qwen2.5-1.5b-instruct" + urls: + - https://huggingface.co/Qwen/Qwen2.5-1.5B-Instruct + - https://huggingface.co/bartowski/Qwen2.5-1.5B-Instruct-GGUF + overrides: + parameters: + model: Qwen2.5-1.5B-Instruct-Q4_K_M.gguf + files: + - filename: Qwen2.5-1.5B-Instruct-Q4_K_M.gguf + sha256: 1adf0b11065d8ad2e8123ea110d1ec956dab4ab038eab665614adba04b6c3370 + uri: huggingface://bartowski/Qwen2.5-1.5B-Instruct-GGUF/Qwen2.5-1.5B-Instruct-Q4_K_M.gguf ## SmolLM - &smollm url: "github:mudler/LocalAI/gallery/chatml.yaml@master" From 38cad0b8dc32e3ce8d8650718c16df6725cb63dc Mon Sep 17 00:00:00 2001 From: Ettore Di Giacinto Date: Fri, 20 Sep 2024 17:10:43 +0200 Subject: [PATCH 0152/1530] models(gallery): add qwen2.5 32B, 72B, 32B Instruct (#3614) Signed-off-by: Ettore Di Giacinto --- gallery/index.yaml | 36 ++++++++++++++++++++++++++++++++++++ 1 file changed, 36 insertions(+) diff --git a/gallery/index.yaml b/gallery/index.yaml index adac3e51..5304f9d2 100644 --- a/gallery/index.yaml +++ b/gallery/index.yaml @@ -125,6 +125,42 @@ - filename: Qwen2.5-1.5B-Instruct-Q4_K_M.gguf sha256: 1adf0b11065d8ad2e8123ea110d1ec956dab4ab038eab665614adba04b6c3370 uri: huggingface://bartowski/Qwen2.5-1.5B-Instruct-GGUF/Qwen2.5-1.5B-Instruct-Q4_K_M.gguf +- !!merge <<: *qwen25 + name: "qwen2.5-32b" + urls: + - https://huggingface.co/Qwen/Qwen2.5-32B + - https://huggingface.co/mradermacher/Qwen2.5-32B-GGUF + overrides: + parameters: + model: Qwen2.5-32B.Q4_K_M.gguf + files: + - filename: Qwen2.5-32B.Q4_K_M.gguf + sha256: 
02703e27c8b964db445444581a6937ad7538f0c32a100b26b49fa0e8ff527155 + uri: huggingface://mradermacher/Qwen2.5-32B-GGUF/Qwen2.5-32B.Q4_K_M.gguf +- !!merge <<: *qwen25 + name: "qwen2.5-32b-instruct" + urls: + - https://huggingface.co/Qwen/Qwen2.5-32B-Instruct + - https://huggingface.co/bartowski/Qwen2.5-32B-Instruct-GGUF + overrides: + parameters: + model: Qwen2.5-32B-Instruct-Q4_K_M.gguf + files: + - filename: Qwen2.5-32B-Instruct-Q4_K_M.gguf + sha256: 2e5f6daea180dbc59f65a40641e94d3973b5dbaa32b3c0acf54647fa874e519e + uri: huggingface://bartowski/Qwen2.5-32B-Instruct-GGUF/Qwen2.5-32B-Instruct-Q4_K_M.gguf +- !!merge <<: *qwen25 + name: "qwen2.5-72b-instruct" + urls: + - https://huggingface.co/Qwen/Qwen2.5-72B-Instruct + - https://huggingface.co/bartowski/Qwen2.5-72B-Instruct-GGUF + overrides: + parameters: + model: Qwen2.5-72B-Instruct-Q4_K_M.gguf + files: + - filename: Qwen2.5-72B-Instruct-Q4_K_M.gguf + sha256: e4c8fad16946be8cf0bbf67eb8f4e18fc7415a5a6d2854b4cda453edb4082545 + uri: huggingface://bartowski/Qwen2.5-72B-Instruct-GGUF/Qwen2.5-72B-Instruct-Q4_K_M.gguf ## SmolLM - &smollm url: "github:mudler/LocalAI/gallery/chatml.yaml@master" From c4cecba07fda9c9db738aaaaa40756fbee3e879b Mon Sep 17 00:00:00 2001 From: Ettore Di Giacinto Date: Fri, 20 Sep 2024 17:19:53 +0200 Subject: [PATCH 0153/1530] models(gallery): add llama-3.1-supernova-lite-reflection-v1.0-i1 (#3615) Signed-off-by: Ettore Di Giacinto --- gallery/index.yaml | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/gallery/index.yaml b/gallery/index.yaml index 5304f9d2..60eed4ce 100644 --- a/gallery/index.yaml +++ b/gallery/index.yaml @@ -601,6 +601,22 @@ - filename: Reflection-Llama-3.1-70B-q4_k_m.gguf sha256: 16064e07037883a750cfeae9a7be41143aa857dbac81c2e93c68e2f941dee7b2 uri: huggingface://senseable/Reflection-Llama-3.1-70B-gguf/Reflection-Llama-3.1-70B-q4_k_m.gguf +- !!merge <<: *llama31 + name: "llama-3.1-supernova-lite-reflection-v1.0-i1" + url: 
"github:mudler/LocalAI/gallery/llama3.1-reflective.yaml@master" + icon: https://i.ibb.co/r072p7j/eopi-ZVu-SQ0-G-Cav78-Byq-Tg.png + urls: + - https://huggingface.co/SE6446/Llama-3.1-SuperNova-Lite-Reflection-V1.0 + - https://huggingface.co/mradermacher/Llama-3.1-SuperNova-Lite-Reflection-V1.0-i1-GGUF + description: | + This model is a LoRA adaptation of arcee-ai/Llama-3.1-SuperNova-Lite on thesven/Reflective-MAGLLAMA-v0.1.1. This has been a simple experiment into reflection and the model appears to perform adequately, though I am unsure if it is a large improvement. + overrides: + parameters: + model: Llama-3.1-SuperNova-Lite-Reflection-V1.0.i1-Q4_K_M.gguf + files: + - filename: Llama-3.1-SuperNova-Lite-Reflection-V1.0.i1-Q4_K_M.gguf + sha256: 0c4531fe553d00142808e1bc7348ae92d400794c5b64d2db1a974718324dfe9a + uri: huggingface://mradermacher/Llama-3.1-SuperNova-Lite-Reflection-V1.0-i1-GGUF/Llama-3.1-SuperNova-Lite-Reflection-V1.0.i1-Q4_K_M.gguf ## Uncensored models - !!merge <<: *llama31 name: "humanish-roleplay-llama-3.1-8b-i1" From e24654ada064f0b7f6a2eb2be29b8136e52ccc0b Mon Sep 17 00:00:00 2001 From: Ettore Di Giacinto Date: Fri, 20 Sep 2024 17:23:30 +0200 Subject: [PATCH 0154/1530] models(gallery): add llama-3.1-supernova-lite (#3616) Signed-off-by: Ettore Di Giacinto --- gallery/index.yaml | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) diff --git a/gallery/index.yaml b/gallery/index.yaml index 60eed4ce..c05593b1 100644 --- a/gallery/index.yaml +++ b/gallery/index.yaml @@ -617,6 +617,25 @@ - filename: Llama-3.1-SuperNova-Lite-Reflection-V1.0.i1-Q4_K_M.gguf sha256: 0c4531fe553d00142808e1bc7348ae92d400794c5b64d2db1a974718324dfe9a uri: huggingface://mradermacher/Llama-3.1-SuperNova-Lite-Reflection-V1.0-i1-GGUF/Llama-3.1-SuperNova-Lite-Reflection-V1.0.i1-Q4_K_M.gguf +- !!merge <<: *llama31 + name: "llama-3.1-supernova-lite" + icon: https://i.ibb.co/r072p7j/eopi-ZVu-SQ0-G-Cav78-Byq-Tg.png + urls: + - https://huggingface.co/arcee-ai/Llama-3.1-SuperNova-Lite 
+ - https://huggingface.co/arcee-ai/Llama-3.1-SuperNova-Lite-GGUF + description: | + Llama-3.1-SuperNova-Lite is an 8B parameter model developed by Arcee.ai, based on the Llama-3.1-8B-Instruct architecture. It is a distilled version of the larger Llama-3.1-405B-Instruct model, leveraging offline logits extracted from the 405B parameter variant. This 8B variation of Llama-3.1-SuperNova maintains high performance while offering exceptional instruction-following capabilities and domain-specific adaptability. + + The model was trained using a state-of-the-art distillation pipeline and an instruction dataset generated with EvolKit, ensuring accuracy and efficiency across a wide range of tasks. For more information on its training, visit blog.arcee.ai. + + Llama-3.1-SuperNova-Lite excels in both benchmark performance and real-world applications, providing the power of large-scale models in a more compact, efficient form ideal for organizations seeking high performance with reduced resource requirements. 
+ overrides: + parameters: + model: supernova-lite-v1.Q4_K_M.gguf + files: + - filename: supernova-lite-v1.Q4_K_M.gguf + sha256: 237b7b0b704d294f92f36c576cc8fdc10592f95168a5ad0f075a2d8edf20da4d + uri: huggingface://arcee-ai/Llama-3.1-SuperNova-Lite-GGUF/supernova-lite-v1.Q4_K_M.gguf ## Uncensored models - !!merge <<: *llama31 name: "humanish-roleplay-llama-3.1-8b-i1" From f55053bfbaa9a71ea72b9efb0aa4f5347dc34574 Mon Sep 17 00:00:00 2001 From: Ettore Di Giacinto Date: Fri, 20 Sep 2024 17:26:59 +0200 Subject: [PATCH 0155/1530] models(gallery): add llama3.1-8b-shiningvaliant2 (#3617) Signed-off-by: Ettore Di Giacinto --- gallery/index.yaml | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git a/gallery/index.yaml b/gallery/index.yaml index c05593b1..3c3b1a23 100644 --- a/gallery/index.yaml +++ b/gallery/index.yaml @@ -636,6 +636,24 @@ - filename: supernova-lite-v1.Q4_K_M.gguf sha256: 237b7b0b704d294f92f36c576cc8fdc10592f95168a5ad0f075a2d8edf20da4d uri: huggingface://arcee-ai/Llama-3.1-SuperNova-Lite-GGUF/supernova-lite-v1.Q4_K_M.gguf +- !!merge <<: *llama31 + name: "llama3.1-8b-shiningvaliant2" + icon: https://cdn-uploads.huggingface.co/production/uploads/63444f2687964b331809eb55/EXX7TKbB-R6arxww2mk0R.jpeg + urls: + - https://huggingface.co/ValiantLabs/Llama3.1-8B-ShiningValiant2 + - https://huggingface.co/bartowski/Llama3.1-8B-ShiningValiant2-GGUF + description: | + Shining Valiant 2 is a chat model built on Llama 3.1 8b, finetuned on our data for friendship, insight, knowledge and enthusiasm. 
+ + Finetuned on meta-llama/Meta-Llama-3.1-8B-Instruct for best available general performance + Trained on a variety of high quality data; focused on science, engineering, technical knowledge, and structured reasoning + overrides: + parameters: + model: Llama3.1-8B-ShiningValiant2-Q4_K_M.gguf + files: + - filename: Llama3.1-8B-ShiningValiant2-Q4_K_M.gguf + sha256: 9369eb97922a9f01e4eae610e3d7aaeca30762d78d9239884179451d60bdbdd2 + uri: huggingface://bartowski/Llama3.1-8B-ShiningValiant2-GGUF/Llama3.1-8B-ShiningValiant2-Q4_K_M.gguf ## Uncensored models - !!merge <<: *llama31 name: "humanish-roleplay-llama-3.1-8b-i1" From 415cf31aa3e51aa44f1097d0459f8d410e3adb27 Mon Sep 17 00:00:00 2001 From: Ettore Di Giacinto Date: Fri, 20 Sep 2024 17:33:29 +0200 Subject: [PATCH 0156/1530] models(gallery): add buddy2 (#3618) Signed-off-by: Ettore Di Giacinto --- gallery/index.yaml | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/gallery/index.yaml b/gallery/index.yaml index 3c3b1a23..b46967ad 100644 --- a/gallery/index.yaml +++ b/gallery/index.yaml @@ -2065,6 +2065,20 @@ - filename: datagemma-rig-27b-it-Q4_K_M.gguf sha256: a6738ffbb49b6c46d220e2793df85c0538e9ac72398e32a0914ee5e55c3096ad uri: huggingface://bartowski/datagemma-rig-27b-it-GGUF/datagemma-rig-27b-it-Q4_K_M.gguf +- !!merge <<: *gemma + name: "buddy-2b-v1" + urls: + - https://huggingface.co/TheDrummer/Buddy-2B-v1 + - https://huggingface.co/bartowski/Buddy-2B-v1-GGUF + description: | + Buddy is designed as an empathetic language model, aimed at fostering introspection, self-reflection, and personal growth through thoughtful conversation. Buddy won't judge and it won't dismiss your concerns. Get some self-care with Buddy. 
+ overrides: + parameters: + model: Buddy-2B-v1-Q4_K_M.gguf + files: + - filename: Buddy-2B-v1-Q4_K_M.gguf + sha256: 9bd25ed907d1a3c2e07fe09399a9b3aec107d368c29896e2c46facede5b7e3d5 + uri: huggingface://bartowski/Buddy-2B-v1-GGUF/Buddy-2B-v1-Q4_K_M.gguf - &llama3 url: "github:mudler/LocalAI/gallery/llama3-instruct.yaml@master" icon: https://cdn-uploads.huggingface.co/production/uploads/642cc1c253e76b4c2286c58e/aJJxKus1wP5N-euvHEUq7.png From 00d6c2a96683ffc6d169ecaeeaa9d5c5bb8384f1 Mon Sep 17 00:00:00 2001 From: Ettore Di Giacinto Date: Fri, 20 Sep 2024 17:35:06 +0200 Subject: [PATCH 0157/1530] models(gallery): add llama3.1-reflective config Signed-off-by: Ettore Di Giacinto --- gallery/llama3.1-reflective.yaml | 65 ++++++++++++++++++++++++++++++++ 1 file changed, 65 insertions(+) create mode 100644 gallery/llama3.1-reflective.yaml diff --git a/gallery/llama3.1-reflective.yaml b/gallery/llama3.1-reflective.yaml new file mode 100644 index 00000000..86a91d8b --- /dev/null +++ b/gallery/llama3.1-reflective.yaml @@ -0,0 +1,65 @@ +--- +name: "llama3-instruct" + +config_file: | + mmap: true + cutstrings: + - (.*?) + function: + disable_no_action: true + grammar: + disable: true + response_regex: + - \w+)>(?P.*) + template: + chat_message: | + <|start_header_id|>{{if eq .RoleName "assistant"}}assistant{{else if eq .RoleName "system"}}system{{else if eq .RoleName "tool"}}tool{{else if eq .RoleName "user"}}user{{end}}<|end_header_id|> + + {{ if .FunctionCall -}} + Function call: + {{ else if eq .RoleName "tool" -}} + Function response: + {{ end -}} + {{ if .Content -}} + {{.Content -}} + {{ else if .FunctionCall -}} + {{ toJson .FunctionCall -}} + {{ end -}} + <|eot_id|> + function: | + <|start_header_id|>system<|end_header_id|> + + You have access to the following functions: + + {{range .Functions}} + Use the function '{{.Name}}' to '{{.Description}}' + {{toJson .Parameters}} + {{end}} + + Think very carefully before calling functions. 
+ If a you choose to call a function ONLY reply in the following format with no prefix or suffix: + + {{`{{"example_name": "example_value"}}`}} + + Reminder: + - If looking for real time information use relevant functions before falling back to searching on internet + - Function calls MUST follow the specified format, start with + - Required parameters MUST be specified + - Only call one function at a time + - Put the entire function call reply on one line + <|eot_id|> + {{.Input }} + <|start_header_id|>assistant<|end_header_id|> + chat: | + {{.Input }} + <|start_header_id|>assistant<|end_header_id|> + + completion: | + {{.Input}} + context_size: 8192 + f16: true + stopwords: + - <|im_end|> + - + - "<|eot_id|>" + - <|end_of_text|> From 6c6cd8bbe0af9c93560b5eb20b8153d53625ac63 Mon Sep 17 00:00:00 2001 From: Ettore Di Giacinto Date: Fri, 20 Sep 2024 18:15:51 +0200 Subject: [PATCH 0158/1530] models(gallery): add llama-3.1-8b-arliai-rpmax-v1.1 (#3619) Signed-off-by: Ettore Di Giacinto --- gallery/index.yaml | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/gallery/index.yaml b/gallery/index.yaml index b46967ad..59cab687 100644 --- a/gallery/index.yaml +++ b/gallery/index.yaml @@ -956,6 +956,20 @@ - filename: Llama-3.1-8B-Stheno-v3.4-Q4_K_M-imat.gguf sha256: 830d4858aa11a654f82f69fa40dee819edf9ecf54213057648304eb84b8dd5eb uri: huggingface://Lewdiculous/Llama-3.1-8B-Stheno-v3.4-GGUF-IQ-Imatrix/Llama-3.1-8B-Stheno-v3.4-Q4_K_M-imat.gguf +- !!merge <<: *llama31 + name: "llama-3.1-8b-arliai-rpmax-v1.1" + urls: + - https://huggingface.co/ArliAI/Llama-3.1-8B-ArliAI-RPMax-v1.1 + - https://huggingface.co/bartowski/Llama-3.1-8B-ArliAI-RPMax-v1.1-GGUF + description: | + RPMax is a series of models that are trained on a diverse set of curated creative writing and RP datasets with a focus on variety and deduplication. 
This model is designed to be highly creative and non-repetitive by making sure no two entries in the dataset have repeated characters or situations, which makes sure the model does not latch on to a certain personality and be capable of understanding and acting appropriately to any characters or situations. + overrides: + parameters: + model: Llama-3.1-8B-ArliAI-RPMax-v1.1-Q4_K_M.gguf + files: + - filename: Llama-3.1-8B-ArliAI-RPMax-v1.1-Q4_K_M.gguf + sha256: 0a601c7341228d9160332965298d799369a1dc2b7080771fb8051bdeb556b30c + uri: huggingface://bartowski/Llama-3.1-8B-ArliAI-RPMax-v1.1-GGUF/Llama-3.1-8B-ArliAI-RPMax-v1.1-Q4_K_M.gguf - &deepseek ## Deepseek url: "github:mudler/LocalAI/gallery/deepseek.yaml@master" From bf8e50a11d2aa2ae3e27c770812a402c5c8cc6eb Mon Sep 17 00:00:00 2001 From: Ettore Di Giacinto Date: Fri, 20 Sep 2024 18:16:01 +0200 Subject: [PATCH 0159/1530] chore(docs): add Vulkan images links (#3620) Signed-off-by: Ettore Di Giacinto --- docs/content/docs/getting-started/container-images.md | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/docs/content/docs/getting-started/container-images.md b/docs/content/docs/getting-started/container-images.md index 86fe31d1..25385f23 100644 --- a/docs/content/docs/getting-started/container-images.md +++ b/docs/content/docs/getting-started/container-images.md @@ -154,7 +154,7 @@ Images are available with and without python dependencies. Note that images with Images with `core` in the tag are smaller and do not contain any python dependencies. 
-{{< tabs tabTotal="6" >}} +{{< tabs tabTotal="7" >}} {{% tab tabName="Vanilla / CPU Images" %}} | Description | Quay | Docker Hub | @@ -227,6 +227,15 @@ Images with `core` in the tag are smaller and do not contain any python dependen {{% /tab %}} + +{{% tab tabName="Vulkan Images" %}} +| Description | Quay | Docker Hub | +| --- | --- |-------------------------------------------------------------| +| Latest images from the branch (development) | `quay.io/go-skynet/local-ai: master-vulkan-ffmpeg-core ` | `localai/localai: master-vulkan-ffmpeg-core ` | +| Latest tag | `quay.io/go-skynet/local-ai: latest-vulkan-ffmpeg-core ` | `localai/localai: latest-vulkan-ffmpeg-core` | +| Versioned image including FFMpeg, no python | `quay.io/go-skynet/local-ai:{{< version >}}-vulkan-fmpeg-core` | `localai/localai:{{< version >}}-vulkan-fmpeg-core` | +{{% /tab %}} + {{< /tabs >}} ## See Also From cef7f8a0146474e1e30676ea820f8b5047bc73b2 Mon Sep 17 00:00:00 2001 From: "LocalAI [bot]" <139863280+localai-bot@users.noreply.github.com> Date: Fri, 20 Sep 2024 23:41:13 +0200 Subject: [PATCH 0160/1530] chore: :arrow_up: Update ggerganov/whisper.cpp to `34972dbe221709323714fc8402f2e24041d48213` (#3623) :arrow_up: Update ggerganov/whisper.cpp Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com> Co-authored-by: mudler <2420543+mudler@users.noreply.github.com> --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index 53def128..89b0d4aa 100644 --- a/Makefile +++ b/Makefile @@ -16,7 +16,7 @@ RWKV_VERSION?=661e7ae26d442f5cfebd2a0881b44e8c55949ec6 # whisper.cpp version WHISPER_REPO?=https://github.com/ggerganov/whisper.cpp -WHISPER_CPP_VERSION?=5b1ce40fa882e9cb8630b48032067a1ed2f1534f +WHISPER_CPP_VERSION?=34972dbe221709323714fc8402f2e24041d48213 # bert.cpp version BERT_REPO?=https://github.com/go-skynet/go-bert.cpp From 54f2657870c73a100c69ad55c862cfc41f9da028 Mon Sep 17 00:00:00 2001 From: "LocalAI [bot]" 
<139863280+localai-bot@users.noreply.github.com> Date: Sat, 21 Sep 2024 10:09:41 +0200 Subject: [PATCH 0161/1530] chore: :arrow_up: Update ggerganov/llama.cpp to `63351143b2ea5efe9f8b9c61f553af8a51f1deff` (#3622) :arrow_up: Update ggerganov/llama.cpp Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com> Co-authored-by: mudler <2420543+mudler@users.noreply.github.com> --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index 89b0d4aa..83fb1215 100644 --- a/Makefile +++ b/Makefile @@ -8,7 +8,7 @@ DETECT_LIBS?=true # llama.cpp versions GOLLAMA_REPO?=https://github.com/go-skynet/go-llama.cpp GOLLAMA_VERSION?=2b57a8ae43e4699d3dc5d1496a1ccd42922993be -CPPLLAMA_VERSION?=6026da52d6942b253df835070619775d849d0258 +CPPLLAMA_VERSION?=63351143b2ea5efe9f8b9c61f553af8a51f1deff # go-rwkv version RWKV_REPO?=https://github.com/donomii/go-rwkv.cpp From c22b3187a7179f1dc721d71c4e18742e173275aa Mon Sep 17 00:00:00 2001 From: lnyxaris Date: Sat, 21 Sep 2024 10:10:27 +0200 Subject: [PATCH 0162/1530] Fix NeuralDaredevil URL (#3621) Signed-off-by: lnyxaris --- gallery/index.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/gallery/index.yaml b/gallery/index.yaml index 59cab687..7dab9eb7 100644 --- a/gallery/index.yaml +++ b/gallery/index.yaml @@ -3972,7 +3972,7 @@ files: - filename: NeuralDaredevil-8B-abliterated.Q4_K_M.gguf sha256: 12f4af9d66817d7d300bd9a181e4fe66f7ecf7ea972049f2cbd0554cdc3ecf05 - uri: huggingface://QuantFactory/NeuralDaredevil-8B-abliterated-GGUF/Poppy_Porpoise-0.85-L3-8B-Q4_K_M-imat.gguf + uri: huggingface://QuantFactory/NeuralDaredevil-8B-abliterated-GGUF/NeuralDaredevil-8B-abliterated.Q4_K_M.gguf - !!merge <<: *llama3 name: "llama-3-8b-instruct-mopeymule" urls: From 5c3d1d81e63e823278c8630b4a2a3a93ddf6af0c Mon Sep 17 00:00:00 2001 From: Ettore Di Giacinto Date: Sat, 21 Sep 2024 16:04:04 +0200 Subject: [PATCH 0163/1530] fix(parler-tts): fix install with sycl (#3624) 
Signed-off-by: Ettore Di Giacinto --- backend/python/parler-tts/install.sh | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/backend/python/parler-tts/install.sh b/backend/python/parler-tts/install.sh index 002472a2..aae690c4 100755 --- a/backend/python/parler-tts/install.sh +++ b/backend/python/parler-tts/install.sh @@ -15,5 +15,12 @@ installRequirements # https://github.com/descriptinc/audiotools/issues/101 # incompatible protobuf versions. -PYDIR=$(ls ${MY_DIR}/venv/lib) -curl -L https://raw.githubusercontent.com/protocolbuffers/protobuf/main/python/google/protobuf/internal/builder.py -o ${MY_DIR}/venv/lib/${PYDIR}/site-packages/google/protobuf/internal/builder.py +PYDIR=python3.10 +pyenv="${MY_DIR}/venv/lib/${PYDIR}/site-packages/google/protobuf/internal/" + +if [ ! -d ${pyenv} ]; then + echo "(parler-tts/install.sh): Error: ${pyenv} does not exist" + exit 1 +fi + +curl -L https://raw.githubusercontent.com/protocolbuffers/protobuf/main/python/google/protobuf/internal/builder.py -o ${pyenv}/builder.py From 20c0e128c00601edb7e46089c1e32672f353c52e Mon Sep 17 00:00:00 2001 From: Ettore Di Giacinto Date: Sat, 21 Sep 2024 21:52:12 +0200 Subject: [PATCH 0164/1530] fix(sycl): downgrade pypinyin melotts requires pypinyin 0.50 Signed-off-by: Ettore Di Giacinto --- backend/python/openvoice/requirements-intel.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/backend/python/openvoice/requirements-intel.txt b/backend/python/openvoice/requirements-intel.txt index cea7de0b..a9a4cc20 100644 --- a/backend/python/openvoice/requirements-intel.txt +++ b/backend/python/openvoice/requirements-intel.txt @@ -15,7 +15,7 @@ unidecode==1.3.7 whisper-timestamped==1.15.4 openai python-dotenv -pypinyin==0.53.0 +pypinyin==0.50.0 cn2an==0.5.22 jieba==0.42.1 gradio==4.38.1 From 1f43678d5311e7bdc434768ea74c97e49a6ebc7e Mon Sep 17 00:00:00 2001 From: "LocalAI [bot]" <139863280+localai-bot@users.noreply.github.com> Date: Sun, 22 Sep 2024 00:03:23 
+0200 Subject: [PATCH 0165/1530] chore: :arrow_up: Update ggerganov/llama.cpp to `d09770cae71b416c032ec143dda530f7413c4038` (#3626) :arrow_up: Update ggerganov/llama.cpp Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com> Co-authored-by: mudler <2420543+mudler@users.noreply.github.com> --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index 83fb1215..51755e71 100644 --- a/Makefile +++ b/Makefile @@ -8,7 +8,7 @@ DETECT_LIBS?=true # llama.cpp versions GOLLAMA_REPO?=https://github.com/go-skynet/go-llama.cpp GOLLAMA_VERSION?=2b57a8ae43e4699d3dc5d1496a1ccd42922993be -CPPLLAMA_VERSION?=63351143b2ea5efe9f8b9c61f553af8a51f1deff +CPPLLAMA_VERSION?=d09770cae71b416c032ec143dda530f7413c4038 # go-rwkv version RWKV_REPO?=https://github.com/donomii/go-rwkv.cpp From ee21b00a8d6b652b61d075e3bba1b88c8d52488c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Serta=C3=A7=20=C3=96zercan?= <852750+sozercan@users.noreply.github.com> Date: Sun, 22 Sep 2024 01:03:30 -0700 Subject: [PATCH 0166/1530] feat: auto load into memory on startup (#3627) Signed-off-by: Sertac Ozercan --- core/backend/embeddings.go | 2 +- core/backend/image.go | 2 +- core/backend/llm.go | 2 +- core/backend/options.go | 2 +- core/backend/rerank.go | 2 +- core/backend/soundgeneration.go | 2 +- core/backend/tts.go | 2 +- core/cli/run.go | 2 + core/config/application_config.go | 7 + core/startup/startup.go | 449 ++++++++++++++++-------------- 10 files changed, 259 insertions(+), 213 deletions(-) diff --git a/core/backend/embeddings.go b/core/backend/embeddings.go index 31b10a19..9f0f8be9 100644 --- a/core/backend/embeddings.go +++ b/core/backend/embeddings.go @@ -12,7 +12,7 @@ import ( func ModelEmbedding(s string, tokens []int, loader *model.ModelLoader, backendConfig config.BackendConfig, appConfig *config.ApplicationConfig) (func() ([]float32, error), error) { modelFile := backendConfig.Model - grpcOpts := gRPCModelOpts(backendConfig) + 
grpcOpts := GRPCModelOpts(backendConfig) var inferenceModel interface{} var err error diff --git a/core/backend/image.go b/core/backend/image.go index 8c3f56b3..5c2a950c 100644 --- a/core/backend/image.go +++ b/core/backend/image.go @@ -12,7 +12,7 @@ func ImageGeneration(height, width, mode, step, seed int, positive_prompt, negat if *threads == 0 && appConfig.Threads != 0 { threads = &appConfig.Threads } - gRPCOpts := gRPCModelOpts(backendConfig) + gRPCOpts := GRPCModelOpts(backendConfig) opts := modelOpts(backendConfig, appConfig, []model.Option{ model.WithBackendString(backendConfig.Backend), model.WithAssetDir(appConfig.AssetsDestination), diff --git a/core/backend/llm.go b/core/backend/llm.go index f74071ba..cac9beba 100644 --- a/core/backend/llm.go +++ b/core/backend/llm.go @@ -37,7 +37,7 @@ func ModelInference(ctx context.Context, s string, messages []schema.Message, im if *threads == 0 && o.Threads != 0 { threads = &o.Threads } - grpcOpts := gRPCModelOpts(c) + grpcOpts := GRPCModelOpts(c) var inferenceModel grpc.Backend var err error diff --git a/core/backend/options.go b/core/backend/options.go index d986b8e6..d431aab6 100644 --- a/core/backend/options.go +++ b/core/backend/options.go @@ -44,7 +44,7 @@ func getSeed(c config.BackendConfig) int32 { return seed } -func gRPCModelOpts(c config.BackendConfig) *pb.ModelOptions { +func GRPCModelOpts(c config.BackendConfig) *pb.ModelOptions { b := 512 if c.Batch != 0 { b = c.Batch diff --git a/core/backend/rerank.go b/core/backend/rerank.go index 1b718be2..a7573ade 100644 --- a/core/backend/rerank.go +++ b/core/backend/rerank.go @@ -15,7 +15,7 @@ func Rerank(backend, modelFile string, request *proto.RerankRequest, loader *mod return nil, fmt.Errorf("backend is required") } - grpcOpts := gRPCModelOpts(backendConfig) + grpcOpts := GRPCModelOpts(backendConfig) opts := modelOpts(config.BackendConfig{}, appConfig, []model.Option{ model.WithBackendString(bb), diff --git a/core/backend/soundgeneration.go 
b/core/backend/soundgeneration.go index abd5221b..b6a1c827 100644 --- a/core/backend/soundgeneration.go +++ b/core/backend/soundgeneration.go @@ -29,7 +29,7 @@ func SoundGeneration( return "", nil, fmt.Errorf("backend is a required parameter") } - grpcOpts := gRPCModelOpts(backendConfig) + grpcOpts := GRPCModelOpts(backendConfig) opts := modelOpts(config.BackendConfig{}, appConfig, []model.Option{ model.WithBackendString(backend), model.WithModel(modelFile), diff --git a/core/backend/tts.go b/core/backend/tts.go index 258882ae..2401748c 100644 --- a/core/backend/tts.go +++ b/core/backend/tts.go @@ -28,7 +28,7 @@ func ModelTTS( bb = model.PiperBackend } - grpcOpts := gRPCModelOpts(backendConfig) + grpcOpts := GRPCModelOpts(backendConfig) opts := modelOpts(config.BackendConfig{}, appConfig, []model.Option{ model.WithBackendString(bb), diff --git a/core/cli/run.go b/core/cli/run.go index afb7204c..a67839a0 100644 --- a/core/cli/run.go +++ b/core/cli/run.go @@ -69,6 +69,7 @@ type RunCMD struct { WatchdogBusyTimeout string `env:"LOCALAI_WATCHDOG_BUSY_TIMEOUT,WATCHDOG_BUSY_TIMEOUT" default:"5m" help:"Threshold beyond which a busy backend should be stopped" group:"backends"` Federated bool `env:"LOCALAI_FEDERATED,FEDERATED" help:"Enable federated instance" group:"federated"` DisableGalleryEndpoint bool `env:"LOCALAI_DISABLE_GALLERY_ENDPOINT,DISABLE_GALLERY_ENDPOINT" help:"Disable the gallery endpoints" group:"api"` + LoadToMemory []string `env:"LOCALAI_LOAD_TO_MEMORY,LOAD_TO_MEMORY" help:"A list of models to load into memory at startup" group:"models"` } func (r *RunCMD) Run(ctx *cliContext.Context) error { @@ -104,6 +105,7 @@ func (r *RunCMD) Run(ctx *cliContext.Context) error { config.WithDisableApiKeyRequirementForHttpGet(r.DisableApiKeyRequirementForHttpGet), config.WithHttpGetExemptedEndpoints(r.HttpGetExemptedEndpoints), config.WithP2PNetworkID(r.Peer2PeerNetworkID), + config.WithLoadToMemory(r.LoadToMemory), } token := "" diff --git 
a/core/config/application_config.go b/core/config/application_config.go index afbf325f..2af0c7ae 100644 --- a/core/config/application_config.go +++ b/core/config/application_config.go @@ -41,6 +41,7 @@ type ApplicationConfig struct { DisableApiKeyRequirementForHttpGet bool HttpGetExemptedEndpoints []*regexp.Regexp DisableGalleryEndpoint bool + LoadToMemory []string ModelLibraryURL string @@ -331,6 +332,12 @@ func WithOpaqueErrors(opaque bool) AppOption { } } +func WithLoadToMemory(models []string) AppOption { + return func(o *ApplicationConfig) { + o.LoadToMemory = models + } +} + func WithSubtleKeyComparison(subtle bool) AppOption { return func(o *ApplicationConfig) { o.UseSubtleKeyComparison = subtle diff --git a/core/startup/startup.go b/core/startup/startup.go index 3565d196..b7b9ce8f 100644 --- a/core/startup/startup.go +++ b/core/startup/startup.go @@ -1,206 +1,243 @@ -package startup - -import ( - "fmt" - "os" - - "github.com/mudler/LocalAI/core" - "github.com/mudler/LocalAI/core/config" - "github.com/mudler/LocalAI/core/services" - "github.com/mudler/LocalAI/internal" - "github.com/mudler/LocalAI/pkg/assets" - "github.com/mudler/LocalAI/pkg/library" - "github.com/mudler/LocalAI/pkg/model" - pkgStartup "github.com/mudler/LocalAI/pkg/startup" - "github.com/mudler/LocalAI/pkg/xsysinfo" - "github.com/rs/zerolog/log" -) - -func Startup(opts ...config.AppOption) (*config.BackendConfigLoader, *model.ModelLoader, *config.ApplicationConfig, error) { - options := config.NewApplicationConfig(opts...) 
- - log.Info().Msgf("Starting LocalAI using %d threads, with models path: %s", options.Threads, options.ModelPath) - log.Info().Msgf("LocalAI version: %s", internal.PrintableVersion()) - caps, err := xsysinfo.CPUCapabilities() - if err == nil { - log.Debug().Msgf("CPU capabilities: %v", caps) - } - gpus, err := xsysinfo.GPUs() - if err == nil { - log.Debug().Msgf("GPU count: %d", len(gpus)) - for _, gpu := range gpus { - log.Debug().Msgf("GPU: %s", gpu.String()) - } - } - - // Make sure directories exists - if options.ModelPath == "" { - return nil, nil, nil, fmt.Errorf("options.ModelPath cannot be empty") - } - err = os.MkdirAll(options.ModelPath, 0750) - if err != nil { - return nil, nil, nil, fmt.Errorf("unable to create ModelPath: %q", err) - } - if options.ImageDir != "" { - err := os.MkdirAll(options.ImageDir, 0750) - if err != nil { - return nil, nil, nil, fmt.Errorf("unable to create ImageDir: %q", err) - } - } - if options.AudioDir != "" { - err := os.MkdirAll(options.AudioDir, 0750) - if err != nil { - return nil, nil, nil, fmt.Errorf("unable to create AudioDir: %q", err) - } - } - if options.UploadDir != "" { - err := os.MkdirAll(options.UploadDir, 0750) - if err != nil { - return nil, nil, nil, fmt.Errorf("unable to create UploadDir: %q", err) - } - } - - if err := pkgStartup.InstallModels(options.Galleries, options.ModelLibraryURL, options.ModelPath, options.EnforcePredownloadScans, nil, options.ModelsURL...); err != nil { - log.Error().Err(err).Msg("error installing models") - } - - cl := config.NewBackendConfigLoader(options.ModelPath) - ml := model.NewModelLoader(options.ModelPath) - - configLoaderOpts := options.ToConfigLoaderOptions() - - if err := cl.LoadBackendConfigsFromPath(options.ModelPath, configLoaderOpts...); err != nil { - log.Error().Err(err).Msg("error loading config files") - } - - if options.ConfigFile != "" { - if err := cl.LoadMultipleBackendConfigsSingleFile(options.ConfigFile, configLoaderOpts...); err != nil { - 
log.Error().Err(err).Msg("error loading config file") - } - } - - if err := cl.Preload(options.ModelPath); err != nil { - log.Error().Err(err).Msg("error downloading models") - } - - if options.PreloadJSONModels != "" { - if err := services.ApplyGalleryFromString(options.ModelPath, options.PreloadJSONModels, options.EnforcePredownloadScans, options.Galleries); err != nil { - return nil, nil, nil, err - } - } - - if options.PreloadModelsFromPath != "" { - if err := services.ApplyGalleryFromFile(options.ModelPath, options.PreloadModelsFromPath, options.EnforcePredownloadScans, options.Galleries); err != nil { - return nil, nil, nil, err - } - } - - if options.Debug { - for _, v := range cl.GetAllBackendConfigs() { - log.Debug().Msgf("Model: %s (config: %+v)", v.Name, v) - } - } - - if options.AssetsDestination != "" { - // Extract files from the embedded FS - err := assets.ExtractFiles(options.BackendAssets, options.AssetsDestination) - log.Debug().Msgf("Extracting backend assets files to %s", options.AssetsDestination) - if err != nil { - log.Warn().Msgf("Failed extracting backend assets files: %s (might be required for some backends to work properly)", err) - } - } - - if options.LibPath != "" { - // If there is a lib directory, set LD_LIBRARY_PATH to include it - err := library.LoadExternal(options.LibPath) - if err != nil { - log.Error().Err(err).Str("LibPath", options.LibPath).Msg("Error while loading external libraries") - } - } - - // turn off any process that was started by GRPC if the context is canceled - go func() { - <-options.Context.Done() - log.Debug().Msgf("Context canceled, shutting down") - err := ml.StopAllGRPC() - if err != nil { - log.Error().Err(err).Msg("error while stopping all grpc backends") - } - }() - - if options.WatchDog { - wd := model.NewWatchDog( - ml, - options.WatchDogBusyTimeout, - options.WatchDogIdleTimeout, - options.WatchDogBusy, - options.WatchDogIdle) - ml.SetWatchDog(wd) - go wd.Run() - go func() { - <-options.Context.Done() 
- log.Debug().Msgf("Context canceled, shutting down") - wd.Shutdown() - }() - } - - // Watch the configuration directory - startWatcher(options) - - log.Info().Msg("core/startup process completed!") - return cl, ml, options, nil -} - -func startWatcher(options *config.ApplicationConfig) { - if options.DynamicConfigsDir == "" { - // No need to start the watcher if the directory is not set - return - } - - if _, err := os.Stat(options.DynamicConfigsDir); err != nil { - if os.IsNotExist(err) { - // We try to create the directory if it does not exist and was specified - if err := os.MkdirAll(options.DynamicConfigsDir, 0700); err != nil { - log.Error().Err(err).Msg("failed creating DynamicConfigsDir") - } - } else { - // something else happened, we log the error and don't start the watcher - log.Error().Err(err).Msg("failed to read DynamicConfigsDir, watcher will not be started") - return - } - } - - configHandler := newConfigFileHandler(options) - if err := configHandler.Watch(); err != nil { - log.Error().Err(err).Msg("failed creating watcher") - } -} - -// In Lieu of a proper DI framework, this function wires up the Application manually. -// This is in core/startup rather than core/state.go to keep package references clean! 
-func createApplication(appConfig *config.ApplicationConfig) *core.Application { - app := &core.Application{ - ApplicationConfig: appConfig, - BackendConfigLoader: config.NewBackendConfigLoader(appConfig.ModelPath), - ModelLoader: model.NewModelLoader(appConfig.ModelPath), - } - - var err error - - // app.EmbeddingsBackendService = backend.NewEmbeddingsBackendService(app.ModelLoader, app.BackendConfigLoader, app.ApplicationConfig) - // app.ImageGenerationBackendService = backend.NewImageGenerationBackendService(app.ModelLoader, app.BackendConfigLoader, app.ApplicationConfig) - // app.LLMBackendService = backend.NewLLMBackendService(app.ModelLoader, app.BackendConfigLoader, app.ApplicationConfig) - // app.TranscriptionBackendService = backend.NewTranscriptionBackendService(app.ModelLoader, app.BackendConfigLoader, app.ApplicationConfig) - // app.TextToSpeechBackendService = backend.NewTextToSpeechBackendService(app.ModelLoader, app.BackendConfigLoader, app.ApplicationConfig) - - app.BackendMonitorService = services.NewBackendMonitorService(app.ModelLoader, app.BackendConfigLoader, app.ApplicationConfig) - app.GalleryService = services.NewGalleryService(app.ApplicationConfig) - // app.OpenAIService = services.NewOpenAIService(app.ModelLoader, app.BackendConfigLoader, app.ApplicationConfig, app.LLMBackendService) - - app.LocalAIMetricsService, err = services.NewLocalAIMetricsService() - if err != nil { - log.Error().Err(err).Msg("encountered an error initializing metrics service, startup will continue but metrics will not be tracked.") - } - - return app -} +package startup + +import ( + "fmt" + "os" + + "github.com/mudler/LocalAI/core" + "github.com/mudler/LocalAI/core/backend" + "github.com/mudler/LocalAI/core/config" + "github.com/mudler/LocalAI/core/services" + "github.com/mudler/LocalAI/internal" + "github.com/mudler/LocalAI/pkg/assets" + "github.com/mudler/LocalAI/pkg/library" + "github.com/mudler/LocalAI/pkg/model" + pkgStartup 
"github.com/mudler/LocalAI/pkg/startup" + "github.com/mudler/LocalAI/pkg/xsysinfo" + "github.com/rs/zerolog/log" +) + +func Startup(opts ...config.AppOption) (*config.BackendConfigLoader, *model.ModelLoader, *config.ApplicationConfig, error) { + options := config.NewApplicationConfig(opts...) + + log.Info().Msgf("Starting LocalAI using %d threads, with models path: %s", options.Threads, options.ModelPath) + log.Info().Msgf("LocalAI version: %s", internal.PrintableVersion()) + caps, err := xsysinfo.CPUCapabilities() + if err == nil { + log.Debug().Msgf("CPU capabilities: %v", caps) + } + gpus, err := xsysinfo.GPUs() + if err == nil { + log.Debug().Msgf("GPU count: %d", len(gpus)) + for _, gpu := range gpus { + log.Debug().Msgf("GPU: %s", gpu.String()) + } + } + + // Make sure directories exists + if options.ModelPath == "" { + return nil, nil, nil, fmt.Errorf("options.ModelPath cannot be empty") + } + err = os.MkdirAll(options.ModelPath, 0750) + if err != nil { + return nil, nil, nil, fmt.Errorf("unable to create ModelPath: %q", err) + } + if options.ImageDir != "" { + err := os.MkdirAll(options.ImageDir, 0750) + if err != nil { + return nil, nil, nil, fmt.Errorf("unable to create ImageDir: %q", err) + } + } + if options.AudioDir != "" { + err := os.MkdirAll(options.AudioDir, 0750) + if err != nil { + return nil, nil, nil, fmt.Errorf("unable to create AudioDir: %q", err) + } + } + if options.UploadDir != "" { + err := os.MkdirAll(options.UploadDir, 0750) + if err != nil { + return nil, nil, nil, fmt.Errorf("unable to create UploadDir: %q", err) + } + } + + if err := pkgStartup.InstallModels(options.Galleries, options.ModelLibraryURL, options.ModelPath, options.EnforcePredownloadScans, nil, options.ModelsURL...); err != nil { + log.Error().Err(err).Msg("error installing models") + } + + cl := config.NewBackendConfigLoader(options.ModelPath) + ml := model.NewModelLoader(options.ModelPath) + + configLoaderOpts := options.ToConfigLoaderOptions() + + if err := 
cl.LoadBackendConfigsFromPath(options.ModelPath, configLoaderOpts...); err != nil { + log.Error().Err(err).Msg("error loading config files") + } + + if options.ConfigFile != "" { + if err := cl.LoadMultipleBackendConfigsSingleFile(options.ConfigFile, configLoaderOpts...); err != nil { + log.Error().Err(err).Msg("error loading config file") + } + } + + if err := cl.Preload(options.ModelPath); err != nil { + log.Error().Err(err).Msg("error downloading models") + } + + if options.PreloadJSONModels != "" { + if err := services.ApplyGalleryFromString(options.ModelPath, options.PreloadJSONModels, options.EnforcePredownloadScans, options.Galleries); err != nil { + return nil, nil, nil, err + } + } + + if options.PreloadModelsFromPath != "" { + if err := services.ApplyGalleryFromFile(options.ModelPath, options.PreloadModelsFromPath, options.EnforcePredownloadScans, options.Galleries); err != nil { + return nil, nil, nil, err + } + } + + if options.Debug { + for _, v := range cl.GetAllBackendConfigs() { + log.Debug().Msgf("Model: %s (config: %+v)", v.Name, v) + } + } + + if options.AssetsDestination != "" { + // Extract files from the embedded FS + err := assets.ExtractFiles(options.BackendAssets, options.AssetsDestination) + log.Debug().Msgf("Extracting backend assets files to %s", options.AssetsDestination) + if err != nil { + log.Warn().Msgf("Failed extracting backend assets files: %s (might be required for some backends to work properly)", err) + } + } + + if options.LibPath != "" { + // If there is a lib directory, set LD_LIBRARY_PATH to include it + err := library.LoadExternal(options.LibPath) + if err != nil { + log.Error().Err(err).Str("LibPath", options.LibPath).Msg("Error while loading external libraries") + } + } + + // turn off any process that was started by GRPC if the context is canceled + go func() { + <-options.Context.Done() + log.Debug().Msgf("Context canceled, shutting down") + err := ml.StopAllGRPC() + if err != nil { + log.Error().Err(err).Msg("error 
while stopping all grpc backends") + } + }() + + if options.WatchDog { + wd := model.NewWatchDog( + ml, + options.WatchDogBusyTimeout, + options.WatchDogIdleTimeout, + options.WatchDogBusy, + options.WatchDogIdle) + ml.SetWatchDog(wd) + go wd.Run() + go func() { + <-options.Context.Done() + log.Debug().Msgf("Context canceled, shutting down") + wd.Shutdown() + }() + } + + if options.LoadToMemory != nil { + for _, m := range options.LoadToMemory { + cfg, err := cl.LoadBackendConfigFileByName(m, options.ModelPath, + config.LoadOptionDebug(options.Debug), + config.LoadOptionThreads(options.Threads), + config.LoadOptionContextSize(options.ContextSize), + config.LoadOptionF16(options.F16), + config.ModelPath(options.ModelPath), + ) + if err != nil { + return nil, nil, nil, err + } + + log.Debug().Msgf("Auto loading model %s into memory from file: %s", m, cfg.Model) + + grpcOpts := backend.GRPCModelOpts(*cfg) + o := []model.Option{ + model.WithModel(cfg.Model), + model.WithAssetDir(options.AssetsDestination), + model.WithThreads(uint32(options.Threads)), + model.WithLoadGRPCLoadModelOpts(grpcOpts), + } + + var backendErr error + if cfg.Backend != "" { + o = append(o, model.WithBackendString(cfg.Backend)) + _, backendErr = ml.BackendLoader(o...) + } else { + _, backendErr = ml.GreedyLoader(o...) 
+ } + if backendErr != nil { + return nil, nil, nil, err + } + } + } + + // Watch the configuration directory + startWatcher(options) + + log.Info().Msg("core/startup process completed!") + return cl, ml, options, nil +} + +func startWatcher(options *config.ApplicationConfig) { + if options.DynamicConfigsDir == "" { + // No need to start the watcher if the directory is not set + return + } + + if _, err := os.Stat(options.DynamicConfigsDir); err != nil { + if os.IsNotExist(err) { + // We try to create the directory if it does not exist and was specified + if err := os.MkdirAll(options.DynamicConfigsDir, 0700); err != nil { + log.Error().Err(err).Msg("failed creating DynamicConfigsDir") + } + } else { + // something else happened, we log the error and don't start the watcher + log.Error().Err(err).Msg("failed to read DynamicConfigsDir, watcher will not be started") + return + } + } + + configHandler := newConfigFileHandler(options) + if err := configHandler.Watch(); err != nil { + log.Error().Err(err).Msg("failed creating watcher") + } +} + +// In Lieu of a proper DI framework, this function wires up the Application manually. +// This is in core/startup rather than core/state.go to keep package references clean! 
+func createApplication(appConfig *config.ApplicationConfig) *core.Application { + app := &core.Application{ + ApplicationConfig: appConfig, + BackendConfigLoader: config.NewBackendConfigLoader(appConfig.ModelPath), + ModelLoader: model.NewModelLoader(appConfig.ModelPath), + } + + var err error + + // app.EmbeddingsBackendService = backend.NewEmbeddingsBackendService(app.ModelLoader, app.BackendConfigLoader, app.ApplicationConfig) + // app.ImageGenerationBackendService = backend.NewImageGenerationBackendService(app.ModelLoader, app.BackendConfigLoader, app.ApplicationConfig) + // app.LLMBackendService = backend.NewLLMBackendService(app.ModelLoader, app.BackendConfigLoader, app.ApplicationConfig) + // app.TranscriptionBackendService = backend.NewTranscriptionBackendService(app.ModelLoader, app.BackendConfigLoader, app.ApplicationConfig) + // app.TextToSpeechBackendService = backend.NewTextToSpeechBackendService(app.ModelLoader, app.BackendConfigLoader, app.ApplicationConfig) + + app.BackendMonitorService = services.NewBackendMonitorService(app.ModelLoader, app.BackendConfigLoader, app.ApplicationConfig) + app.GalleryService = services.NewGalleryService(app.ApplicationConfig) + // app.OpenAIService = services.NewOpenAIService(app.ModelLoader, app.BackendConfigLoader, app.ApplicationConfig, app.LLMBackendService) + + app.LocalAIMetricsService, err = services.NewLocalAIMetricsService() + if err != nil { + log.Error().Err(err).Msg("encountered an error initializing metrics service, startup will continue but metrics will not be tracked.") + } + + return app +} From 9bd7f3f995c6a9c9d8e4cab49cb1970a70629efc Mon Sep 17 00:00:00 2001 From: Ettore Di Giacinto Date: Sun, 22 Sep 2024 10:04:20 +0200 Subject: [PATCH 0167/1530] feat(coqui): switch to maintained community fork (#3625) Fixes: https://github.com/mudler/LocalAI/issues/2513 Signed-off-by: Ettore Di Giacinto --- backend/python/coqui/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git 
a/backend/python/coqui/requirements.txt b/backend/python/coqui/requirements.txt index d7708363..2a91f2b9 100644 --- a/backend/python/coqui/requirements.txt +++ b/backend/python/coqui/requirements.txt @@ -1,4 +1,4 @@ -TTS==0.22.0 +coqui-tts grpcio==1.66.1 protobuf certifi \ No newline at end of file From 56f4deb938ee045b2df3b517b7e25c28df252ef5 Mon Sep 17 00:00:00 2001 From: Ettore Di Giacinto Date: Sun, 22 Sep 2024 15:19:38 +0200 Subject: [PATCH 0168/1530] chore(ci): split hipblas jobs Signed-off-by: Ettore Di Giacinto --- .github/workflows/image.yml | 115 ++++++++++++++++++++++-------------- 1 file changed, 72 insertions(+), 43 deletions(-) diff --git a/.github/workflows/image.yml b/.github/workflows/image.yml index 395d7761..f57cf770 100644 --- a/.github/workflows/image.yml +++ b/.github/workflows/image.yml @@ -13,6 +13,78 @@ concurrency: cancel-in-progress: true jobs: + hipblas-jobs: + uses: ./.github/workflows/image_build.yml + with: + tag-latest: ${{ matrix.tag-latest }} + tag-suffix: ${{ matrix.tag-suffix }} + ffmpeg: ${{ matrix.ffmpeg }} + image-type: ${{ matrix.image-type }} + build-type: ${{ matrix.build-type }} + cuda-major-version: ${{ matrix.cuda-major-version }} + cuda-minor-version: ${{ matrix.cuda-minor-version }} + platforms: ${{ matrix.platforms }} + runs-on: ${{ matrix.runs-on }} + base-image: ${{ matrix.base-image }} + grpc-base-image: ${{ matrix.grpc-base-image }} + aio: ${{ matrix.aio }} + makeflags: ${{ matrix.makeflags }} + latest-image: ${{ matrix.latest-image }} + latest-image-aio: ${{ matrix.latest-image-aio }} + secrets: + dockerUsername: ${{ secrets.DOCKERHUB_USERNAME }} + dockerPassword: ${{ secrets.DOCKERHUB_PASSWORD }} + quayUsername: ${{ secrets.LOCALAI_REGISTRY_USERNAME }} + quayPassword: ${{ secrets.LOCALAI_REGISTRY_PASSWORD }} + strategy: + # Pushing with all jobs in parallel + # eats the bandwidth of all the nodes + max-parallel: 1 + matrix: + include: + - build-type: 'hipblas' + platforms: 'linux/amd64' + tag-latest: 'auto' + 
tag-suffix: '-hipblas-ffmpeg' + ffmpeg: 'true' + image-type: 'extras' + aio: "-aio-gpu-hipblas" + base-image: "rocm/dev-ubuntu-22.04:6.1" + grpc-base-image: "ubuntu:22.04" + latest-image: 'latest-gpu-hipblas' + latest-image-aio: 'latest-aio-gpu-hipblas' + runs-on: 'arc-runner-set' + makeflags: "--jobs=3 --output-sync=target" + - build-type: 'hipblas' + platforms: 'linux/amd64' + tag-latest: 'false' + tag-suffix: '-hipblas' + ffmpeg: 'false' + image-type: 'extras' + base-image: "rocm/dev-ubuntu-22.04:6.1" + grpc-base-image: "ubuntu:22.04" + runs-on: 'arc-runner-set' + makeflags: "--jobs=3 --output-sync=target" + - build-type: 'hipblas' + platforms: 'linux/amd64' + tag-latest: 'false' + tag-suffix: '-hipblas-ffmpeg-core' + ffmpeg: 'true' + image-type: 'core' + base-image: "rocm/dev-ubuntu-22.04:6.1" + grpc-base-image: "ubuntu:22.04" + runs-on: 'arc-runner-set' + makeflags: "--jobs=3 --output-sync=target" + - build-type: 'hipblas' + platforms: 'linux/amd64' + tag-latest: 'false' + tag-suffix: '-hipblas-core' + ffmpeg: 'false' + image-type: 'core' + base-image: "rocm/dev-ubuntu-22.04:6.1" + grpc-base-image: "ubuntu:22.04" + runs-on: 'arc-runner-set' + makeflags: "--jobs=3 --output-sync=target" self-hosted-jobs: uses: ./.github/workflows/image_build.yml with: @@ -122,29 +194,6 @@ jobs: base-image: "ubuntu:22.04" runs-on: 'arc-runner-set' makeflags: "--jobs=3 --output-sync=target" - - build-type: 'hipblas' - platforms: 'linux/amd64' - tag-latest: 'auto' - tag-suffix: '-hipblas-ffmpeg' - ffmpeg: 'true' - image-type: 'extras' - aio: "-aio-gpu-hipblas" - base-image: "rocm/dev-ubuntu-22.04:6.1" - grpc-base-image: "ubuntu:22.04" - latest-image: 'latest-gpu-hipblas' - latest-image-aio: 'latest-aio-gpu-hipblas' - runs-on: 'arc-runner-set' - makeflags: "--jobs=3 --output-sync=target" - - build-type: 'hipblas' - platforms: 'linux/amd64' - tag-latest: 'false' - tag-suffix: '-hipblas' - ffmpeg: 'false' - image-type: 'extras' - base-image: "rocm/dev-ubuntu-22.04:6.1" - 
grpc-base-image: "ubuntu:22.04" - runs-on: 'arc-runner-set' - makeflags: "--jobs=3 --output-sync=target" - build-type: 'sycl_f16' platforms: 'linux/amd64' tag-latest: 'auto' @@ -212,26 +261,6 @@ jobs: image-type: 'core' runs-on: 'arc-runner-set' makeflags: "--jobs=3 --output-sync=target" - - build-type: 'hipblas' - platforms: 'linux/amd64' - tag-latest: 'false' - tag-suffix: '-hipblas-ffmpeg-core' - ffmpeg: 'true' - image-type: 'core' - base-image: "rocm/dev-ubuntu-22.04:6.1" - grpc-base-image: "ubuntu:22.04" - runs-on: 'arc-runner-set' - makeflags: "--jobs=3 --output-sync=target" - - build-type: 'hipblas' - platforms: 'linux/amd64' - tag-latest: 'false' - tag-suffix: '-hipblas-core' - ffmpeg: 'false' - image-type: 'core' - base-image: "rocm/dev-ubuntu-22.04:6.1" - grpc-base-image: "ubuntu:22.04" - runs-on: 'arc-runner-set' - makeflags: "--jobs=3 --output-sync=target" core-image-build: uses: ./.github/workflows/image_build.yml From fd70a22196ffc430e286c14e65497dd22f9d3b63 Mon Sep 17 00:00:00 2001 From: Ettore Di Giacinto Date: Sun, 22 Sep 2024 15:21:16 +0200 Subject: [PATCH 0169/1530] chore(ci): adjust parallel jobs Signed-off-by: Ettore Di Giacinto --- .github/workflows/image.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/image.yml b/.github/workflows/image.yml index f57cf770..8709f05c 100644 --- a/.github/workflows/image.yml +++ b/.github/workflows/image.yml @@ -111,7 +111,7 @@ jobs: strategy: # Pushing with all jobs in parallel # eats the bandwidth of all the nodes - max-parallel: ${{ github.event_name != 'pull_request' && 6 || 10 }} + max-parallel: ${{ github.event_name != 'pull_request' && 5 || 8 }} matrix: include: # Extra images From 4edd8c80b407ea415e4cbede6386f8d17efa8f8f Mon Sep 17 00:00:00 2001 From: "LocalAI [bot]" <139863280+localai-bot@users.noreply.github.com> Date: Sun, 22 Sep 2024 23:41:34 +0200 Subject: [PATCH 0170/1530] chore: :arrow_up: Update ggerganov/llama.cpp to 
`c35e586ea57221844442c65a1172498c54971cb0` (#3629) :arrow_up: Update ggerganov/llama.cpp Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com> Co-authored-by: mudler <2420543+mudler@users.noreply.github.com> --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index 51755e71..fe086645 100644 --- a/Makefile +++ b/Makefile @@ -8,7 +8,7 @@ DETECT_LIBS?=true # llama.cpp versions GOLLAMA_REPO?=https://github.com/go-skynet/go-llama.cpp GOLLAMA_VERSION?=2b57a8ae43e4699d3dc5d1496a1ccd42922993be -CPPLLAMA_VERSION?=d09770cae71b416c032ec143dda530f7413c4038 +CPPLLAMA_VERSION?=c35e586ea57221844442c65a1172498c54971cb0 # go-rwkv version RWKV_REPO?=https://github.com/donomii/go-rwkv.cpp From 3e8e71f8b68f9ea843f57f5bebb9aad32700e0ac Mon Sep 17 00:00:00 2001 From: Ettore Di Giacinto Date: Mon, 23 Sep 2024 10:56:10 +0200 Subject: [PATCH 0171/1530] fix(ci): fixup checksum scanning pipeline (#3631) Signed-off-by: Ettore Di Giacinto --- .github/check_and_update.py | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/.github/check_and_update.py b/.github/check_and_update.py index dcf1d04a..704b658e 100644 --- a/.github/check_and_update.py +++ b/.github/check_and_update.py @@ -29,9 +29,14 @@ def calculate_sha256(file_path): def manual_safety_check_hf(repo_id): scanResponse = requests.get('https://huggingface.co/api/models/' + repo_id + "/scan") scan = scanResponse.json() - if scan['hasUnsafeFile']: - return scan - return None + # Check if 'hasUnsafeFile' exists in the response + if 'hasUnsafeFile' in scan: + if scan['hasUnsafeFile']: + return scan + else: + return None + else: + return None download_type, repo_id_or_url = parse_uri(uri) From 51cba89682b40ae92737fa47ce6bdbce9ba8cac6 Mon Sep 17 00:00:00 2001 From: Ettore Di Giacinto Date: Mon, 23 Sep 2024 11:49:07 +0200 Subject: [PATCH 0172/1530] fix(hipblas): do not push all variants to hipblas builds (#3630) Like with CUDA builds, 
we don't need all the variants when we are compiling against the accelerated variants - in this way we save space and we avoid to exceed embedFS golang size limits. Signed-off-by: Ettore Di Giacinto --- Dockerfile | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/Dockerfile b/Dockerfile index f08cb9a0..323c3d9a 100644 --- a/Dockerfile +++ b/Dockerfile @@ -297,10 +297,10 @@ COPY .git . RUN make prepare ## Build the binary -## If it's CUDA, we want to skip some of the llama-compat backends to save space -## We only leave the most CPU-optimized variant and the fallback for the cublas build -## (both will use CUDA for the actual computation) -RUN if [ "${BUILD_TYPE}" = "cublas" ]; then \ +## If it's CUDA or hipblas, we want to skip some of the llama-compat backends to save space +## We only leave the most CPU-optimized variant and the fallback for the cublas/hipblas build +## (both will use CUDA or hipblas for the actual computation) +RUN if [ "${BUILD_TYPE}" = "cublas" ] || [ "${BUILD_TYPE}" = "hipblas" ]; then \ SKIP_GRPC_BACKEND="backend-assets/grpc/llama-cpp-avx backend-assets/grpc/llama-cpp-avx2" make build; \ else \ make build; \ From bf8f8671d1b1daae8f1a1f446ab8f6366ddb4396 Mon Sep 17 00:00:00 2001 From: Ettore Di Giacinto Date: Mon, 23 Sep 2024 19:04:36 +0200 Subject: [PATCH 0173/1530] chore(ci): adjust parallelism Signed-off-by: Ettore Di Giacinto --- .github/workflows/image.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/image.yml b/.github/workflows/image.yml index 8709f05c..6db8bb07 100644 --- a/.github/workflows/image.yml +++ b/.github/workflows/image.yml @@ -39,7 +39,7 @@ jobs: strategy: # Pushing with all jobs in parallel # eats the bandwidth of all the nodes - max-parallel: 1 + max-parallel: 2 matrix: include: - build-type: 'hipblas' From 1da8d8b9db431a62756dd2976d00531b316b0dfa Mon Sep 17 00:00:00 2001 From: Ettore Di Giacinto Date: Mon, 23 Sep 2024 19:09:51 +0200 Subject: [PATCH 
0174/1530] models(gallery): add nightygurps-14b-v1.1 (#3633) Signed-off-by: Ettore Di Giacinto --- gallery/index.yaml | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/gallery/index.yaml b/gallery/index.yaml index 7dab9eb7..1b84c403 100644 --- a/gallery/index.yaml +++ b/gallery/index.yaml @@ -654,6 +654,22 @@ - filename: Llama3.1-8B-ShiningValiant2-Q4_K_M.gguf sha256: 9369eb97922a9f01e4eae610e3d7aaeca30762d78d9239884179451d60bdbdd2 uri: huggingface://bartowski/Llama3.1-8B-ShiningValiant2-GGUF/Llama3.1-8B-ShiningValiant2-Q4_K_M.gguf +- !!merge <<: *llama31 + name: "nightygurps-14b-v1.1" + icon: https://cdn-uploads.huggingface.co/production/uploads/6336c5b3e3ac69e6a90581da/FvfjK7bKqsWdaBkB3eWgP.png + urls: + - https://huggingface.co/AlexBefest/NightyGurps-14b-v1.1 + - https://huggingface.co/bartowski/NightyGurps-14b-v1.1-GGUF + description: | + This model works with Russian only. + This model is designed to run GURPS roleplaying games, as well as consult and assist. This model was trained on an augmented dataset of the GURPS Basic Set rulebook. Its primary purpose was initially to become an assistant consultant and assistant Game Master for the GURPS roleplaying system, but it can also be used as a GM for running solo games as a player. 
+ overrides: + parameters: + model: NightyGurps-14b-v1.1-Q4_K_M.gguf + files: + - filename: NightyGurps-14b-v1.1-Q4_K_M.gguf + sha256: d09d53259ad2c0298150fa8c2db98fe42f11731af89fdc80ad0e255a19adc4b0 + uri: huggingface://bartowski/NightyGurps-14b-v1.1-GGUF/NightyGurps-14b-v1.1-Q4_K_M.gguf ## Uncensored models - !!merge <<: *llama31 name: "humanish-roleplay-llama-3.1-8b-i1" From 26d99ed1c714652ae118e27768273b5b98e7bbf4 Mon Sep 17 00:00:00 2001 From: Ettore Di Giacinto Date: Mon, 23 Sep 2024 19:12:54 +0200 Subject: [PATCH 0175/1530] models(gallery): add gemma-2-9b-arliai-rpmax-v1.1 (#3634) Signed-off-by: Ettore Di Giacinto --- gallery/index.yaml | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/gallery/index.yaml b/gallery/index.yaml index 1b84c403..bddd6b16 100644 --- a/gallery/index.yaml +++ b/gallery/index.yaml @@ -2109,6 +2109,20 @@ - filename: Buddy-2B-v1-Q4_K_M.gguf sha256: 9bd25ed907d1a3c2e07fe09399a9b3aec107d368c29896e2c46facede5b7e3d5 uri: huggingface://bartowski/Buddy-2B-v1-GGUF/Buddy-2B-v1-Q4_K_M.gguf +- !!merge <<: *gemma + name: "gemma-2-9b-arliai-rpmax-v1.1" + urls: + - https://huggingface.co/ArliAI/Gemma-2-9B-ArliAI-RPMax-v1.1 + - https://huggingface.co/bartowski/Gemma-2-9B-ArliAI-RPMax-v1.1-GGUF + description: | + RPMax is a series of models that are trained on a diverse set of curated creative writing and RP datasets with a focus on variety and deduplication. This model is designed to be highly creative and non-repetitive by making sure no two entries in the dataset have repeated characters or situations, which makes sure the model does not latch on to a certain personality and be capable of understanding and acting appropriately to any characters or situations. 
+ overrides: + parameters: + model: Gemma-2-9B-ArliAI-RPMax-v1.1-Q4_K_M.gguf + files: + - filename: Gemma-2-9B-ArliAI-RPMax-v1.1-Q4_K_M.gguf + sha256: 1724aff0ad6f71bf4371d839aca55578f7ec6f030d8d25c0254126088e4c6250 + uri: huggingface://bartowski/Gemma-2-9B-ArliAI-RPMax-v1.1-GGUF/Gemma-2-9B-ArliAI-RPMax-v1.1-Q4_K_M.gguf - &llama3 url: "github:mudler/LocalAI/gallery/llama3-instruct.yaml@master" icon: https://cdn-uploads.huggingface.co/production/uploads/642cc1c253e76b4c2286c58e/aJJxKus1wP5N-euvHEUq7.png From e332ff80660fd3f23ecf67acd2807d22c9cafc85 Mon Sep 17 00:00:00 2001 From: Ettore Di Giacinto Date: Mon, 23 Sep 2024 19:16:41 +0200 Subject: [PATCH 0176/1530] models(gallery): add gemma-2-2b-arliai-rpmax-v1.1 (#3635) Signed-off-by: Ettore Di Giacinto --- gallery/index.yaml | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/gallery/index.yaml b/gallery/index.yaml index bddd6b16..f75e448c 100644 --- a/gallery/index.yaml +++ b/gallery/index.yaml @@ -2123,6 +2123,19 @@ - filename: Gemma-2-9B-ArliAI-RPMax-v1.1-Q4_K_M.gguf sha256: 1724aff0ad6f71bf4371d839aca55578f7ec6f030d8d25c0254126088e4c6250 uri: huggingface://bartowski/Gemma-2-9B-ArliAI-RPMax-v1.1-GGUF/Gemma-2-9B-ArliAI-RPMax-v1.1-Q4_K_M.gguf +- !!merge <<: *gemma + name: "gemma-2-2b-arliai-rpmax-v1.1" + urls: + - https://huggingface.co/bartowski/Gemma-2-2B-ArliAI-RPMax-v1.1-GGUF + description: | + RPMax is a series of models that are trained on a diverse set of curated creative writing and RP datasets with a focus on variety and deduplication. This model is designed to be highly creative and non-repetitive by making sure no two entries in the dataset have repeated characters or situations, which makes sure the model does not latch on to a certain personality and be capable of understanding and acting appropriately to any characters or situations. 
+ overrides: + parameters: + model: Gemma-2-2B-ArliAI-RPMax-v1.1-Q4_K_M.gguf + files: + - filename: Gemma-2-2B-ArliAI-RPMax-v1.1-Q4_K_M.gguf + sha256: 89fe35345754d7e9de8d0c0d5bf35b2be9b12a09811b365b712b8b27112f7712 + uri: huggingface://bartowski/Gemma-2-2B-ArliAI-RPMax-v1.1-GGUF/Gemma-2-2B-ArliAI-RPMax-v1.1-Q4_K_M.gguf - &llama3 url: "github:mudler/LocalAI/gallery/llama3-instruct.yaml@master" icon: https://cdn-uploads.huggingface.co/production/uploads/642cc1c253e76b4c2286c58e/aJJxKus1wP5N-euvHEUq7.png From bbdf78615e72a8dfd5e80b9e1db1c804741fb4e5 Mon Sep 17 00:00:00 2001 From: Ettore Di Giacinto Date: Mon, 23 Sep 2024 19:24:14 +0200 Subject: [PATCH 0177/1530] models(gallery): add acolyte-22b-i1 (#3636) Signed-off-by: Ettore Di Giacinto --- gallery/index.yaml | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/gallery/index.yaml b/gallery/index.yaml index f75e448c..9b8a0220 100644 --- a/gallery/index.yaml +++ b/gallery/index.yaml @@ -1523,6 +1523,21 @@ - filename: Pantheon-RP-1.6-12b-Nemo-Q4_K_M.gguf sha256: cf3465c183bf4ecbccd1b6b480f687e0160475b04c87e2f1e5ebc8baa0f4c7aa uri: huggingface://bartowski/Pantheon-RP-1.6-12b-Nemo-GGUF/Pantheon-RP-1.6-12b-Nemo-Q4_K_M.gguf +- !!merge <<: *mistral03 + name: "acolyte-22b-i1" + icon: https://cdn-uploads.huggingface.co/production/uploads/6569a4ed2419be6072890cf8/3dcGMcrWK2-2vQh9QBt3o.png + urls: + - https://huggingface.co/rAIfle/Acolyte-22B + - https://huggingface.co/mradermacher/Acolyte-22B-i1-GGUF + description: | + LoRA of a bunch of random datasets on top of Mistral-Small-Instruct-2409, then SLERPed onto base at 0.5. Decent enough for its size. Check the LoRA for dataset info. 
+ overrides: + parameters: + model: Acolyte-22B.i1-Q4_K_M.gguf + files: + - filename: Acolyte-22B.i1-Q4_K_M.gguf + sha256: 5a454405b98b6f886e8e4c695488d8ea098162bb8c46f2a7723fc2553c6e2f6e + uri: huggingface://mradermacher/Acolyte-22B-i1-GGUF/Acolyte-22B.i1-Q4_K_M.gguf - !!merge <<: *mistral03 name: "mn-12b-lyra-v4-iq-imatrix" icon: https://cdn-uploads.huggingface.co/production/uploads/65d4cf2693a0a3744a27536c/dVoru83WOpwVjMlgZ_xhA.png From 043cb94436ab44c30f160cc68423aa8915ec800f Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 23 Sep 2024 21:23:21 +0000 Subject: [PATCH 0178/1530] chore(deps): Bump yarl from 1.11.0 to 1.11.1 in /examples/langchain/langchainpy-localai-example (#3643) chore(deps): Bump yarl Bumps [yarl](https://github.com/aio-libs/yarl) from 1.11.0 to 1.11.1. - [Release notes](https://github.com/aio-libs/yarl/releases) - [Changelog](https://github.com/aio-libs/yarl/blob/master/CHANGES.rst) - [Commits](https://github.com/aio-libs/yarl/compare/v1.11.0...v1.11.1) --- updated-dependencies: - dependency-name: yarl dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- examples/langchain/langchainpy-localai-example/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/langchain/langchainpy-localai-example/requirements.txt b/examples/langchain/langchainpy-localai-example/requirements.txt index 98325db3..3e4133ca 100644 --- a/examples/langchain/langchainpy-localai-example/requirements.txt +++ b/examples/langchain/langchainpy-localai-example/requirements.txt @@ -30,4 +30,4 @@ tqdm==4.66.5 typing-inspect==0.9.0 typing_extensions==4.12.2 urllib3==2.2.2 -yarl==1.11.0 +yarl==1.11.1 From cc6fac1688e5f700baa1d460106861abc7a1d2f4 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 24 Sep 2024 01:16:39 +0000 Subject: [PATCH 0179/1530] chore(deps): Bump urllib3 from 2.2.2 to 2.2.3 in /examples/langchain/langchainpy-localai-example (#3646) chore(deps): Bump urllib3 Bumps [urllib3](https://github.com/urllib3/urllib3) from 2.2.2 to 2.2.3. - [Release notes](https://github.com/urllib3/urllib3/releases) - [Changelog](https://github.com/urllib3/urllib3/blob/main/CHANGES.rst) - [Commits](https://github.com/urllib3/urllib3/compare/2.2.2...2.2.3) --- updated-dependencies: - dependency-name: urllib3 dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- examples/langchain/langchainpy-localai-example/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/langchain/langchainpy-localai-example/requirements.txt b/examples/langchain/langchainpy-localai-example/requirements.txt index 3e4133ca..675429a3 100644 --- a/examples/langchain/langchainpy-localai-example/requirements.txt +++ b/examples/langchain/langchainpy-localai-example/requirements.txt @@ -29,5 +29,5 @@ tenacity==8.5.0 tqdm==4.66.5 typing-inspect==0.9.0 typing_extensions==4.12.2 -urllib3==2.2.2 +urllib3==2.2.3 yarl==1.11.1 From b8e129f2a6541a23f9c0b595ba12daa7e41a5a18 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 24 Sep 2024 02:53:35 +0000 Subject: [PATCH 0180/1530] chore(deps): Bump idna from 3.8 to 3.10 in /examples/langchain/langchainpy-localai-example (#3644) chore(deps): Bump idna Bumps [idna](https://github.com/kjd/idna) from 3.8 to 3.10. - [Release notes](https://github.com/kjd/idna/releases) - [Changelog](https://github.com/kjd/idna/blob/master/HISTORY.rst) - [Commits](https://github.com/kjd/idna/compare/v3.8...v3.10) --- updated-dependencies: - dependency-name: idna dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- examples/langchain/langchainpy-localai-example/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/langchain/langchainpy-localai-example/requirements.txt b/examples/langchain/langchainpy-localai-example/requirements.txt index 675429a3..64a43bea 100644 --- a/examples/langchain/langchainpy-localai-example/requirements.txt +++ b/examples/langchain/langchainpy-localai-example/requirements.txt @@ -9,7 +9,7 @@ dataclasses-json==0.6.7 debugpy==1.8.2 frozenlist==1.4.1 greenlet==3.1.0 -idna==3.8 +idna==3.10 langchain==0.3.0 langchain-community==0.2.16 marshmallow==3.22.0 From c1752cbb831fe9ccb3dd113202884d4f670afbb7 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 24 Sep 2024 04:30:05 +0000 Subject: [PATCH 0181/1530] chore(deps): Bump sqlalchemy from 2.0.32 to 2.0.35 in /examples/langchain/langchainpy-localai-example (#3649) chore(deps): Bump sqlalchemy Bumps [sqlalchemy](https://github.com/sqlalchemy/sqlalchemy) from 2.0.32 to 2.0.35. - [Release notes](https://github.com/sqlalchemy/sqlalchemy/releases) - [Changelog](https://github.com/sqlalchemy/sqlalchemy/blob/main/CHANGES.rst) - [Commits](https://github.com/sqlalchemy/sqlalchemy/commits) --- updated-dependencies: - dependency-name: sqlalchemy dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- examples/langchain/langchainpy-localai-example/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/langchain/langchainpy-localai-example/requirements.txt b/examples/langchain/langchainpy-localai-example/requirements.txt index 64a43bea..ac147410 100644 --- a/examples/langchain/langchainpy-localai-example/requirements.txt +++ b/examples/langchain/langchainpy-localai-example/requirements.txt @@ -24,7 +24,7 @@ packaging>=23.2 pydantic==2.8.2 PyYAML==6.0.2 requests==2.32.3 -SQLAlchemy==2.0.32 +SQLAlchemy==2.0.35 tenacity==8.5.0 tqdm==4.66.5 typing-inspect==0.9.0 From 69d2902b0a6e7647e16092118d73f779d80f266e Mon Sep 17 00:00:00 2001 From: "LocalAI [bot]" <139863280+localai-bot@users.noreply.github.com> Date: Tue, 24 Sep 2024 09:31:28 +0200 Subject: [PATCH 0182/1530] chore: :arrow_up: Update ggerganov/llama.cpp to `f0c7b5edf82aa200656fd88c11ae3a805d7130bf` (#3653) :arrow_up: Update ggerganov/llama.cpp Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com> Co-authored-by: mudler <2420543+mudler@users.noreply.github.com> --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index fe086645..578656e5 100644 --- a/Makefile +++ b/Makefile @@ -8,7 +8,7 @@ DETECT_LIBS?=true # llama.cpp versions GOLLAMA_REPO?=https://github.com/go-skynet/go-llama.cpp GOLLAMA_VERSION?=2b57a8ae43e4699d3dc5d1496a1ccd42922993be -CPPLLAMA_VERSION?=c35e586ea57221844442c65a1172498c54971cb0 +CPPLLAMA_VERSION?=f0c7b5edf82aa200656fd88c11ae3a805d7130bf # go-rwkv version RWKV_REPO?=https://github.com/donomii/go-rwkv.cpp From 90cacb9692f3dc374766b0e32f75be8229a47db3 Mon Sep 17 00:00:00 2001 From: Dave Date: Tue, 24 Sep 2024 03:32:48 -0400 Subject: [PATCH 0183/1530] test: preliminary tests and merge fix for authv2 (#3584) * add api key to existing app tests, add preliminary 
auth test Signed-off-by: Dave Lee * small fix, run test Signed-off-by: Dave Lee * status on non-opaque Signed-off-by: Dave Lee * tweak auth error Signed-off-by: Dave Lee * exp Signed-off-by: Dave Lee * quick fix on real laptop Signed-off-by: Dave Lee * add downloader version that allows providing an auth header Signed-off-by: Dave Lee * stash some devcontainer fixes during testing Signed-off-by: Dave Lee * s2 Signed-off-by: Dave Lee * s Signed-off-by: Dave Lee * done with experiment Signed-off-by: Dave Lee * done with experiment Signed-off-by: Dave Lee * after merge fix Signed-off-by: Dave Lee * rename and fix Signed-off-by: Dave Lee --------- Signed-off-by: Dave Lee Co-authored-by: Ettore Di Giacinto --- .devcontainer-scripts/utils.sh | 2 + Dockerfile | 5 +-- Makefile | 3 ++ core/gallery/gallery.go | 4 +- core/gallery/models.go | 2 +- core/http/app.go | 18 --------- core/http/app_test.go | 69 ++++++++++++++++++++++++++++++---- core/http/middleware/auth.go | 3 +- embedded/embedded.go | 2 +- go.mod | 4 +- pkg/downloader/uri.go | 18 +++++++-- pkg/downloader/uri_test.go | 6 +-- 12 files changed, 95 insertions(+), 41 deletions(-) diff --git a/.devcontainer-scripts/utils.sh b/.devcontainer-scripts/utils.sh index 98ac063c..8416d43d 100644 --- a/.devcontainer-scripts/utils.sh +++ b/.devcontainer-scripts/utils.sh @@ -9,6 +9,7 @@ # Param 2: email # config_user() { + echo "Configuring git for $1 <$2>" local gcn=$(git config --global user.name) if [ -z "${gcn}" ]; then echo "Setting up git user / remote" @@ -24,6 +25,7 @@ config_user() { # Param 2: remote url # config_remote() { + echo "Adding git remote and fetching $2 as $1" local gr=$(git remote -v | grep $1) if [ -z "${gr}" ]; then git remote add $1 $2 diff --git a/Dockerfile b/Dockerfile index 323c3d9a..8c657469 100644 --- a/Dockerfile +++ b/Dockerfile @@ -338,9 +338,8 @@ RUN if [ "${FFMPEG}" = "true" ]; then \ RUN apt-get update && \ apt-get install -y --no-install-recommends \ - ssh less && \ - apt-get clean && \ - rm 
-rf /var/lib/apt/lists/* + ssh less wget +# For the devcontainer, leave apt functional in case additional devtools are needed at runtime. RUN go install github.com/go-delve/delve/cmd/dlv@latest diff --git a/Makefile b/Makefile index 578656e5..7523d5ff 100644 --- a/Makefile +++ b/Makefile @@ -359,6 +359,9 @@ clean-tests: rm -rf test-dir rm -rf core/http/backend-assets +clean-dc: clean + cp -r /build/backend-assets /workspace/backend-assets + ## Build: build: prepare backend-assets grpcs ## Build the project $(info ${GREEN}I local-ai build info:${RESET}) diff --git a/core/gallery/gallery.go b/core/gallery/gallery.go index 6ced6244..3a60e618 100644 --- a/core/gallery/gallery.go +++ b/core/gallery/gallery.go @@ -132,7 +132,7 @@ func AvailableGalleryModels(galleries []config.Gallery, basePath string) ([]*Gal func findGalleryURLFromReferenceURL(url string, basePath string) (string, error) { var refFile string uri := downloader.URI(url) - err := uri.DownloadAndUnmarshal(basePath, func(url string, d []byte) error { + err := uri.DownloadWithCallback(basePath, func(url string, d []byte) error { refFile = string(d) if len(refFile) == 0 { return fmt.Errorf("invalid reference file at url %s: %s", url, d) @@ -156,7 +156,7 @@ func getGalleryModels(gallery config.Gallery, basePath string) ([]*GalleryModel, } uri := downloader.URI(gallery.URL) - err := uri.DownloadAndUnmarshal(basePath, func(url string, d []byte) error { + err := uri.DownloadWithCallback(basePath, func(url string, d []byte) error { return yaml.Unmarshal(d, &models) }) if err != nil { diff --git a/core/gallery/models.go b/core/gallery/models.go index dec6312e..58f1963a 100644 --- a/core/gallery/models.go +++ b/core/gallery/models.go @@ -69,7 +69,7 @@ type PromptTemplate struct { func GetGalleryConfigFromURL(url string, basePath string) (Config, error) { var config Config uri := downloader.URI(url) - err := uri.DownloadAndUnmarshal(basePath, func(url string, d []byte) error { + err := 
uri.DownloadWithCallback(basePath, func(url string, d []byte) error { return yaml.Unmarshal(d, &config) }) if err != nil { diff --git a/core/http/app.go b/core/http/app.go index fa9cd866..23e97f18 100644 --- a/core/http/app.go +++ b/core/http/app.go @@ -31,24 +31,6 @@ import ( "github.com/rs/zerolog/log" ) -func readAuthHeader(c *fiber.Ctx) string { - authHeader := c.Get("Authorization") - - // elevenlabs - xApiKey := c.Get("xi-api-key") - if xApiKey != "" { - authHeader = "Bearer " + xApiKey - } - - // anthropic - xApiKey = c.Get("x-api-key") - if xApiKey != "" { - authHeader = "Bearer " + xApiKey - } - - return authHeader -} - // Embed a directory // //go:embed static/* diff --git a/core/http/app_test.go b/core/http/app_test.go index 86fe7fdd..bbe52c34 100644 --- a/core/http/app_test.go +++ b/core/http/app_test.go @@ -31,6 +31,9 @@ import ( "github.com/sashabaranov/go-openai/jsonschema" ) +const apiKey = "joshua" +const bearerKey = "Bearer " + apiKey + const testPrompt = `### System: You are an AI assistant that follows instruction extremely well. Help as much as you can. 
@@ -50,11 +53,19 @@ type modelApplyRequest struct { func getModelStatus(url string) (response map[string]interface{}) { // Create the HTTP request - resp, err := http.Get(url) + req, err := http.NewRequest("GET", url, nil) + req.Header.Set("Content-Type", "application/json") + req.Header.Set("Authorization", bearerKey) if err != nil { fmt.Println("Error creating request:", err) return } + client := &http.Client{} + resp, err := client.Do(req) + if err != nil { + fmt.Println("Error sending request:", err) + return + } defer resp.Body.Close() body, err := io.ReadAll(resp.Body) @@ -72,14 +83,15 @@ func getModelStatus(url string) (response map[string]interface{}) { return } -func getModels(url string) (response []gallery.GalleryModel) { +func getModels(url string) ([]gallery.GalleryModel, error) { + response := []gallery.GalleryModel{} uri := downloader.URI(url) // TODO: No tests currently seem to exercise file:// urls. Fix? - uri.DownloadAndUnmarshal("", func(url string, i []byte) error { + err := uri.DownloadWithAuthorizationAndCallback("", bearerKey, func(url string, i []byte) error { // Unmarshal YAML data into a struct return json.Unmarshal(i, &response) }) - return + return response, err } func postModelApplyRequest(url string, request modelApplyRequest) (response map[string]interface{}) { @@ -101,6 +113,7 @@ func postModelApplyRequest(url string, request modelApplyRequest) (response map[ return } req.Header.Set("Content-Type", "application/json") + req.Header.Set("Authorization", bearerKey) // Make the request client := &http.Client{} @@ -140,6 +153,7 @@ func postRequestJSON[B any](url string, bodyJson *B) error { } req.Header.Set("Content-Type", "application/json") + req.Header.Set("Authorization", bearerKey) client := &http.Client{} resp, err := client.Do(req) @@ -175,6 +189,7 @@ func postRequestResponseJSON[B1 any, B2 any](url string, reqJson *B1, respJson * } req.Header.Set("Content-Type", "application/json") + req.Header.Set("Authorization", bearerKey) 
client := &http.Client{} resp, err := client.Do(req) @@ -195,6 +210,35 @@ func postRequestResponseJSON[B1 any, B2 any](url string, reqJson *B1, respJson * return json.Unmarshal(body, respJson) } +func postInvalidRequest(url string) (error, int) { + + req, err := http.NewRequest("POST", url, bytes.NewBufferString("invalid request")) + if err != nil { + return err, -1 + } + + req.Header.Set("Content-Type", "application/json") + + client := &http.Client{} + resp, err := client.Do(req) + if err != nil { + return err, -1 + } + + defer resp.Body.Close() + + body, err := io.ReadAll(resp.Body) + if err != nil { + return err, -1 + } + + if resp.StatusCode < 200 || resp.StatusCode >= 400 { + return fmt.Errorf("unexpected status code: %d, body: %s", resp.StatusCode, string(body)), resp.StatusCode + } + + return nil, resp.StatusCode +} + //go:embed backend-assets/* var backendAssets embed.FS @@ -260,6 +304,7 @@ var _ = Describe("API test", func() { config.WithContext(c), config.WithGalleries(galleries), config.WithModelPath(modelDir), + config.WithApiKeys([]string{apiKey}), config.WithBackendAssets(backendAssets), config.WithBackendAssetsOutput(backendAssetsDir))...) 
Expect(err).ToNot(HaveOccurred()) @@ -269,7 +314,7 @@ var _ = Describe("API test", func() { go app.Listen("127.0.0.1:9090") - defaultConfig := openai.DefaultConfig("") + defaultConfig := openai.DefaultConfig(apiKey) defaultConfig.BaseURL = "http://127.0.0.1:9090/v1" client2 = openaigo.NewClient("") @@ -295,10 +340,19 @@ var _ = Describe("API test", func() { Expect(err).To(HaveOccurred()) }) + Context("Auth Tests", func() { + It("Should fail if the api key is missing", func() { + err, sc := postInvalidRequest("http://127.0.0.1:9090/models/available") + Expect(err).ToNot(BeNil()) + Expect(sc).To(Equal(403)) + }) + }) + Context("Applying models", func() { It("applies models from a gallery", func() { - models := getModels("http://127.0.0.1:9090/models/available") + models, err := getModels("http://127.0.0.1:9090/models/available") + Expect(err).To(BeNil()) Expect(len(models)).To(Equal(2), fmt.Sprint(models)) Expect(models[0].Installed).To(BeFalse(), fmt.Sprint(models)) Expect(models[1].Installed).To(BeFalse(), fmt.Sprint(models)) @@ -331,7 +385,8 @@ var _ = Describe("API test", func() { Expect(content["backend"]).To(Equal("bert-embeddings")) Expect(content["foo"]).To(Equal("bar")) - models = getModels("http://127.0.0.1:9090/models/available") + models, err = getModels("http://127.0.0.1:9090/models/available") + Expect(err).To(BeNil()) Expect(len(models)).To(Equal(2), fmt.Sprint(models)) Expect(models[0].Name).To(Or(Equal("bert"), Equal("bert2"))) Expect(models[1].Name).To(Or(Equal("bert"), Equal("bert2"))) diff --git a/core/http/middleware/auth.go b/core/http/middleware/auth.go index bc8bcf80..d2152e9b 100644 --- a/core/http/middleware/auth.go +++ b/core/http/middleware/auth.go @@ -38,6 +38,7 @@ func getApiKeyErrorHandler(applicationConfig *config.ApplicationConfig) fiber.Er if applicationConfig.OpaqueErrors { return ctx.SendStatus(403) } + return ctx.Status(403).SendString(err.Error()) } if applicationConfig.OpaqueErrors { return ctx.SendStatus(500) @@ -90,4 +91,4 @@ 
func getApiKeyRequiredFilterFunction(applicationConfig *config.ApplicationConfig } } return func(c *fiber.Ctx) bool { return false } -} \ No newline at end of file +} diff --git a/embedded/embedded.go b/embedded/embedded.go index 672c32ed..3a4ea262 100644 --- a/embedded/embedded.go +++ b/embedded/embedded.go @@ -39,7 +39,7 @@ func init() { func GetRemoteLibraryShorteners(url string, basePath string) (map[string]string, error) { remoteLibrary := map[string]string{} uri := downloader.URI(url) - err := uri.DownloadAndUnmarshal(basePath, func(_ string, i []byte) error { + err := uri.DownloadWithCallback(basePath, func(_ string, i []byte) error { return yaml.Unmarshal(i, &remoteLibrary) }) if err != nil { diff --git a/go.mod b/go.mod index a3359abf..dd8fce9f 100644 --- a/go.mod +++ b/go.mod @@ -1,8 +1,8 @@ module github.com/mudler/LocalAI -go 1.22.0 +go 1.23 -toolchain go1.22.4 +toolchain go1.23.1 require ( dario.cat/mergo v1.0.0 diff --git a/pkg/downloader/uri.go b/pkg/downloader/uri.go index 7fedd646..9acbb621 100644 --- a/pkg/downloader/uri.go +++ b/pkg/downloader/uri.go @@ -31,7 +31,11 @@ const ( type URI string -func (uri URI) DownloadAndUnmarshal(basePath string, f func(url string, i []byte) error) error { +func (uri URI) DownloadWithCallback(basePath string, f func(url string, i []byte) error) error { + return uri.DownloadWithAuthorizationAndCallback(basePath, "", f) +} + +func (uri URI) DownloadWithAuthorizationAndCallback(basePath string, authorization string, f func(url string, i []byte) error) error { url := uri.ResolveURL() if strings.HasPrefix(url, LocalPrefix) { @@ -41,7 +45,6 @@ func (uri URI) DownloadAndUnmarshal(basePath string, f func(url string, i []byte if err != nil { return err } - // ??? 
resolvedBasePath, err := filepath.EvalSymlinks(basePath) if err != nil { return err @@ -63,7 +66,16 @@ func (uri URI) DownloadAndUnmarshal(basePath string, f func(url string, i []byte } // Send a GET request to the URL - response, err := http.Get(url) + + req, err := http.NewRequest("GET", url, nil) + if err != nil { + return err + } + if authorization != "" { + req.Header.Add("Authorization", authorization) + } + + response, err := http.DefaultClient.Do(req) if err != nil { return err } diff --git a/pkg/downloader/uri_test.go b/pkg/downloader/uri_test.go index 21a093a9..3b7a80b3 100644 --- a/pkg/downloader/uri_test.go +++ b/pkg/downloader/uri_test.go @@ -11,7 +11,7 @@ var _ = Describe("Gallery API tests", func() { It("parses github with a branch", func() { uri := URI("github:go-skynet/model-gallery/gpt4all-j.yaml") Expect( - uri.DownloadAndUnmarshal("", func(url string, i []byte) error { + uri.DownloadWithCallback("", func(url string, i []byte) error { Expect(url).To(Equal("https://raw.githubusercontent.com/go-skynet/model-gallery/main/gpt4all-j.yaml")) return nil }), @@ -21,7 +21,7 @@ var _ = Describe("Gallery API tests", func() { uri := URI("github:go-skynet/model-gallery/gpt4all-j.yaml@main") Expect( - uri.DownloadAndUnmarshal("", func(url string, i []byte) error { + uri.DownloadWithCallback("", func(url string, i []byte) error { Expect(url).To(Equal("https://raw.githubusercontent.com/go-skynet/model-gallery/main/gpt4all-j.yaml")) return nil }), @@ -30,7 +30,7 @@ var _ = Describe("Gallery API tests", func() { It("parses github with urls", func() { uri := URI("https://raw.githubusercontent.com/go-skynet/model-gallery/main/gpt4all-j.yaml") Expect( - uri.DownloadAndUnmarshal("", func(url string, i []byte) error { + uri.DownloadWithCallback("", func(url string, i []byte) error { Expect(url).To(Equal("https://raw.githubusercontent.com/go-skynet/model-gallery/main/gpt4all-j.yaml")) return nil }), From 0893d3cbbebc6f7c5fa1d65e4b17e7d900ae60d4 Mon Sep 17 00:00:00 2001 
From: Ettore Di Giacinto Date: Tue, 24 Sep 2024 20:25:59 +0200 Subject: [PATCH 0184/1530] fix(health): do not require auth for /healthz and /readyz (#3656) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * fix(health): do not require auth for /healthz and /readyz Fixes: #3655 Signed-off-by: Ettore Di Giacinto * Comment so I don’t forget Adding a reminder here... --------- Signed-off-by: Ettore Di Giacinto Co-authored-by: Dave --- core/http/app.go | 3 +++ core/http/routes/health.go | 13 +++++++++++++ core/http/routes/localai.go | 8 -------- 3 files changed, 16 insertions(+), 8 deletions(-) create mode 100644 core/http/routes/health.go diff --git a/core/http/app.go b/core/http/app.go index 23e97f18..2cf0ad17 100644 --- a/core/http/app.go +++ b/core/http/app.go @@ -121,6 +121,9 @@ func App(cl *config.BackendConfigLoader, ml *model.ModelLoader, appConfig *confi }) } + // Health Checks should always be exempt from auth, so register these first + routes.HealthRoutes(app) + kaConfig, err := middleware.GetKeyAuthConfig(appConfig) if err != nil || kaConfig == nil { return nil, fmt.Errorf("failed to create key auth config: %w", err) diff --git a/core/http/routes/health.go b/core/http/routes/health.go new file mode 100644 index 00000000..f5a08e9b --- /dev/null +++ b/core/http/routes/health.go @@ -0,0 +1,13 @@ +package routes + +import "github.com/gofiber/fiber/v2" + +func HealthRoutes(app *fiber.App) { + // Service health checks + ok := func(c *fiber.Ctx) error { + return c.SendStatus(200) + } + + app.Get("/healthz", ok) + app.Get("/readyz", ok) +} diff --git a/core/http/routes/localai.go b/core/http/routes/localai.go index 247596c0..2f65e779 100644 --- a/core/http/routes/localai.go +++ b/core/http/routes/localai.go @@ -42,14 +42,6 @@ func RegisterLocalAIRoutes(app *fiber.App, app.Post("/stores/get", localai.StoresGetEndpoint(sl, appConfig)) app.Post("/stores/find", localai.StoresFindEndpoint(sl, appConfig)) - // Kubernetes health 
checks - ok := func(c *fiber.Ctx) error { - return c.SendStatus(200) - } - - app.Get("/healthz", ok) - app.Get("/readyz", ok) - app.Get("/metrics", localai.LocalAIMetricsEndpoint()) // Experimental Backend Statistics Module From 6555994060662db4c3600c0d51b18e10f5cac890 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 24 Sep 2024 21:22:08 +0000 Subject: [PATCH 0185/1530] chore(deps): Bump sentence-transformers from 3.1.0 to 3.1.1 in /backend/python/sentencetransformers (#3651) chore(deps): Bump sentence-transformers Bumps [sentence-transformers](https://github.com/UKPLab/sentence-transformers) from 3.1.0 to 3.1.1. - [Release notes](https://github.com/UKPLab/sentence-transformers/releases) - [Commits](https://github.com/UKPLab/sentence-transformers/compare/v3.1.0...v3.1.1) --- updated-dependencies: - dependency-name: sentence-transformers dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- backend/python/sentencetransformers/requirements-cpu.txt | 2 +- backend/python/sentencetransformers/requirements-cublas11.txt | 2 +- backend/python/sentencetransformers/requirements-cublas12.txt | 2 +- backend/python/sentencetransformers/requirements-hipblas.txt | 2 +- backend/python/sentencetransformers/requirements-intel.txt | 2 +- 5 files changed, 5 insertions(+), 5 deletions(-) diff --git a/backend/python/sentencetransformers/requirements-cpu.txt b/backend/python/sentencetransformers/requirements-cpu.txt index f88de1e4..0fd8f35e 100644 --- a/backend/python/sentencetransformers/requirements-cpu.txt +++ b/backend/python/sentencetransformers/requirements-cpu.txt @@ -2,5 +2,5 @@ torch accelerate transformers bitsandbytes -sentence-transformers==3.1.0 +sentence-transformers==3.1.1 transformers \ No newline at end of file diff --git 
a/backend/python/sentencetransformers/requirements-cublas11.txt b/backend/python/sentencetransformers/requirements-cublas11.txt index 57caf1a1..92a10b16 100644 --- a/backend/python/sentencetransformers/requirements-cublas11.txt +++ b/backend/python/sentencetransformers/requirements-cublas11.txt @@ -1,5 +1,5 @@ --extra-index-url https://download.pytorch.org/whl/cu118 torch accelerate -sentence-transformers==3.1.0 +sentence-transformers==3.1.1 transformers \ No newline at end of file diff --git a/backend/python/sentencetransformers/requirements-cublas12.txt b/backend/python/sentencetransformers/requirements-cublas12.txt index 834fa6a4..f68bb1b9 100644 --- a/backend/python/sentencetransformers/requirements-cublas12.txt +++ b/backend/python/sentencetransformers/requirements-cublas12.txt @@ -1,4 +1,4 @@ torch accelerate -sentence-transformers==3.1.0 +sentence-transformers==3.1.1 transformers \ No newline at end of file diff --git a/backend/python/sentencetransformers/requirements-hipblas.txt b/backend/python/sentencetransformers/requirements-hipblas.txt index 98a0a41b..920eb855 100644 --- a/backend/python/sentencetransformers/requirements-hipblas.txt +++ b/backend/python/sentencetransformers/requirements-hipblas.txt @@ -1,5 +1,5 @@ --extra-index-url https://download.pytorch.org/whl/rocm6.0 torch accelerate -sentence-transformers==3.1.0 +sentence-transformers==3.1.1 transformers \ No newline at end of file diff --git a/backend/python/sentencetransformers/requirements-intel.txt b/backend/python/sentencetransformers/requirements-intel.txt index 5948910d..6ae4bdd4 100644 --- a/backend/python/sentencetransformers/requirements-intel.txt +++ b/backend/python/sentencetransformers/requirements-intel.txt @@ -4,5 +4,5 @@ torch optimum[openvino] setuptools==69.5.1 # https://github.com/mudler/LocalAI/issues/2406 accelerate -sentence-transformers==3.1.0 +sentence-transformers==3.1.1 transformers \ No newline at end of file From c54cfd3609489c648859736b3038a322339a8bfd Mon Sep 17 
00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 24 Sep 2024 22:59:11 +0000 Subject: [PATCH 0186/1530] chore(deps): Bump pydantic from 2.8.2 to 2.9.2 in /examples/langchain/langchainpy-localai-example (#3648) chore(deps): Bump pydantic Bumps [pydantic](https://github.com/pydantic/pydantic) from 2.8.2 to 2.9.2. - [Release notes](https://github.com/pydantic/pydantic/releases) - [Changelog](https://github.com/pydantic/pydantic/blob/main/HISTORY.md) - [Commits](https://github.com/pydantic/pydantic/compare/v2.8.2...v2.9.2) --- updated-dependencies: - dependency-name: pydantic dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- examples/langchain/langchainpy-localai-example/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/langchain/langchainpy-localai-example/requirements.txt b/examples/langchain/langchainpy-localai-example/requirements.txt index ac147410..179abc2a 100644 --- a/examples/langchain/langchainpy-localai-example/requirements.txt +++ b/examples/langchain/langchainpy-localai-example/requirements.txt @@ -21,7 +21,7 @@ numpy==2.1.1 openai==1.45.1 openapi-schema-pydantic==1.2.4 packaging>=23.2 -pydantic==2.8.2 +pydantic==2.9.2 PyYAML==6.0.2 requests==2.32.3 SQLAlchemy==2.0.35 From 0d784f46e55e39fb988c171c32ef664c9ff2801c Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 25 Sep 2024 01:15:53 +0000 Subject: [PATCH 0187/1530] chore(deps): Bump openai from 1.45.1 to 1.47.1 in /examples/functions (#3645) Bumps [openai](https://github.com/openai/openai-python) from 1.45.1 to 1.47.1. 
- [Release notes](https://github.com/openai/openai-python/releases) - [Changelog](https://github.com/openai/openai-python/blob/main/CHANGELOG.md) - [Commits](https://github.com/openai/openai-python/compare/v1.45.1...v1.47.1) --- updated-dependencies: - dependency-name: openai dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- examples/functions/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/functions/requirements.txt b/examples/functions/requirements.txt index 670090d3..c3ffad01 100644 --- a/examples/functions/requirements.txt +++ b/examples/functions/requirements.txt @@ -1,2 +1,2 @@ langchain==0.3.0 -openai==1.45.1 +openai==1.47.1 From aa87eff28330a65818842884515ca1806165c209 Mon Sep 17 00:00:00 2001 From: "LocalAI [bot]" <139863280+localai-bot@users.noreply.github.com> Date: Wed, 25 Sep 2024 06:51:20 +0200 Subject: [PATCH 0188/1530] chore: :arrow_up: Update ggerganov/llama.cpp to `70392f1f81470607ba3afef04aa56c9f65587664` (#3659) :arrow_up: Update ggerganov/llama.cpp Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com> Co-authored-by: mudler <2420543+mudler@users.noreply.github.com> Co-authored-by: Dave --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index 7523d5ff..6865f5a1 100644 --- a/Makefile +++ b/Makefile @@ -8,7 +8,7 @@ DETECT_LIBS?=true # llama.cpp versions GOLLAMA_REPO?=https://github.com/go-skynet/go-llama.cpp GOLLAMA_VERSION?=2b57a8ae43e4699d3dc5d1496a1ccd42922993be -CPPLLAMA_VERSION?=f0c7b5edf82aa200656fd88c11ae3a805d7130bf +CPPLLAMA_VERSION?=70392f1f81470607ba3afef04aa56c9f65587664 # go-rwkv version RWKV_REPO?=https://github.com/donomii/go-rwkv.cpp From a370a11115879a9e02410f55136f563391976254 Mon Sep 17 00:00:00 2001 From: "LocalAI [bot]" 
<139863280+localai-bot@users.noreply.github.com> Date: Wed, 25 Sep 2024 08:47:03 +0200 Subject: [PATCH 0189/1530] docs: :arrow_up: update docs version mudler/LocalAI (#3657) :arrow_up: Update docs version mudler/LocalAI Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com> Co-authored-by: mudler <2420543+mudler@users.noreply.github.com> --- docs/data/version.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/data/version.json b/docs/data/version.json index dc128c66..0dba0428 100644 --- a/docs/data/version.json +++ b/docs/data/version.json @@ -1,3 +1,3 @@ { - "version": "v2.20.1" + "version": "v2.21.0" } From 1b8a77433a88ce1a56d364b4dc81d9030f4e2830 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 25 Sep 2024 08:47:33 +0200 Subject: [PATCH 0190/1530] chore(deps): Bump llama-index from 0.11.7 to 0.11.12 in /examples/langchain-chroma (#3639) chore(deps): Bump llama-index in /examples/langchain-chroma Bumps [llama-index](https://github.com/run-llama/llama_index) from 0.11.7 to 0.11.12. - [Release notes](https://github.com/run-llama/llama_index/releases) - [Changelog](https://github.com/run-llama/llama_index/blob/main/CHANGELOG.md) - [Commits](https://github.com/run-llama/llama_index/compare/v0.11.7...v0.11.12) --- updated-dependencies: - dependency-name: llama-index dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- examples/langchain-chroma/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/langchain-chroma/requirements.txt b/examples/langchain-chroma/requirements.txt index 4884d4aa..3f7bec69 100644 --- a/examples/langchain-chroma/requirements.txt +++ b/examples/langchain-chroma/requirements.txt @@ -1,4 +1,4 @@ langchain==0.3.0 openai==1.45.1 chromadb==0.5.5 -llama-index==0.11.7 \ No newline at end of file +llama-index==0.11.12 \ No newline at end of file From 8002ad27cb7b67f8489a5f3cda66437acf2aac74 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 25 Sep 2024 08:47:57 +0200 Subject: [PATCH 0191/1530] chore(deps): Bump openai from 1.45.1 to 1.47.1 in /examples/langchain-chroma (#3641) chore(deps): Bump openai in /examples/langchain-chroma Bumps [openai](https://github.com/openai/openai-python) from 1.45.1 to 1.47.1. - [Release notes](https://github.com/openai/openai-python/releases) - [Changelog](https://github.com/openai/openai-python/blob/main/CHANGELOG.md) - [Commits](https://github.com/openai/openai-python/compare/v1.45.1...v1.47.1) --- updated-dependencies: - dependency-name: openai dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- examples/langchain-chroma/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/langchain-chroma/requirements.txt b/examples/langchain-chroma/requirements.txt index 3f7bec69..0c77892d 100644 --- a/examples/langchain-chroma/requirements.txt +++ b/examples/langchain-chroma/requirements.txt @@ -1,4 +1,4 @@ langchain==0.3.0 -openai==1.45.1 +openai==1.47.1 chromadb==0.5.5 llama-index==0.11.12 \ No newline at end of file From 8c4f720fb578b3156c333448f298a55845857c58 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 25 Sep 2024 08:48:13 +0200 Subject: [PATCH 0192/1530] chore(deps): Bump llama-index from 0.11.9 to 0.11.12 in /examples/chainlit (#3642) chore(deps): Bump llama-index in /examples/chainlit Bumps [llama-index](https://github.com/run-llama/llama_index) from 0.11.9 to 0.11.12. - [Release notes](https://github.com/run-llama/llama_index/releases) - [Changelog](https://github.com/run-llama/llama_index/blob/main/CHANGELOG.md) - [Commits](https://github.com/run-llama/llama_index/compare/v0.11.9...v0.11.12) --- updated-dependencies: - dependency-name: llama-index dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- examples/chainlit/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/chainlit/requirements.txt b/examples/chainlit/requirements.txt index 1fe9356a..92eb113e 100644 --- a/examples/chainlit/requirements.txt +++ b/examples/chainlit/requirements.txt @@ -1,4 +1,4 @@ -llama_index==0.11.9 +llama_index==0.11.12 requests==2.32.3 weaviate_client==4.8.1 transformers From 74408bdc77e9f9d21a56699de09940fcaaf1a4eb Mon Sep 17 00:00:00 2001 From: "LocalAI [bot]" <139863280+localai-bot@users.noreply.github.com> Date: Wed, 25 Sep 2024 10:54:37 +0200 Subject: [PATCH 0193/1530] chore: :arrow_up: Update ggerganov/whisper.cpp to `0d2e2aed80109e8696791083bde3b58e190b7812` (#3658) :arrow_up: Update ggerganov/whisper.cpp Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com> Co-authored-by: mudler <2420543+mudler@users.noreply.github.com> Co-authored-by: Dave Co-authored-by: Ettore Di Giacinto --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index 6865f5a1..121b8e50 100644 --- a/Makefile +++ b/Makefile @@ -16,7 +16,7 @@ RWKV_VERSION?=661e7ae26d442f5cfebd2a0881b44e8c55949ec6 # whisper.cpp version WHISPER_REPO?=https://github.com/ggerganov/whisper.cpp -WHISPER_CPP_VERSION?=34972dbe221709323714fc8402f2e24041d48213 +WHISPER_CPP_VERSION?=0d2e2aed80109e8696791083bde3b58e190b7812 # bert.cpp version BERT_REPO?=https://github.com/go-skynet/go-bert.cpp From 33b2d38dd0198d78dbc26aa020acfb6ff4c4048c Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 25 Sep 2024 12:44:32 +0200 Subject: [PATCH 0194/1530] chore(deps): Bump chromadb from 0.5.5 to 0.5.7 in /examples/langchain-chroma (#3640) chore(deps): Bump chromadb in /examples/langchain-chroma Bumps [chromadb](https://github.com/chroma-core/chroma) 
from 0.5.5 to 0.5.7. - [Release notes](https://github.com/chroma-core/chroma/releases) - [Changelog](https://github.com/chroma-core/chroma/blob/main/RELEASE_PROCESS.md) - [Commits](https://github.com/chroma-core/chroma/compare/0.5.5...0.5.7) --- updated-dependencies: - dependency-name: chromadb dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- examples/langchain-chroma/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/langchain-chroma/requirements.txt b/examples/langchain-chroma/requirements.txt index 0c77892d..19929482 100644 --- a/examples/langchain-chroma/requirements.txt +++ b/examples/langchain-chroma/requirements.txt @@ -1,4 +1,4 @@ langchain==0.3.0 openai==1.47.1 -chromadb==0.5.5 +chromadb==0.5.7 llama-index==0.11.12 \ No newline at end of file From a3d69872e35e152f29f7888fa9c56b0a797e9723 Mon Sep 17 00:00:00 2001 From: Ettore Di Giacinto Date: Wed, 25 Sep 2024 18:00:23 +0200 Subject: [PATCH 0195/1530] feat(api): list loaded models in `/system` (#3661) feat(api): list loaded models in /system Signed-off-by: Ettore Di Giacinto --- core/http/endpoints/localai/system.go | 2 ++ core/schema/localai.go | 4 +++- pkg/model/initializers.go | 7 +++---- pkg/model/loader.go | 6 +++--- pkg/model/loader_test.go | 4 ++-- pkg/model/model.go | 4 +++- 6 files changed, 16 insertions(+), 11 deletions(-) diff --git a/core/http/endpoints/localai/system.go b/core/http/endpoints/localai/system.go index 11704933..23a725e3 100644 --- a/core/http/endpoints/localai/system.go +++ b/core/http/endpoints/localai/system.go @@ -17,12 +17,14 @@ func SystemInformations(ml *model.ModelLoader, appConfig *config.ApplicationConf if err != nil { return err } + loadedModels := ml.ListModels() for b := range appConfig.ExternalGRPCBackends { availableBackends = append(availableBackends, b) } return c.JSON( 
schema.SystemInformationResponse{ Backends: availableBackends, + Models: loadedModels, }, ) } diff --git a/core/schema/localai.go b/core/schema/localai.go index 9070c2be..75fa40c7 100644 --- a/core/schema/localai.go +++ b/core/schema/localai.go @@ -2,6 +2,7 @@ package schema import ( "github.com/mudler/LocalAI/core/p2p" + "github.com/mudler/LocalAI/pkg/model" gopsutil "github.com/shirou/gopsutil/v3/process" ) @@ -72,5 +73,6 @@ type P2PNodesResponse struct { } type SystemInformationResponse struct { - Backends []string `json:"backends"` + Backends []string `json:"backends"` + Models []model.Model `json:"loaded_models"` } diff --git a/pkg/model/initializers.go b/pkg/model/initializers.go index 7099bf33..80dd10b4 100644 --- a/pkg/model/initializers.go +++ b/pkg/model/initializers.go @@ -311,11 +311,11 @@ func (ml *ModelLoader) grpcModel(backend string, o *Options) func(string, string log.Debug().Msgf("GRPC Service Started") - client = NewModel(serverAddress) + client = NewModel(modelName, serverAddress) } else { log.Debug().Msg("external backend is uri") // address - client = NewModel(uri) + client = NewModel(modelName, uri) } } else { grpcProcess := backendPath(o.assetDir, backend) @@ -352,7 +352,7 @@ func (ml *ModelLoader) grpcModel(backend string, o *Options) func(string, string log.Debug().Msgf("GRPC Service Started") - client = NewModel(serverAddress) + client = NewModel(modelName, serverAddress) } log.Debug().Msgf("Wait for the service to start up") @@ -419,7 +419,6 @@ func (ml *ModelLoader) BackendLoader(opts ...Option) (client grpc.Backend, err e err := ml.StopGRPC(allExcept(o.model)) if err != nil { log.Error().Err(err).Str("keptModel", o.model).Msg("error while shutting down all backends except for the keptModel") - return nil, err } } diff --git a/pkg/model/loader.go b/pkg/model/loader.go index f70d2cea..4f1ec841 100644 --- a/pkg/model/loader.go +++ b/pkg/model/loader.go @@ -105,13 +105,13 @@ FILE: return models, nil } -func (ml *ModelLoader) ListModels() 
[]*Model { +func (ml *ModelLoader) ListModels() []Model { ml.mu.Lock() defer ml.mu.Unlock() - models := []*Model{} + models := []Model{} for _, model := range ml.models { - models = append(models, model) + models = append(models, *model) } return models diff --git a/pkg/model/loader_test.go b/pkg/model/loader_test.go index 4621844e..c16a6e50 100644 --- a/pkg/model/loader_test.go +++ b/pkg/model/loader_test.go @@ -63,7 +63,7 @@ var _ = Describe("ModelLoader", func() { Context("LoadModel", func() { It("should load a model and keep it in memory", func() { - mockModel = model.NewModel("test.model") + mockModel = model.NewModel("foo", "test.model") mockLoader := func(modelName, modelFile string) (*model.Model, error) { return mockModel, nil @@ -88,7 +88,7 @@ var _ = Describe("ModelLoader", func() { Context("ShutdownModel", func() { It("should shutdown a loaded model", func() { - mockModel = model.NewModel("test.model") + mockModel = model.NewModel("foo", "test.model") mockLoader := func(modelName, modelFile string) (*model.Model, error) { return mockModel, nil diff --git a/pkg/model/model.go b/pkg/model/model.go index 1927dc0c..6cb81d10 100644 --- a/pkg/model/model.go +++ b/pkg/model/model.go @@ -3,12 +3,14 @@ package model import grpc "github.com/mudler/LocalAI/pkg/grpc" type Model struct { + ID string `json:"id"` address string client grpc.Backend } -func NewModel(address string) *Model { +func NewModel(ID, address string) *Model { return &Model{ + ID: ID, address: address, } } From ef1507d000f2308f395a341d6c497de70427f1a5 Mon Sep 17 00:00:00 2001 From: "LocalAI [bot]" <139863280+localai-bot@users.noreply.github.com> Date: Thu, 26 Sep 2024 10:50:20 +0200 Subject: [PATCH 0196/1530] docs: :arrow_up: update docs version mudler/LocalAI (#3665) :arrow_up: Update docs version mudler/LocalAI Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com> Co-authored-by: mudler <2420543+mudler@users.noreply.github.com> --- docs/data/version.json | 2 
+- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/data/version.json b/docs/data/version.json index 0dba0428..470991b8 100644 --- a/docs/data/version.json +++ b/docs/data/version.json @@ -1,3 +1,3 @@ { - "version": "v2.21.0" + "version": "v2.21.1" } From d6522e69ca0f972b2d0d8f617b1cc131ac5026c6 Mon Sep 17 00:00:00 2001 From: "LocalAI [bot]" <139863280+localai-bot@users.noreply.github.com> Date: Thu, 26 Sep 2024 10:57:40 +0200 Subject: [PATCH 0197/1530] feat(swagger): update swagger (#3664) Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com> Co-authored-by: mudler <2420543+mudler@users.noreply.github.com> --- swagger/docs.go | 14 ++++++++++++++ swagger/swagger.json | 14 ++++++++++++++ swagger/swagger.yaml | 9 +++++++++ 3 files changed, 37 insertions(+) diff --git a/swagger/docs.go b/swagger/docs.go index ffb2ba03..c283dcb0 100644 --- a/swagger/docs.go +++ b/swagger/docs.go @@ -972,6 +972,14 @@ const docTemplate = `{ } } }, + "model.Model": { + "type": "object", + "properties": { + "id": { + "type": "string" + } + } + }, "openai.Assistant": { "type": "object", "properties": { @@ -1682,6 +1690,12 @@ const docTemplate = `{ "items": { "type": "string" } + }, + "loaded_models": { + "type": "array", + "items": { + "$ref": "#/definitions/model.Model" + } } } }, diff --git a/swagger/swagger.json b/swagger/swagger.json index e3aebe43..0a3be179 100644 --- a/swagger/swagger.json +++ b/swagger/swagger.json @@ -965,6 +965,14 @@ } } }, + "model.Model": { + "type": "object", + "properties": { + "id": { + "type": "string" + } + } + }, "openai.Assistant": { "type": "object", "properties": { @@ -1675,6 +1683,12 @@ "items": { "type": "string" } + }, + "loaded_models": { + "type": "array", + "items": { + "$ref": "#/definitions/model.Model" + } } } }, diff --git a/swagger/swagger.yaml b/swagger/swagger.yaml index 649b86e4..7b6619b4 100644 --- a/swagger/swagger.yaml +++ b/swagger/swagger.yaml @@ -168,6 +168,11 @@ definitions: type: 
string type: array type: object + model.Model: + properties: + id: + type: string + type: object openai.Assistant: properties: created: @@ -652,6 +657,10 @@ definitions: items: type: string type: array + loaded_models: + items: + $ref: '#/definitions/model.Model' + type: array type: object schema.TTSRequest: description: TTS request body From 3d12d2037c83f9d5d3ae832e97311b29547532e1 Mon Sep 17 00:00:00 2001 From: Ettore Di Giacinto Date: Thu, 26 Sep 2024 11:19:26 +0200 Subject: [PATCH 0198/1530] models(gallery): add llama-3.2 3B and 1B (#3671) Signed-off-by: Ettore Di Giacinto --- gallery/index.yaml | 60 ++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 60 insertions(+) diff --git a/gallery/index.yaml b/gallery/index.yaml index 9b8a0220..de38c3d5 100644 --- a/gallery/index.yaml +++ b/gallery/index.yaml @@ -1,4 +1,64 @@ --- +## llama3.2 +- &llama32 + url: "github:mudler/LocalAI/gallery/llama3.1-instruct.yaml@master" + icon: https://cdn-uploads.huggingface.co/production/uploads/642cc1c253e76b4c2286c58e/aJJxKus1wP5N-euvHEUq7.png + license: llama3.2 + description: | + The Meta Llama 3.2 collection of multilingual large language models (LLMs) is a collection of pretrained and instruction-tuned generative models in 1B and 3B sizes (text in/text out). The Llama 3.2 instruction-tuned text only models are optimized for multilingual dialogue use cases, including agentic retrieval and summarization tasks. They outperform many of the available open source and closed chat models on common industry benchmarks. + + Model Developer: Meta + + Model Architecture: Llama 3.2 is an auto-regressive language model that uses an optimized transformer architecture. The tuned versions use supervised fine-tuning (SFT) and reinforcement learning with human feedback (RLHF) to align with human preferences for helpfulness and safety. 
+ tags: + - llm + - gguf + - gpu + - cpu + - llama3.2 + name: "llama-3.2-1b-instruct:q4_k_m" + urls: + - https://huggingface.co/hugging-quants/Llama-3.2-1B-Instruct-Q4_K_M-GGUF + overrides: + parameters: + model: llama-3.2-1b-instruct-q4_k_m.gguf + files: + - filename: llama-3.2-1b-instruct-q4_k_m.gguf + sha256: 1d0e9419ec4e12aef73ccf4ffd122703e94c48344a96bc7c5f0f2772c2152ce3 + uri: huggingface://hugging-quants/Llama-3.2-1B-Instruct-Q4_K_M-GGUF/llama-3.2-1b-instruct-q4_k_m.gguf +- !!merge <<: *llama32 + name: "llama-3.2-3b-instruct:q4_k_m" + urls: + - https://huggingface.co/hugging-quants/Llama-3.2-3B-Instruct-Q4_K_M-GGUF + overrides: + parameters: + model: llama-3.2-3b-instruct-q4_k_m.gguf + files: + - filename: llama-3.2-3b-instruct-q4_k_m.gguf + sha256: c55a83bfb6396799337853ca69918a0b9bbb2917621078c34570bc17d20fd7a1 + uri: huggingface://hugging-quants/Llama-3.2-3B-Instruct-Q4_K_M-GGUF/llama-3.2-3b-instruct-q4_k_m.gguf +- !!merge <<: *llama32 + name: "llama-3.2-3b-instruct:q8_0" + urls: + - https://huggingface.co/hugging-quants/Llama-3.2-3B-Instruct-Q8_0-GGUF + overrides: + parameters: + model: llama-3.2-3b-instruct-q8_0.gguf + files: + - filename: llama-3.2-3b-instruct-q8_0.gguf + sha256: 51725f77f997a5080c3d8dd66e073da22ddf48ab5264f21f05ded9b202c3680e + uri: huggingface://hugging-quants/Llama-3.2-3B-Instruct-Q8_0-GGUF/llama-3.2-3b-instruct-q8_0.gguf +- !!merge <<: *llama32 + name: "llama-3.2-1b-instruct:q8_0" + urls: + - https://huggingface.co/hugging-quants/Llama-3.2-1B-Instruct-Q8_0-GGUF + overrides: + parameters: + model: llama-3.2-1b-instruct-q8_0.gguf + files: + - filename: llama-3.2-1b-instruct-q8_0.gguf + sha256: ba345c83bf5cc679c653b853c46517eea5a34f03ed2205449db77184d9ae62a9 + uri: huggingface://hugging-quants/Llama-3.2-1B-Instruct-Q8_0-GGUF/llama-3.2-1b-instruct-q8_0.gguf ## Qwen2.5 - &qwen25 name: "qwen2.5-14b-instruct" From fa5c98549aae32df63a9c3e34574701e45287d29 Mon Sep 17 00:00:00 2001 From: Ettore Di Giacinto Date: Thu, 26 Sep 2024 12:44:55 
+0200 Subject: [PATCH 0199/1530] chore(refactor): track grpcProcess in the model structure (#3663) * chore(refactor): track grpcProcess in the model structure This avoids to have to handle in two parts the data relative to the same model. It makes it easier to track and use mutex with. This also fixes races conditions while accessing to the model. Signed-off-by: Ettore Di Giacinto * chore(tests): run protogen-go before starting aio tests Signed-off-by: Ettore Di Giacinto * chore(tests): install protoc in aio tests Signed-off-by: Ettore Di Giacinto --------- Signed-off-by: Ettore Di Giacinto --- .github/workflows/test.yml | 11 ++++++++++- Makefile | 2 +- pkg/model/initializers.go | 15 ++++++++++----- pkg/model/loader.go | 32 ++++++++++++++------------------ pkg/model/loader_test.go | 4 ++-- pkg/model/model.go | 18 ++++++++++++++++-- pkg/model/process.go | 33 ++++++++++++++++++--------------- 7 files changed, 71 insertions(+), 44 deletions(-) diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index 2af3fd00..b62f86ef 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -178,13 +178,22 @@ jobs: uses: actions/checkout@v4 with: submodules: true + - name: Dependencies + run: | + # Install protoc + curl -L -s https://github.com/protocolbuffers/protobuf/releases/download/v26.1/protoc-26.1-linux-x86_64.zip -o protoc.zip && \ + unzip -j -d /usr/local/bin protoc.zip bin/protoc && \ + rm protoc.zip + go install google.golang.org/protobuf/cmd/protoc-gen-go@v1.34.2 + go install google.golang.org/grpc/cmd/protoc-gen-go-grpc@1958fcbe2ca8bd93af633f11e97d44e567e945af + PATH="$PATH:$HOME/go/bin" make protogen-go - name: Build images run: | docker build --build-arg FFMPEG=true --build-arg IMAGE_TYPE=extras --build-arg EXTRA_BACKENDS=rerankers --build-arg MAKEFLAGS="--jobs=5 --output-sync=target" -t local-ai:tests -f Dockerfile . 
BASE_IMAGE=local-ai:tests DOCKER_AIO_IMAGE=local-ai-aio:test make docker-aio - name: Test run: | - LOCALAI_MODELS_DIR=$PWD/models LOCALAI_IMAGE_TAG=test LOCALAI_IMAGE=local-ai-aio \ + PATH="$PATH:$HOME/go/bin" LOCALAI_MODELS_DIR=$PWD/models LOCALAI_IMAGE_TAG=test LOCALAI_IMAGE=local-ai-aio \ make run-e2e-aio - name: Setup tmate session if tests fail if: ${{ failure() }} diff --git a/Makefile b/Makefile index 121b8e50..4efee986 100644 --- a/Makefile +++ b/Makefile @@ -468,7 +468,7 @@ run-e2e-image: ls -liah $(abspath ./tests/e2e-fixtures) docker run -p 5390:8080 -e MODELS_PATH=/models -e THREADS=1 -e DEBUG=true -d --rm -v $(TEST_DIR):/models --gpus all --name e2e-tests-$(RANDOM) localai-tests -run-e2e-aio: +run-e2e-aio: protogen-go @echo 'Running e2e AIO tests' $(GOCMD) run github.com/onsi/ginkgo/v2/ginkgo --flake-attempts 5 -v -r ./tests/e2e-aio diff --git a/pkg/model/initializers.go b/pkg/model/initializers.go index 80dd10b4..d0f47373 100644 --- a/pkg/model/initializers.go +++ b/pkg/model/initializers.go @@ -304,18 +304,19 @@ func (ml *ModelLoader) grpcModel(backend string, o *Options) func(string, string return nil, fmt.Errorf("failed allocating free ports: %s", err.Error()) } // Make sure the process is executable - if err := ml.startProcess(uri, o.model, serverAddress); err != nil { + process, err := ml.startProcess(uri, o.model, serverAddress) + if err != nil { log.Error().Err(err).Str("path", uri).Msg("failed to launch ") return nil, err } log.Debug().Msgf("GRPC Service Started") - client = NewModel(modelName, serverAddress) + client = NewModel(modelName, serverAddress, process) } else { log.Debug().Msg("external backend is uri") // address - client = NewModel(modelName, uri) + client = NewModel(modelName, uri, nil) } } else { grpcProcess := backendPath(o.assetDir, backend) @@ -346,13 +347,14 @@ func (ml *ModelLoader) grpcModel(backend string, o *Options) func(string, string args, grpcProcess = library.LoadLDSO(o.assetDir, args, grpcProcess) // Make sure the 
process is executable in any circumstance - if err := ml.startProcess(grpcProcess, o.model, serverAddress, args...); err != nil { + process, err := ml.startProcess(grpcProcess, o.model, serverAddress, args...) + if err != nil { return nil, err } log.Debug().Msgf("GRPC Service Started") - client = NewModel(modelName, serverAddress) + client = NewModel(modelName, serverAddress, process) } log.Debug().Msgf("Wait for the service to start up") @@ -374,6 +376,7 @@ func (ml *ModelLoader) grpcModel(backend string, o *Options) func(string, string if !ready { log.Debug().Msgf("GRPC Service NOT ready") + ml.deleteProcess(o.model) return nil, fmt.Errorf("grpc service not ready") } @@ -385,9 +388,11 @@ func (ml *ModelLoader) grpcModel(backend string, o *Options) func(string, string res, err := client.GRPC(o.parallelRequests, ml.wd).LoadModel(o.context, &options) if err != nil { + ml.deleteProcess(o.model) return nil, fmt.Errorf("could not load model: %w", err) } if !res.Success { + ml.deleteProcess(o.model) return nil, fmt.Errorf("could not load model (no success): %s", res.Message) } diff --git a/pkg/model/loader.go b/pkg/model/loader.go index 4f1ec841..68ac1a31 100644 --- a/pkg/model/loader.go +++ b/pkg/model/loader.go @@ -13,7 +13,6 @@ import ( "github.com/mudler/LocalAI/pkg/utils" - process "github.com/mudler/go-processmanager" "github.com/rs/zerolog/log" ) @@ -21,20 +20,18 @@ import ( // TODO: Split ModelLoader and TemplateLoader? Just to keep things more organized. Left together to share a mutex until I look into that. 
Would split if we seperate directories for .bin/.yaml and .tmpl type ModelLoader struct { - ModelPath string - mu sync.Mutex - models map[string]*Model - grpcProcesses map[string]*process.Process - templates *templates.TemplateCache - wd *WatchDog + ModelPath string + mu sync.Mutex + models map[string]*Model + templates *templates.TemplateCache + wd *WatchDog } func NewModelLoader(modelPath string) *ModelLoader { nml := &ModelLoader{ - ModelPath: modelPath, - models: make(map[string]*Model), - templates: templates.NewTemplateCache(modelPath), - grpcProcesses: make(map[string]*process.Process), + ModelPath: modelPath, + models: make(map[string]*Model), + templates: templates.NewTemplateCache(modelPath), } return nml @@ -127,6 +124,8 @@ func (ml *ModelLoader) LoadModel(modelName string, loader func(string, string) ( modelFile := filepath.Join(ml.ModelPath, modelName) log.Debug().Msgf("Loading model in memory from file: %s", modelFile) + ml.mu.Lock() + defer ml.mu.Unlock() model, err := loader(modelName, modelFile) if err != nil { return nil, err @@ -136,8 +135,6 @@ func (ml *ModelLoader) LoadModel(modelName string, loader func(string, string) ( return nil, fmt.Errorf("loader didn't return a model") } - ml.mu.Lock() - defer ml.mu.Unlock() ml.models[modelName] = model return model, nil @@ -146,14 +143,13 @@ func (ml *ModelLoader) LoadModel(modelName string, loader func(string, string) ( func (ml *ModelLoader) ShutdownModel(modelName string) error { ml.mu.Lock() defer ml.mu.Unlock() - - _, ok := ml.models[modelName] + model, ok := ml.models[modelName] if !ok { return fmt.Errorf("model %s not found", modelName) } retries := 1 - for ml.models[modelName].GRPC(false, ml.wd).IsBusy() { + for model.GRPC(false, ml.wd).IsBusy() { log.Debug().Msgf("%s busy. 
Waiting.", modelName) dur := time.Duration(retries*2) * time.Second if dur > retryTimeout { @@ -185,8 +181,8 @@ func (ml *ModelLoader) CheckIsLoaded(s string) *Model { if !alive { log.Warn().Msgf("GRPC Model not responding: %s", err.Error()) log.Warn().Msgf("Deleting the process in order to recreate it") - process, exists := ml.grpcProcesses[s] - if !exists { + process := m.Process() + if process == nil { log.Error().Msgf("Process not found for '%s' and the model is not responding anymore !", s) return m } diff --git a/pkg/model/loader_test.go b/pkg/model/loader_test.go index c16a6e50..d0ad4e0c 100644 --- a/pkg/model/loader_test.go +++ b/pkg/model/loader_test.go @@ -63,7 +63,7 @@ var _ = Describe("ModelLoader", func() { Context("LoadModel", func() { It("should load a model and keep it in memory", func() { - mockModel = model.NewModel("foo", "test.model") + mockModel = model.NewModel("foo", "test.model", nil) mockLoader := func(modelName, modelFile string) (*model.Model, error) { return mockModel, nil @@ -88,7 +88,7 @@ var _ = Describe("ModelLoader", func() { Context("ShutdownModel", func() { It("should shutdown a loaded model", func() { - mockModel = model.NewModel("foo", "test.model") + mockModel = model.NewModel("foo", "test.model", nil) mockLoader := func(modelName, modelFile string) (*model.Model, error) { return mockModel, nil diff --git a/pkg/model/model.go b/pkg/model/model.go index 6cb81d10..6e4fd316 100644 --- a/pkg/model/model.go +++ b/pkg/model/model.go @@ -1,20 +1,32 @@ package model -import grpc "github.com/mudler/LocalAI/pkg/grpc" +import ( + "sync" + + grpc "github.com/mudler/LocalAI/pkg/grpc" + process "github.com/mudler/go-processmanager" +) type Model struct { ID string `json:"id"` address string client grpc.Backend + process *process.Process + sync.Mutex } -func NewModel(ID, address string) *Model { +func NewModel(ID, address string, process *process.Process) *Model { return &Model{ ID: ID, address: address, + process: process, } } +func (m 
*Model) Process() *process.Process { + return m.process +} + func (m *Model) GRPC(parallel bool, wd *WatchDog) grpc.Backend { if m.client != nil { return m.client @@ -25,6 +37,8 @@ func (m *Model) GRPC(parallel bool, wd *WatchDog) grpc.Backend { enableWD = true } + m.Lock() + defer m.Unlock() m.client = grpc.NewClient(m.address, parallel, wd, enableWD) return m.client } diff --git a/pkg/model/process.go b/pkg/model/process.go index bcd1fccb..48631d79 100644 --- a/pkg/model/process.go +++ b/pkg/model/process.go @@ -16,20 +16,22 @@ import ( ) func (ml *ModelLoader) deleteProcess(s string) error { - if _, exists := ml.grpcProcesses[s]; exists { - if err := ml.grpcProcesses[s].Stop(); err != nil { - log.Error().Err(err).Msgf("(deleteProcess) error while deleting grpc process %s", s) + if m, exists := ml.models[s]; exists { + process := m.Process() + if process != nil { + if err := process.Stop(); err != nil { + log.Error().Err(err).Msgf("(deleteProcess) error while deleting process %s", s) + } } } - delete(ml.grpcProcesses, s) delete(ml.models, s) return nil } func (ml *ModelLoader) StopGRPC(filter GRPCProcessFilter) error { var err error = nil - for k, p := range ml.grpcProcesses { - if filter(k, p) { + for k, m := range ml.models { + if filter(k, m.Process()) { e := ml.ShutdownModel(k) err = errors.Join(err, e) } @@ -44,17 +46,20 @@ func (ml *ModelLoader) StopAllGRPC() error { func (ml *ModelLoader) GetGRPCPID(id string) (int, error) { ml.mu.Lock() defer ml.mu.Unlock() - p, exists := ml.grpcProcesses[id] + p, exists := ml.models[id] if !exists { return -1, fmt.Errorf("no grpc backend found for %s", id) } - return strconv.Atoi(p.PID) + if p.Process() == nil { + return -1, fmt.Errorf("no grpc backend found for %s", id) + } + return strconv.Atoi(p.Process().PID) } -func (ml *ModelLoader) startProcess(grpcProcess, id string, serverAddress string, args ...string) error { +func (ml *ModelLoader) startProcess(grpcProcess, id string, serverAddress string, args ...string) 
(*process.Process, error) { // Make sure the process is executable if err := os.Chmod(grpcProcess, 0700); err != nil { - return err + return nil, err } log.Debug().Msgf("Loading GRPC Process: %s", grpcProcess) @@ -63,7 +68,7 @@ func (ml *ModelLoader) startProcess(grpcProcess, id string, serverAddress string workDir, err := filepath.Abs(filepath.Dir(grpcProcess)) if err != nil { - return err + return nil, err } grpcControlProcess := process.New( @@ -79,10 +84,8 @@ func (ml *ModelLoader) startProcess(grpcProcess, id string, serverAddress string ml.wd.AddAddressModelMap(serverAddress, id) } - ml.grpcProcesses[id] = grpcControlProcess - if err := grpcControlProcess.Run(); err != nil { - return err + return grpcControlProcess, err } log.Debug().Msgf("GRPC Service state dir: %s", grpcControlProcess.StateDir()) @@ -116,5 +119,5 @@ func (ml *ModelLoader) startProcess(grpcProcess, id string, serverAddress string } }() - return nil + return grpcControlProcess, nil } From b0f4556c0f4277fc4056c396e4c639f7b41ea952 Mon Sep 17 00:00:00 2001 From: "LocalAI [bot]" <139863280+localai-bot@users.noreply.github.com> Date: Thu, 26 Sep 2024 14:52:26 +0200 Subject: [PATCH 0200/1530] chore: :arrow_up: Update ggerganov/llama.cpp to `ea9c32be71b91b42ecc538bd902e93cbb5fb36cb` (#3667) :arrow_up: Update ggerganov/llama.cpp Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com> Co-authored-by: mudler <2420543+mudler@users.noreply.github.com> Co-authored-by: Ettore Di Giacinto --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index 4efee986..3a90463b 100644 --- a/Makefile +++ b/Makefile @@ -8,7 +8,7 @@ DETECT_LIBS?=true # llama.cpp versions GOLLAMA_REPO?=https://github.com/go-skynet/go-llama.cpp GOLLAMA_VERSION?=2b57a8ae43e4699d3dc5d1496a1ccd42922993be -CPPLLAMA_VERSION?=70392f1f81470607ba3afef04aa56c9f65587664 +CPPLLAMA_VERSION?=ea9c32be71b91b42ecc538bd902e93cbb5fb36cb # go-rwkv version 
RWKV_REPO?=https://github.com/donomii/go-rwkv.cpp From 8c4196faf34a123f018471890873403bec33b702 Mon Sep 17 00:00:00 2001 From: "LocalAI [bot]" <139863280+localai-bot@users.noreply.github.com> Date: Thu, 26 Sep 2024 15:58:17 +0200 Subject: [PATCH 0201/1530] chore: :arrow_up: Update ggerganov/whisper.cpp to `69339af2d104802f3f201fd419163defba52890e` (#3666) :arrow_up: Update ggerganov/whisper.cpp Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com> Co-authored-by: mudler <2420543+mudler@users.noreply.github.com> Co-authored-by: Ettore Di Giacinto --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index 3a90463b..07fd6ee3 100644 --- a/Makefile +++ b/Makefile @@ -16,7 +16,7 @@ RWKV_VERSION?=661e7ae26d442f5cfebd2a0881b44e8c55949ec6 # whisper.cpp version WHISPER_REPO?=https://github.com/ggerganov/whisper.cpp -WHISPER_CPP_VERSION?=0d2e2aed80109e8696791083bde3b58e190b7812 +WHISPER_CPP_VERSION?=69339af2d104802f3f201fd419163defba52890e # bert.cpp version BERT_REPO?=https://github.com/go-skynet/go-bert.cpp From f2ba1cfb01d738d61dd443589d2878d4643e4fe2 Mon Sep 17 00:00:00 2001 From: "LocalAI [bot]" <139863280+localai-bot@users.noreply.github.com> Date: Thu, 26 Sep 2024 23:41:45 +0200 Subject: [PATCH 0202/1530] chore: :arrow_up: Update ggerganov/llama.cpp to `95bc82fbc0df6d48cf66c857a4dda3d044f45ca2` (#3674) :arrow_up: Update ggerganov/llama.cpp Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com> Co-authored-by: mudler <2420543+mudler@users.noreply.github.com> --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index 07fd6ee3..ab7532d3 100644 --- a/Makefile +++ b/Makefile @@ -8,7 +8,7 @@ DETECT_LIBS?=true # llama.cpp versions GOLLAMA_REPO?=https://github.com/go-skynet/go-llama.cpp GOLLAMA_VERSION?=2b57a8ae43e4699d3dc5d1496a1ccd42922993be -CPPLLAMA_VERSION?=ea9c32be71b91b42ecc538bd902e93cbb5fb36cb 
+CPPLLAMA_VERSION?=95bc82fbc0df6d48cf66c857a4dda3d044f45ca2 # go-rwkv version RWKV_REPO?=https://github.com/donomii/go-rwkv.cpp From 4550abbfcece4f1ae4e2162431e6cd772d7a92d4 Mon Sep 17 00:00:00 2001 From: "LocalAI [bot]" <139863280+localai-bot@users.noreply.github.com> Date: Fri, 27 Sep 2024 08:54:36 +0200 Subject: [PATCH 0203/1530] chore(model-gallery): :arrow_up: update checksum (#3675) :arrow_up: Checksum updates in gallery/index.yaml Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com> Co-authored-by: mudler <2420543+mudler@users.noreply.github.com> --- gallery/index.yaml | 34 +++++++++++++++++----------------- 1 file changed, 17 insertions(+), 17 deletions(-) diff --git a/gallery/index.yaml b/gallery/index.yaml index de38c3d5..4b668061 100644 --- a/gallery/index.yaml +++ b/gallery/index.yaml @@ -59,8 +59,8 @@ - filename: llama-3.2-1b-instruct-q8_0.gguf sha256: ba345c83bf5cc679c653b853c46517eea5a34f03ed2205449db77184d9ae62a9 uri: huggingface://hugging-quants/Llama-3.2-1B-Instruct-Q8_0-GGUF/llama-3.2-1b-instruct-q8_0.gguf -## Qwen2.5 - &qwen25 + ## Qwen2.5 name: "qwen2.5-14b-instruct" url: "github:mudler/LocalAI/gallery/chatml.yaml@master" license: apache-2.0 @@ -89,11 +89,11 @@ - https://huggingface.co/bartowski/Qwen2.5-Math-7B-Instruct-GGUF - https://huggingface.co/Qwen/Qwen2.5-Math-7B-Instruct description: | - In August 2024, we released the first series of mathematical LLMs - Qwen2-Math - of our Qwen family. A month later, we have upgraded it and open-sourced Qwen2.5-Math series, including base models Qwen2.5-Math-1.5B/7B/72B, instruction-tuned models Qwen2.5-Math-1.5B/7B/72B-Instruct, and mathematical reward model Qwen2.5-Math-RM-72B. + In August 2024, we released the first series of mathematical LLMs - Qwen2-Math - of our Qwen family. 
A month later, we have upgraded it and open-sourced Qwen2.5-Math series, including base models Qwen2.5-Math-1.5B/7B/72B, instruction-tuned models Qwen2.5-Math-1.5B/7B/72B-Instruct, and mathematical reward model Qwen2.5-Math-RM-72B. - Unlike Qwen2-Math series which only supports using Chain-of-Thught (CoT) to solve English math problems, Qwen2.5-Math series is expanded to support using both CoT and Tool-integrated Reasoning (TIR) to solve math problems in both Chinese and English. The Qwen2.5-Math series models have achieved significant performance improvements compared to the Qwen2-Math series models on the Chinese and English mathematics benchmarks with CoT. + Unlike Qwen2-Math series which only supports using Chain-of-Thught (CoT) to solve English math problems, Qwen2.5-Math series is expanded to support using both CoT and Tool-integrated Reasoning (TIR) to solve math problems in both Chinese and English. The Qwen2.5-Math series models have achieved significant performance improvements compared to the Qwen2-Math series models on the Chinese and English mathematics benchmarks with CoT. - The base models of Qwen2-Math are initialized with Qwen2-1.5B/7B/72B, and then pretrained on a meticulously designed Mathematics-specific Corpus. This corpus contains large-scale high-quality mathematical web texts, books, codes, exam questions, and mathematical pre-training data synthesized by Qwen2. + The base models of Qwen2-Math are initialized with Qwen2-1.5B/7B/72B, and then pretrained on a meticulously designed Mathematics-specific Corpus. This corpus contains large-scale high-quality mathematical web texts, books, codes, exam questions, and mathematical pre-training data synthesized by Qwen2. 
overrides: parameters: model: Qwen2.5-Math-7B-Instruct-Q4_K_M.gguf @@ -195,8 +195,8 @@ model: Qwen2.5-32B.Q4_K_M.gguf files: - filename: Qwen2.5-32B.Q4_K_M.gguf - sha256: 02703e27c8b964db445444581a6937ad7538f0c32a100b26b49fa0e8ff527155 uri: huggingface://mradermacher/Qwen2.5-32B-GGUF/Qwen2.5-32B.Q4_K_M.gguf + sha256: fa42a4067e3630929202b6bb1ef5cebc43c1898494aedfd567b7d53c7a9d84a6 - !!merge <<: *qwen25 name: "qwen2.5-32b-instruct" urls: @@ -221,8 +221,8 @@ - filename: Qwen2.5-72B-Instruct-Q4_K_M.gguf sha256: e4c8fad16946be8cf0bbf67eb8f4e18fc7415a5a6d2854b4cda453edb4082545 uri: huggingface://bartowski/Qwen2.5-72B-Instruct-GGUF/Qwen2.5-72B-Instruct-Q4_K_M.gguf -## SmolLM - &smollm + ## SmolLM url: "github:mudler/LocalAI/gallery/chatml.yaml@master" name: "smollm-1.7b-instruct" icon: https://huggingface.co/datasets/HuggingFaceTB/images/resolve/main/banner_smol.png @@ -651,9 +651,9 @@ - https://huggingface.co/leafspark/Reflection-Llama-3.1-70B-bf16 - https://huggingface.co/senseable/Reflection-Llama-3.1-70B-gguf description: | - Reflection Llama-3.1 70B is (currently) the world's top open-source LLM, trained with a new technique called Reflection-Tuning that teaches a LLM to detect mistakes in its reasoning and correct course. + Reflection Llama-3.1 70B is (currently) the world's top open-source LLM, trained with a new technique called Reflection-Tuning that teaches a LLM to detect mistakes in its reasoning and correct course. - The model was trained on synthetic data generated by Glaive. If you're training a model, Glaive is incredible — use them. + The model was trained on synthetic data generated by Glaive. If you're training a model, Glaive is incredible — use them. 
overrides: parameters: model: Reflection-Llama-3.1-70B-q4_k_m.gguf @@ -973,15 +973,15 @@ - https://huggingface.co/Sao10K/L3.1-8B-Niitama-v1.1 - https://huggingface.co/Lewdiculous/L3.1-8B-Niitama-v1.1-GGUF-IQ-Imatrix description: | - GGUF-IQ-Imatrix quants for Sao10K/L3.1-8B-Niitama-v1.1 - Here's the subjectively superior L3 version: L3-8B-Niitama-v1 - An experimental model using experimental methods. + GGUF-IQ-Imatrix quants for Sao10K/L3.1-8B-Niitama-v1.1 + Here's the subjectively superior L3 version: L3-8B-Niitama-v1 + An experimental model using experimental methods. - More detail on it: + More detail on it: - Tamamo and Niitama are made from the same data. Literally. The only thing that's changed is how theyre shuffled and formatted. Yet, I get wildly different results. + Tamamo and Niitama are made from the same data. Literally. The only thing that's changed is how theyre shuffled and formatted. Yet, I get wildly different results. - Interesting, eh? Feels kinda not as good compared to the l3 version, but it's aight. + Interesting, eh? Feels kinda not as good compared to the l3 version, but it's aight. overrides: parameters: model: L3.1-8B-Niitama-v1.1-Q4_K_M-imat.gguf @@ -1606,8 +1606,8 @@ urls: - https://huggingface.co/Lewdiculous/MN-12B-Lyra-v4-GGUF-IQ-Imatrix description: | - A finetune of Mistral Nemo by Sao10K. - Uses the ChatML prompt format. + A finetune of Mistral Nemo by Sao10K. + Uses the ChatML prompt format. overrides: parameters: model: MN-12B-Lyra-v4-Q4_K_M-imat.gguf @@ -2134,7 +2134,7 @@ - https://huggingface.co/EpistemeAI/Athena-codegemma-2-2b-it - https://huggingface.co/mradermacher/Athena-codegemma-2-2b-it-GGUF description: | - Supervised fine tuned (sft unsloth) for coding with EpistemeAI coding dataset. + Supervised fine tuned (sft unsloth) for coding with EpistemeAI coding dataset. 
overrides: parameters: model: Athena-codegemma-2-2b-it.Q4_K_M.gguf From 453c45d022c7f211279f3d30cf519520636dd7be Mon Sep 17 00:00:00 2001 From: Ettore Di Giacinto Date: Fri, 27 Sep 2024 12:21:04 +0200 Subject: [PATCH 0204/1530] models(gallery): add magnusintellectus-12b-v1-i1 (#3678) Signed-off-by: Ettore Di Giacinto --- gallery/index.yaml | 21 +++++++++++++++++++++ 1 file changed, 21 insertions(+) diff --git a/gallery/index.yaml b/gallery/index.yaml index 4b668061..1a1828f6 100644 --- a/gallery/index.yaml +++ b/gallery/index.yaml @@ -1615,6 +1615,27 @@ - filename: MN-12B-Lyra-v4-Q4_K_M-imat.gguf sha256: 1989123481ca1936c8a2cbe278ff5d1d2b0ae63dbdc838bb36a6d7547b8087b3 uri: huggingface://Lewdiculous/MN-12B-Lyra-v4-GGUF-IQ-Imatrix/MN-12B-Lyra-v4-Q4_K_M-imat.gguf +- !!merge <<: *mistral03 + name: "magnusintellectus-12b-v1-i1" + url: "github:mudler/LocalAI/gallery/chatml.yaml@master" + icon: https://cdn-uploads.huggingface.co/production/uploads/66b564058d9afb7a9d5607d5/hUVJI1Qa4tCMrZWMgYkoD.png + urls: + - https://huggingface.co/GalrionSoftworks/MagnusIntellectus-12B-v1 + - https://huggingface.co/mradermacher/MagnusIntellectus-12B-v1-i1-GGUF + description: | + How pleasant, the rocks appear to have made a decent conglomerate. A-. 
+ + MagnusIntellectus is a merge of the following models using LazyMergekit: + + UsernameJustAnother/Nemo-12B-Marlin-v5 + anthracite-org/magnum-12b-v2 + overrides: + parameters: + model: MagnusIntellectus-12B-v1.i1-Q4_K_M.gguf + files: + - filename: MagnusIntellectus-12B-v1.i1-Q4_K_M.gguf + sha256: c97107983b4edc5b6f2a592d227ca2dd4196e2af3d3bc0fe6b7a8954a1fb5870 + uri: huggingface://mradermacher/MagnusIntellectus-12B-v1-i1-GGUF/MagnusIntellectus-12B-v1.i1-Q4_K_M.gguf - &mudler ### START mudler's LocalAI specific-models url: "github:mudler/LocalAI/gallery/mudler.yaml@master" From 2a8cbad12222f59295911078e9acc3788e666f36 Mon Sep 17 00:00:00 2001 From: Ettore Di Giacinto Date: Fri, 27 Sep 2024 13:03:41 +0200 Subject: [PATCH 0205/1530] models(gallery): add bigqwen2.5-52b-instruct (#3679) Signed-off-by: Ettore Di Giacinto --- gallery/index.yaml | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/gallery/index.yaml b/gallery/index.yaml index 1a1828f6..847e004c 100644 --- a/gallery/index.yaml +++ b/gallery/index.yaml @@ -221,6 +221,22 @@ - filename: Qwen2.5-72B-Instruct-Q4_K_M.gguf sha256: e4c8fad16946be8cf0bbf67eb8f4e18fc7415a5a6d2854b4cda453edb4082545 uri: huggingface://bartowski/Qwen2.5-72B-Instruct-GGUF/Qwen2.5-72B-Instruct-Q4_K_M.gguf +- !!merge <<: *qwen25 + name: "bigqwen2.5-52b-instruct" + icon: https://cdn-uploads.huggingface.co/production/uploads/61b8e2ba285851687028d395/98GiKtmH1AtHHbIbOUH4Y.jpeg + urls: + - https://huggingface.co/mlabonne/BigQwen2.5-52B-Instruct + - https://huggingface.co/bartowski/BigQwen2.5-52B-Instruct-GGUF + description: | + BigQwen2.5-52B-Instruct is a Qwen/Qwen2-32B-Instruct self-merge made with MergeKit. + It applies the mlabonne/Meta-Llama-3-120B-Instruct recipe. 
+ overrides: + parameters: + model: BigQwen2.5-52B-Instruct-Q4_K_M.gguf + files: + - filename: BigQwen2.5-52B-Instruct-Q4_K_M.gguf + sha256: 9c939f08e366b51b07096eb2ecb5cc2a82894ac7baf639e446237ad39889c896 + uri: huggingface://bartowski/BigQwen2.5-52B-Instruct-GGUF/BigQwen2.5-52B-Instruct-Q4_K_M.gguf - &smollm ## SmolLM url: "github:mudler/LocalAI/gallery/chatml.yaml@master" From 4e0f3cc9802e56fae2a52715298257932e3c0f5e Mon Sep 17 00:00:00 2001 From: "LocalAI [bot]" <139863280+localai-bot@users.noreply.github.com> Date: Sat, 28 Sep 2024 00:42:59 +0200 Subject: [PATCH 0206/1530] chore: :arrow_up: Update ggerganov/llama.cpp to `b5de3b74a595cbfefab7eeb5a567425c6a9690cf` (#3681) :arrow_up: Update ggerganov/llama.cpp Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com> Co-authored-by: mudler <2420543+mudler@users.noreply.github.com> --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index ab7532d3..2c7310d8 100644 --- a/Makefile +++ b/Makefile @@ -8,7 +8,7 @@ DETECT_LIBS?=true # llama.cpp versions GOLLAMA_REPO?=https://github.com/go-skynet/go-llama.cpp GOLLAMA_VERSION?=2b57a8ae43e4699d3dc5d1496a1ccd42922993be -CPPLLAMA_VERSION?=95bc82fbc0df6d48cf66c857a4dda3d044f45ca2 +CPPLLAMA_VERSION?=b5de3b74a595cbfefab7eeb5a567425c6a9690cf # go-rwkv version RWKV_REPO?=https://github.com/donomii/go-rwkv.cpp From e94a50e9db24aa03ce0d53a5200099aadb52b3aa Mon Sep 17 00:00:00 2001 From: "LocalAI [bot]" <139863280+localai-bot@users.noreply.github.com> Date: Sat, 28 Sep 2024 10:02:19 +0200 Subject: [PATCH 0207/1530] chore: :arrow_up: Update ggerganov/whisper.cpp to `8feb375fbdf0277ad36958c218c6bf48fa0ba75a` (#3680) :arrow_up: Update ggerganov/whisper.cpp Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com> Co-authored-by: mudler <2420543+mudler@users.noreply.github.com> Co-authored-by: Ettore Di Giacinto --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 
deletion(-) diff --git a/Makefile b/Makefile index 2c7310d8..aa926f4c 100644 --- a/Makefile +++ b/Makefile @@ -16,7 +16,7 @@ RWKV_VERSION?=661e7ae26d442f5cfebd2a0881b44e8c55949ec6 # whisper.cpp version WHISPER_REPO?=https://github.com/ggerganov/whisper.cpp -WHISPER_CPP_VERSION?=69339af2d104802f3f201fd419163defba52890e +WHISPER_CPP_VERSION?=8feb375fbdf0277ad36958c218c6bf48fa0ba75a # bert.cpp version BERT_REPO?=https://github.com/go-skynet/go-bert.cpp From 50a3b54e3474fd552b352222a90f70c8ab624ceb Mon Sep 17 00:00:00 2001 From: siddimore Date: Sat, 28 Sep 2024 08:23:56 -0700 Subject: [PATCH 0208/1530] feat(api): add correlationID to Track Chat requests (#3668) * Add CorrelationID to chat request Signed-off-by: Siddharth More * remove get_token_metrics Signed-off-by: Siddharth More * Add CorrelationID to proto Signed-off-by: Siddharth More * fix correlation method name Signed-off-by: Siddharth More * Update core/http/endpoints/openai/chat.go Co-authored-by: Ettore Di Giacinto Signed-off-by: Siddharth More * Update core/http/endpoints/openai/chat.go Signed-off-by: Ettore Di Giacinto Signed-off-by: Siddharth More --------- Signed-off-by: Siddharth More Signed-off-by: Ettore Di Giacinto Co-authored-by: Ettore Di Giacinto --- backend/backend.proto | 1 + backend/cpp/llama/grpc-server.cpp | 14 ++++++++++++++ core/http/endpoints/openai/chat.go | 7 +++++++ core/http/endpoints/openai/completion.go | 2 ++ core/http/endpoints/openai/request.go | 13 ++++++++++++- 5 files changed, 36 insertions(+), 1 deletion(-) diff --git a/backend/backend.proto b/backend/backend.proto index 31bd63e5..b2d4518e 100644 --- a/backend/backend.proto +++ b/backend/backend.proto @@ -136,6 +136,7 @@ message PredictOptions { repeated Message Messages = 44; repeated string Videos = 45; repeated string Audios = 46; + string CorrelationId = 47; } // The response message containing the result diff --git a/backend/cpp/llama/grpc-server.cpp b/backend/cpp/llama/grpc-server.cpp index 56d59d21..791612db 100644 --- 
a/backend/cpp/llama/grpc-server.cpp +++ b/backend/cpp/llama/grpc-server.cpp @@ -2106,6 +2106,9 @@ json parse_options(bool streaming, const backend::PredictOptions* predict, llama data["ignore_eos"] = predict->ignoreeos(); data["embeddings"] = predict->embeddings(); + // Add the correlationid to json data + data["correlation_id"] = predict->correlationid(); + // for each image in the request, add the image data // for (int i = 0; i < predict->images_size(); i++) { @@ -2344,6 +2347,11 @@ public: int32_t tokens_evaluated = result.result_json.value("tokens_evaluated", 0); reply.set_prompt_tokens(tokens_evaluated); + // Log Request Correlation Id + LOG_VERBOSE("correlation:", { + { "id", data["correlation_id"] } + }); + // Send the reply writer->Write(reply); @@ -2367,6 +2375,12 @@ public: std::string completion_text; task_result result = llama.queue_results.recv(task_id); if (!result.error && result.stop) { + + // Log Request Correlation Id + LOG_VERBOSE("correlation:", { + { "id", data["correlation_id"] } + }); + completion_text = result.result_json.value("content", ""); int32_t tokens_predicted = result.result_json.value("tokens_predicted", 0); int32_t tokens_evaluated = result.result_json.value("tokens_evaluated", 0); diff --git a/core/http/endpoints/openai/chat.go b/core/http/endpoints/openai/chat.go index b937120a..1ac1387e 100644 --- a/core/http/endpoints/openai/chat.go +++ b/core/http/endpoints/openai/chat.go @@ -161,6 +161,12 @@ func ChatEndpoint(cl *config.BackendConfigLoader, ml *model.ModelLoader, startup textContentToReturn = "" id = uuid.New().String() created = int(time.Now().Unix()) + // Set CorrelationID + correlationID := c.Get("X-Correlation-ID") + if len(strings.TrimSpace(correlationID)) == 0 { + correlationID = id + } + c.Set("X-Correlation-ID", correlationID) modelFile, input, err := readRequest(c, cl, ml, startupOptions, true) if err != nil { @@ -444,6 +450,7 @@ func ChatEndpoint(cl *config.BackendConfigLoader, ml *model.ModelLoader, startup 
c.Set("Cache-Control", "no-cache") c.Set("Connection", "keep-alive") c.Set("Transfer-Encoding", "chunked") + c.Set("X-Correlation-ID", id) responses := make(chan schema.OpenAIResponse) diff --git a/core/http/endpoints/openai/completion.go b/core/http/endpoints/openai/completion.go index b087cc5f..e5de1b3f 100644 --- a/core/http/endpoints/openai/completion.go +++ b/core/http/endpoints/openai/completion.go @@ -57,6 +57,8 @@ func CompletionEndpoint(cl *config.BackendConfigLoader, ml *model.ModelLoader, a } return func(c *fiber.Ctx) error { + // Add Correlation + c.Set("X-Correlation-ID", id) modelFile, input, err := readRequest(c, cl, ml, appConfig, true) if err != nil { return fmt.Errorf("failed reading parameters from request:%w", err) diff --git a/core/http/endpoints/openai/request.go b/core/http/endpoints/openai/request.go index e24dd28f..d6182a39 100644 --- a/core/http/endpoints/openai/request.go +++ b/core/http/endpoints/openai/request.go @@ -6,6 +6,7 @@ import ( "fmt" "github.com/gofiber/fiber/v2" + "github.com/google/uuid" "github.com/mudler/LocalAI/core/config" fiberContext "github.com/mudler/LocalAI/core/http/ctx" "github.com/mudler/LocalAI/core/schema" @@ -15,6 +16,11 @@ import ( "github.com/rs/zerolog/log" ) +type correlationIDKeyType string + +// CorrelationIDKey to track request across process boundary +const CorrelationIDKey correlationIDKeyType = "correlationID" + func readRequest(c *fiber.Ctx, cl *config.BackendConfigLoader, ml *model.ModelLoader, o *config.ApplicationConfig, firstModel bool) (string, *schema.OpenAIRequest, error) { input := new(schema.OpenAIRequest) @@ -24,9 +30,14 @@ func readRequest(c *fiber.Ctx, cl *config.BackendConfigLoader, ml *model.ModelLo } received, _ := json.Marshal(input) + // Extract or generate the correlation ID + correlationID := c.Get("X-Correlation-ID", uuid.New().String()) ctx, cancel := context.WithCancel(o.Context) - input.Context = ctx + // Add the correlation ID to the new context + ctxWithCorrelationID := 
context.WithValue(ctx, CorrelationIDKey, correlationID) + + input.Context = ctxWithCorrelationID input.Cancel = cancel log.Debug().Msgf("Request received: %s", string(received)) From 1689740269ef97e9778d75e78ae4d844520a113c Mon Sep 17 00:00:00 2001 From: Ettore Di Giacinto Date: Sun, 29 Sep 2024 20:39:39 +0200 Subject: [PATCH 0209/1530] models(gallery): add replete-llm-v2.5-qwen-14b (#3688) Signed-off-by: Ettore Di Giacinto --- gallery/index.yaml | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) diff --git a/gallery/index.yaml b/gallery/index.yaml index 847e004c..7701efd5 100644 --- a/gallery/index.yaml +++ b/gallery/index.yaml @@ -237,6 +237,23 @@ - filename: BigQwen2.5-52B-Instruct-Q4_K_M.gguf sha256: 9c939f08e366b51b07096eb2ecb5cc2a82894ac7baf639e446237ad39889c896 uri: huggingface://bartowski/BigQwen2.5-52B-Instruct-GGUF/BigQwen2.5-52B-Instruct-Q4_K_M.gguf +- !!merge <<: *qwen25 + name: "replete-llm-v2.5-qwen-14b" + icon: https://cdn-uploads.huggingface.co/production/uploads/642cc1c253e76b4c2286c58e/ihnWXDEgV-ZKN_B036U1J.png + urls: + - https://huggingface.co/Replete-AI/Replete-LLM-V2.5-Qwen-14b + - https://huggingface.co/bartowski/Replete-LLM-V2.5-Qwen-14b-GGUF + description: | + Replete-LLM-V2.5-Qwen-14b is a continues finetuned version of Qwen2.5-14B. I noticed recently that the Qwen team did not learn from my methods of continuous finetuning, the great benefits, and no downsides of it. So I took it upon myself to merge the instruct model with the base model myself using the Ties merge method + + This version of the model shows higher performance than the original instruct and base models. 
+ overrides: + parameters: + model: Replete-LLM-V2.5-Qwen-14b-Q4_K_M.gguf + files: + - filename: Replete-LLM-V2.5-Qwen-14b-Q4_K_M.gguf + sha256: 17d0792ff5e3062aecb965629f66e679ceb407e4542e8045993dcfe9e7e14d9d + uri: huggingface://bartowski/Replete-LLM-V2.5-Qwen-14b-GGUF/Replete-LLM-V2.5-Qwen-14b-Q4_K_M.gguf - &smollm ## SmolLM url: "github:mudler/LocalAI/gallery/chatml.yaml@master" From ad62156d548adaade746e9d702301da2c793d0b9 Mon Sep 17 00:00:00 2001 From: Ettore Di Giacinto Date: Sun, 29 Sep 2024 22:47:26 +0200 Subject: [PATCH 0210/1530] models(gallery): add replete-llm-v2.5-qwen-7b (#3689) Signed-off-by: Ettore Di Giacinto --- gallery/index.yaml | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git a/gallery/index.yaml b/gallery/index.yaml index 7701efd5..2ffbd05b 100644 --- a/gallery/index.yaml +++ b/gallery/index.yaml @@ -254,6 +254,24 @@ - filename: Replete-LLM-V2.5-Qwen-14b-Q4_K_M.gguf sha256: 17d0792ff5e3062aecb965629f66e679ceb407e4542e8045993dcfe9e7e14d9d uri: huggingface://bartowski/Replete-LLM-V2.5-Qwen-14b-GGUF/Replete-LLM-V2.5-Qwen-14b-Q4_K_M.gguf +- !!merge <<: *qwen25 + name: "replete-llm-v2.5-qwen-7b" + icon: https://cdn-uploads.huggingface.co/production/uploads/642cc1c253e76b4c2286c58e/ihnWXDEgV-ZKN_B036U1J.png + urls: + - https://huggingface.co/Replete-AI/Replete-LLM-V2.5-Qwen-7b + - https://huggingface.co/bartowski/Replete-LLM-V2.5-Qwen-7b-GGUF + description: | + Replete-LLM-V2.5-Qwen-7b is a continues finetuned version of Qwen2.5-14B. I noticed recently that the Qwen team did not learn from my methods of continuous finetuning, the great benefits, and no downsides of it. So I took it upon myself to merge the instruct model with the base model myself using the Ties merge method + + This version of the model shows higher performance than the original instruct and base models. 
+ overrides: + parameters: + model: Replete-LLM-V2.5-Qwen-7b-Q4_K_M.gguf + files: + - filename: Replete-LLM-V2.5-Qwen-7b-Q4_K_M.gguf + sha256: 054d54972259c0398b4e0af3f408f608e1166837b1d7535d08fc440d1daf8639 + uri: huggingface://bartowski/Replete-LLM-V2.5-Qwen-7b-GGUF/Replete-LLM-V2.5-Qwen-7b-Q4_K_M.gguf + - &smollm ## SmolLM url: "github:mudler/LocalAI/gallery/chatml.yaml@master" From 6dfee995754fb3853e02d69c370c670d636f4294 Mon Sep 17 00:00:00 2001 From: "LocalAI [bot]" <139863280+localai-bot@users.noreply.github.com> Date: Mon, 30 Sep 2024 09:09:18 +0200 Subject: [PATCH 0211/1530] chore: :arrow_up: Update ggerganov/llama.cpp to `c919d5db39c8a7fcb64737f008e4b105ee0acd20` (#3686) :arrow_up: Update ggerganov/llama.cpp Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com> Co-authored-by: mudler <2420543+mudler@users.noreply.github.com> Co-authored-by: Ettore Di Giacinto --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index aa926f4c..8617363c 100644 --- a/Makefile +++ b/Makefile @@ -8,7 +8,7 @@ DETECT_LIBS?=true # llama.cpp versions GOLLAMA_REPO?=https://github.com/go-skynet/go-llama.cpp GOLLAMA_VERSION?=2b57a8ae43e4699d3dc5d1496a1ccd42922993be -CPPLLAMA_VERSION?=b5de3b74a595cbfefab7eeb5a567425c6a9690cf +CPPLLAMA_VERSION?=c919d5db39c8a7fcb64737f008e4b105ee0acd20 # go-rwkv version RWKV_REPO?=https://github.com/donomii/go-rwkv.cpp From 078942fc9f741a35a189f295d1b4fb4ed1e26400 Mon Sep 17 00:00:00 2001 From: Ettore Di Giacinto Date: Mon, 30 Sep 2024 09:09:51 +0200 Subject: [PATCH 0212/1530] chore(deps): bump grpcio to 1.66.2 (#3690) Signed-off-by: Ettore Di Giacinto --- backend/python/autogptq/requirements.txt | 2 +- backend/python/bark/requirements.txt | 2 +- backend/python/common/template/requirements.txt | 2 +- backend/python/coqui/requirements.txt | 2 +- backend/python/diffusers/requirements.txt | 2 +- backend/python/exllama2/requirements.txt | 2 +- 
backend/python/mamba/requirements.txt | 2 +- backend/python/openvoice/requirements-intel.txt | 2 +- backend/python/openvoice/requirements.txt | 2 +- backend/python/parler-tts/requirements.txt | 2 +- backend/python/rerankers/requirements.txt | 2 +- backend/python/sentencetransformers/requirements.txt | 2 +- backend/python/transformers-musicgen/requirements.txt | 2 +- backend/python/transformers/requirements.txt | 2 +- backend/python/vall-e-x/requirements.txt | 2 +- backend/python/vllm/requirements.txt | 2 +- 16 files changed, 16 insertions(+), 16 deletions(-) diff --git a/backend/python/autogptq/requirements.txt b/backend/python/autogptq/requirements.txt index 150fcc1b..9cb6ce94 100644 --- a/backend/python/autogptq/requirements.txt +++ b/backend/python/autogptq/requirements.txt @@ -1,6 +1,6 @@ accelerate auto-gptq==0.7.1 -grpcio==1.66.1 +grpcio==1.66.2 protobuf certifi transformers \ No newline at end of file diff --git a/backend/python/bark/requirements.txt b/backend/python/bark/requirements.txt index 6404b98e..6e46924a 100644 --- a/backend/python/bark/requirements.txt +++ b/backend/python/bark/requirements.txt @@ -1,4 +1,4 @@ bark==0.1.5 -grpcio==1.66.1 +grpcio==1.66.2 protobuf certifi \ No newline at end of file diff --git a/backend/python/common/template/requirements.txt b/backend/python/common/template/requirements.txt index 21610c1c..540c0eb5 100644 --- a/backend/python/common/template/requirements.txt +++ b/backend/python/common/template/requirements.txt @@ -1,2 +1,2 @@ -grpcio==1.66.1 +grpcio==1.66.2 protobuf \ No newline at end of file diff --git a/backend/python/coqui/requirements.txt b/backend/python/coqui/requirements.txt index 2a91f2b9..29484f7d 100644 --- a/backend/python/coqui/requirements.txt +++ b/backend/python/coqui/requirements.txt @@ -1,4 +1,4 @@ coqui-tts -grpcio==1.66.1 +grpcio==1.66.2 protobuf certifi \ No newline at end of file diff --git a/backend/python/diffusers/requirements.txt b/backend/python/diffusers/requirements.txt index 
043c7aba..730e316f 100644 --- a/backend/python/diffusers/requirements.txt +++ b/backend/python/diffusers/requirements.txt @@ -1,5 +1,5 @@ setuptools -grpcio==1.66.1 +grpcio==1.66.2 pillow protobuf certifi diff --git a/backend/python/exllama2/requirements.txt b/backend/python/exllama2/requirements.txt index 6fb018a0..e3db2b2f 100644 --- a/backend/python/exllama2/requirements.txt +++ b/backend/python/exllama2/requirements.txt @@ -1,4 +1,4 @@ -grpcio==1.66.1 +grpcio==1.66.2 protobuf certifi wheel diff --git a/backend/python/mamba/requirements.txt b/backend/python/mamba/requirements.txt index 8e1b0195..83ae4279 100644 --- a/backend/python/mamba/requirements.txt +++ b/backend/python/mamba/requirements.txt @@ -1,3 +1,3 @@ -grpcio==1.66.1 +grpcio==1.66.2 protobuf certifi \ No newline at end of file diff --git a/backend/python/openvoice/requirements-intel.txt b/backend/python/openvoice/requirements-intel.txt index a9a4cc20..c568dab1 100644 --- a/backend/python/openvoice/requirements-intel.txt +++ b/backend/python/openvoice/requirements-intel.txt @@ -2,7 +2,7 @@ intel-extension-for-pytorch torch optimum[openvino] -grpcio==1.66.1 +grpcio==1.66.2 protobuf librosa==0.9.1 faster-whisper==1.0.3 diff --git a/backend/python/openvoice/requirements.txt b/backend/python/openvoice/requirements.txt index b38805be..6ee29ce4 100644 --- a/backend/python/openvoice/requirements.txt +++ b/backend/python/openvoice/requirements.txt @@ -1,4 +1,4 @@ -grpcio==1.66.1 +grpcio==1.66.2 protobuf librosa faster-whisper diff --git a/backend/python/parler-tts/requirements.txt b/backend/python/parler-tts/requirements.txt index 0da3da13..d7f36feb 100644 --- a/backend/python/parler-tts/requirements.txt +++ b/backend/python/parler-tts/requirements.txt @@ -1,4 +1,4 @@ -grpcio==1.66.1 +grpcio==1.66.2 protobuf certifi llvmlite==0.43.0 \ No newline at end of file diff --git a/backend/python/rerankers/requirements.txt b/backend/python/rerankers/requirements.txt index 8e1b0195..83ae4279 100644 --- 
a/backend/python/rerankers/requirements.txt +++ b/backend/python/rerankers/requirements.txt @@ -1,3 +1,3 @@ -grpcio==1.66.1 +grpcio==1.66.2 protobuf certifi \ No newline at end of file diff --git a/backend/python/sentencetransformers/requirements.txt b/backend/python/sentencetransformers/requirements.txt index b9cb6061..40a387f1 100644 --- a/backend/python/sentencetransformers/requirements.txt +++ b/backend/python/sentencetransformers/requirements.txt @@ -1,4 +1,4 @@ -grpcio==1.66.1 +grpcio==1.66.2 protobuf certifi datasets diff --git a/backend/python/transformers-musicgen/requirements.txt b/backend/python/transformers-musicgen/requirements.txt index fb1119a9..a3f66651 100644 --- a/backend/python/transformers-musicgen/requirements.txt +++ b/backend/python/transformers-musicgen/requirements.txt @@ -1,4 +1,4 @@ -grpcio==1.66.1 +grpcio==1.66.2 protobuf scipy==1.14.0 certifi \ No newline at end of file diff --git a/backend/python/transformers/requirements.txt b/backend/python/transformers/requirements.txt index b19c59c0..084cc034 100644 --- a/backend/python/transformers/requirements.txt +++ b/backend/python/transformers/requirements.txt @@ -1,4 +1,4 @@ -grpcio==1.66.1 +grpcio==1.66.2 protobuf certifi setuptools==69.5.1 # https://github.com/mudler/LocalAI/issues/2406 \ No newline at end of file diff --git a/backend/python/vall-e-x/requirements.txt b/backend/python/vall-e-x/requirements.txt index 8e1b0195..83ae4279 100644 --- a/backend/python/vall-e-x/requirements.txt +++ b/backend/python/vall-e-x/requirements.txt @@ -1,3 +1,3 @@ -grpcio==1.66.1 +grpcio==1.66.2 protobuf certifi \ No newline at end of file diff --git a/backend/python/vllm/requirements.txt b/backend/python/vllm/requirements.txt index b9c192d5..8fb8a418 100644 --- a/backend/python/vllm/requirements.txt +++ b/backend/python/vllm/requirements.txt @@ -1,4 +1,4 @@ -grpcio==1.66.1 +grpcio==1.66.2 protobuf certifi setuptools \ No newline at end of file From 58662db48eaecd2d39d65a0c229a47032a6833d6 Mon Sep 17 
00:00:00 2001 From: Ettore Di Giacinto Date: Mon, 30 Sep 2024 17:11:54 +0200 Subject: [PATCH 0213/1530] models(gallery): add calme-2.2-qwen2.5-72b-i1 (#3691) Signed-off-by: Ettore Di Giacinto --- gallery/index.yaml | 23 +++++++++++++++++++++++ 1 file changed, 23 insertions(+) diff --git a/gallery/index.yaml b/gallery/index.yaml index 2ffbd05b..0924e5cf 100644 --- a/gallery/index.yaml +++ b/gallery/index.yaml @@ -271,7 +271,30 @@ - filename: Replete-LLM-V2.5-Qwen-7b-Q4_K_M.gguf sha256: 054d54972259c0398b4e0af3f408f608e1166837b1d7535d08fc440d1daf8639 uri: huggingface://bartowski/Replete-LLM-V2.5-Qwen-7b-GGUF/Replete-LLM-V2.5-Qwen-7b-Q4_K_M.gguf +- !!merge <<: *qwen25 + name: "calme-2.2-qwen2.5-72b-i1" + icon: https://huggingface.co/MaziyarPanahi/calme-2.2-qwen2.5-72b/resolve/main/calme-2.webp + urls: + - https://huggingface.co/MaziyarPanahi/calme-2.2-qwen2.5-72b + - https://huggingface.co/mradermacher/calme-2.2-qwen2.5-72b-i1-GGUF + description: | + This model is a fine-tuned version of the powerful Qwen/Qwen2.5-72B-Instruct, pushing the boundaries of natural language understanding and generation even further. My goal was to create a versatile and robust model that excels across a wide range of benchmarks and real-world applications. 
+ Use Cases + This model is suitable for a wide range of applications, including but not limited to: + + Advanced question-answering systems + Intelligent chatbots and virtual assistants + Content generation and summarization + Code generation and analysis + Complex problem-solving and decision support + overrides: + parameters: + model: calme-2.2-qwen2.5-72b.i1-Q4_K_M.gguf + files: + - filename: calme-2.2-qwen2.5-72b.i1-Q4_K_M.gguf + sha256: 5fdfa599724d7c78502c477ced1d294e92781b91d3265bd0748fbf15a6fefde6 + uri: huggingface://mradermacher/calme-2.2-qwen2.5-72b-i1-GGUF/calme-2.2-qwen2.5-72b.i1-Q4_K_M.gguf - &smollm ## SmolLM url: "github:mudler/LocalAI/gallery/chatml.yaml@master" From d747f2c89bc71cf4ca57539d68472d7b9e3bf0f7 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 30 Sep 2024 21:08:16 +0000 Subject: [PATCH 0214/1530] chore(deps): Bump openai from 1.47.1 to 1.50.2 in /examples/langchain-chroma (#3697) chore(deps): Bump openai in /examples/langchain-chroma Bumps [openai](https://github.com/openai/openai-python) from 1.47.1 to 1.50.2. - [Release notes](https://github.com/openai/openai-python/releases) - [Changelog](https://github.com/openai/openai-python/blob/main/CHANGELOG.md) - [Commits](https://github.com/openai/openai-python/compare/v1.47.1...v1.50.2) --- updated-dependencies: - dependency-name: openai dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- examples/langchain-chroma/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/langchain-chroma/requirements.txt b/examples/langchain-chroma/requirements.txt index 19929482..b6404437 100644 --- a/examples/langchain-chroma/requirements.txt +++ b/examples/langchain-chroma/requirements.txt @@ -1,4 +1,4 @@ langchain==0.3.0 -openai==1.47.1 +openai==1.50.2 chromadb==0.5.7 llama-index==0.11.12 \ No newline at end of file From 164a9e972fed51dae394e01ffd59a9a04b6ee44a Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 1 Oct 2024 01:37:30 +0000 Subject: [PATCH 0215/1530] chore(deps): Bump chromadb from 0.5.7 to 0.5.11 in /examples/langchain-chroma (#3696) chore(deps): Bump chromadb in /examples/langchain-chroma Bumps [chromadb](https://github.com/chroma-core/chroma) from 0.5.7 to 0.5.11. - [Release notes](https://github.com/chroma-core/chroma/releases) - [Changelog](https://github.com/chroma-core/chroma/blob/main/RELEASE_PROCESS.md) - [Commits](https://github.com/chroma-core/chroma/compare/0.5.7...0.5.11) --- updated-dependencies: - dependency-name: chromadb dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- examples/langchain-chroma/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/langchain-chroma/requirements.txt b/examples/langchain-chroma/requirements.txt index b6404437..756a6bf3 100644 --- a/examples/langchain-chroma/requirements.txt +++ b/examples/langchain-chroma/requirements.txt @@ -1,4 +1,4 @@ langchain==0.3.0 openai==1.50.2 -chromadb==0.5.7 +chromadb==0.5.11 llama-index==0.11.12 \ No newline at end of file From 32de75c68326758eac7f714fc522eb65c36fde18 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 1 Oct 2024 03:13:37 +0000 Subject: [PATCH 0216/1530] chore(deps): Bump langchain from 0.3.0 to 0.3.1 in /examples/langchain-chroma (#3694) chore(deps): Bump langchain in /examples/langchain-chroma Bumps [langchain](https://github.com/langchain-ai/langchain) from 0.3.0 to 0.3.1. - [Release notes](https://github.com/langchain-ai/langchain/releases) - [Commits](https://github.com/langchain-ai/langchain/compare/langchain==0.3.0...langchain==0.3.1) --- updated-dependencies: - dependency-name: langchain dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- examples/langchain-chroma/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/langchain-chroma/requirements.txt b/examples/langchain-chroma/requirements.txt index 756a6bf3..fda5f9d8 100644 --- a/examples/langchain-chroma/requirements.txt +++ b/examples/langchain-chroma/requirements.txt @@ -1,4 +1,4 @@ -langchain==0.3.0 +langchain==0.3.1 openai==1.50.2 chromadb==0.5.11 llama-index==0.11.12 \ No newline at end of file From f19277b8e2bc148193650a26927f183bc106c50a Mon Sep 17 00:00:00 2001 From: "LocalAI [bot]" <139863280+localai-bot@users.noreply.github.com> Date: Tue, 1 Oct 2024 08:47:48 +0200 Subject: [PATCH 0217/1530] chore: :arrow_up: Update ggerganov/llama.cpp to `6f1d9d71f4c568778a7637ff6582e6f6ba5fb9d3` (#3708) :arrow_up: Update ggerganov/llama.cpp Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com> Co-authored-by: mudler <2420543+mudler@users.noreply.github.com> --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index 8617363c..6c6dbd21 100644 --- a/Makefile +++ b/Makefile @@ -8,7 +8,7 @@ DETECT_LIBS?=true # llama.cpp versions GOLLAMA_REPO?=https://github.com/go-skynet/go-llama.cpp GOLLAMA_VERSION?=2b57a8ae43e4699d3dc5d1496a1ccd42922993be -CPPLLAMA_VERSION?=c919d5db39c8a7fcb64737f008e4b105ee0acd20 +CPPLLAMA_VERSION?=6f1d9d71f4c568778a7637ff6582e6f6ba5fb9d3 # go-rwkv version RWKV_REPO?=https://github.com/donomii/go-rwkv.cpp From 2908ff3f6b7a63fcd89e0cf5571c0409257209ac Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 1 Oct 2024 08:50:40 +0200 Subject: [PATCH 0218/1530] chore(deps): Bump securego/gosec from 2.21.0 to 2.21.4 (#3698) Bumps [securego/gosec](https://github.com/securego/gosec) from 2.21.0 to 2.21.4. 
- [Release notes](https://github.com/securego/gosec/releases) - [Changelog](https://github.com/securego/gosec/blob/master/.goreleaser.yml) - [Commits](https://github.com/securego/gosec/compare/v2.21.0...v2.21.4) --- updated-dependencies: - dependency-name: securego/gosec dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/secscan.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/secscan.yaml b/.github/workflows/secscan.yaml index db9db586..3fd808e1 100644 --- a/.github/workflows/secscan.yaml +++ b/.github/workflows/secscan.yaml @@ -18,7 +18,7 @@ jobs: if: ${{ github.actor != 'dependabot[bot]' }} - name: Run Gosec Security Scanner if: ${{ github.actor != 'dependabot[bot]' }} - uses: securego/gosec@v2.21.0 + uses: securego/gosec@v2.21.4 with: # we let the report trigger content trigger a failure using the GitHub Security features. args: '-no-fail -fmt sarif -out results.sarif ./...' From 6bd6e2bdeb74e52a291052c4c8b808178ed40d90 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 1 Oct 2024 08:51:07 +0200 Subject: [PATCH 0219/1530] chore(deps): Bump openai from 1.47.1 to 1.50.2 in /examples/functions (#3699) Bumps [openai](https://github.com/openai/openai-python) from 1.47.1 to 1.50.2. - [Release notes](https://github.com/openai/openai-python/releases) - [Changelog](https://github.com/openai/openai-python/blob/main/CHANGELOG.md) - [Commits](https://github.com/openai/openai-python/compare/v1.47.1...v1.50.2) --- updated-dependencies: - dependency-name: openai dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- examples/functions/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/functions/requirements.txt b/examples/functions/requirements.txt index c3ffad01..9ad014fd 100644 --- a/examples/functions/requirements.txt +++ b/examples/functions/requirements.txt @@ -1,2 +1,2 @@ langchain==0.3.0 -openai==1.47.1 +openai==1.50.2 From 44bdacac61a319992f3bf3a32f756a65862617ed Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 1 Oct 2024 08:51:29 +0200 Subject: [PATCH 0220/1530] chore(deps): Bump langchain from 0.3.0 to 0.3.1 in /examples/langchain/langchainpy-localai-example (#3704) chore(deps): Bump langchain Bumps [langchain](https://github.com/langchain-ai/langchain) from 0.3.0 to 0.3.1. - [Release notes](https://github.com/langchain-ai/langchain/releases) - [Commits](https://github.com/langchain-ai/langchain/compare/langchain==0.3.0...langchain==0.3.1) --- updated-dependencies: - dependency-name: langchain dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- examples/langchain/langchainpy-localai-example/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/langchain/langchainpy-localai-example/requirements.txt b/examples/langchain/langchainpy-localai-example/requirements.txt index 179abc2a..daa467c7 100644 --- a/examples/langchain/langchainpy-localai-example/requirements.txt +++ b/examples/langchain/langchainpy-localai-example/requirements.txt @@ -10,7 +10,7 @@ debugpy==1.8.2 frozenlist==1.4.1 greenlet==3.1.0 idna==3.10 -langchain==0.3.0 +langchain==0.3.1 langchain-community==0.2.16 marshmallow==3.22.0 marshmallow-enum==1.5.1 From 7d306c6431ddba153704e5513e716288c7d73d09 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 1 Oct 2024 10:39:55 +0200 Subject: [PATCH 0221/1530] chore(deps): Bump greenlet from 3.1.0 to 3.1.1 in /examples/langchain/langchainpy-localai-example (#3703) chore(deps): Bump greenlet Bumps [greenlet](https://github.com/python-greenlet/greenlet) from 3.1.0 to 3.1.1. - [Changelog](https://github.com/python-greenlet/greenlet/blob/master/CHANGES.rst) - [Commits](https://github.com/python-greenlet/greenlet/compare/3.1.0...3.1.1) --- updated-dependencies: - dependency-name: greenlet dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- examples/langchain/langchainpy-localai-example/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/langchain/langchainpy-localai-example/requirements.txt b/examples/langchain/langchainpy-localai-example/requirements.txt index daa467c7..205c726c 100644 --- a/examples/langchain/langchainpy-localai-example/requirements.txt +++ b/examples/langchain/langchainpy-localai-example/requirements.txt @@ -8,7 +8,7 @@ colorama==0.4.6 dataclasses-json==0.6.7 debugpy==1.8.2 frozenlist==1.4.1 -greenlet==3.1.0 +greenlet==3.1.1 idna==3.10 langchain==0.3.1 langchain-community==0.2.16 From d4d2a76f8f4b1379c2c554c911ba64ec1bbda389 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 1 Oct 2024 10:40:08 +0200 Subject: [PATCH 0222/1530] chore(deps): Bump langchain from 0.3.0 to 0.3.1 in /examples/functions (#3700) Bumps [langchain](https://github.com/langchain-ai/langchain) from 0.3.0 to 0.3.1. - [Release notes](https://github.com/langchain-ai/langchain/releases) - [Commits](https://github.com/langchain-ai/langchain/compare/langchain==0.3.0...langchain==0.3.1) --- updated-dependencies: - dependency-name: langchain dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- examples/functions/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/functions/requirements.txt b/examples/functions/requirements.txt index 9ad014fd..952f9d62 100644 --- a/examples/functions/requirements.txt +++ b/examples/functions/requirements.txt @@ -1,2 +1,2 @@ -langchain==0.3.0 +langchain==0.3.1 openai==1.50.2 From 76d4e88e0c21b245e74e8ba6e15a5d937d1fdfb0 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 1 Oct 2024 10:40:21 +0200 Subject: [PATCH 0223/1530] chore(deps): Bump langchain-community from 0.2.16 to 0.3.1 in /examples/langchain/langchainpy-localai-example (#3702) chore(deps): Bump langchain-community Bumps [langchain-community](https://github.com/langchain-ai/langchain) from 0.2.16 to 0.3.1. - [Release notes](https://github.com/langchain-ai/langchain/releases) - [Commits](https://github.com/langchain-ai/langchain/compare/langchain-community==0.2.16...langchain-community==0.3.1) --- updated-dependencies: - dependency-name: langchain-community dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- examples/langchain/langchainpy-localai-example/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/langchain/langchainpy-localai-example/requirements.txt b/examples/langchain/langchainpy-localai-example/requirements.txt index 205c726c..b5f3960e 100644 --- a/examples/langchain/langchainpy-localai-example/requirements.txt +++ b/examples/langchain/langchainpy-localai-example/requirements.txt @@ -11,7 +11,7 @@ frozenlist==1.4.1 greenlet==3.1.1 idna==3.10 langchain==0.3.1 -langchain-community==0.2.16 +langchain-community==0.3.1 marshmallow==3.22.0 marshmallow-enum==1.5.1 multidict==6.0.5 From 0a8f627cce98be2c4469309b5a54b45b97930b63 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 1 Oct 2024 10:40:36 +0200 Subject: [PATCH 0224/1530] chore(deps): Bump gradio from 4.38.1 to 4.44.1 in /backend/python/openvoice (#3701) chore(deps): Bump gradio in /backend/python/openvoice Bumps [gradio](https://github.com/gradio-app/gradio) from 4.38.1 to 4.44.1. - [Release notes](https://github.com/gradio-app/gradio/releases) - [Changelog](https://github.com/gradio-app/gradio/blob/main/CHANGELOG.md) - [Commits](https://github.com/gradio-app/gradio/compare/gradio@4.38.1...gradio@4.44.1) --- updated-dependencies: - dependency-name: gradio dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- backend/python/openvoice/requirements-intel.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/backend/python/openvoice/requirements-intel.txt b/backend/python/openvoice/requirements-intel.txt index c568dab1..687efe78 100644 --- a/backend/python/openvoice/requirements-intel.txt +++ b/backend/python/openvoice/requirements-intel.txt @@ -18,6 +18,6 @@ python-dotenv pypinyin==0.50.0 cn2an==0.5.22 jieba==0.42.1 -gradio==4.38.1 +gradio==4.44.1 langid==1.1.6 git+https://github.com/myshell-ai/MeloTTS.git From 2649407f44cf7c1c822fb671c6501ec899d1fc6f Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 1 Oct 2024 10:40:49 +0200 Subject: [PATCH 0225/1530] chore(deps): Bump llama-index from 0.11.12 to 0.11.14 in /examples/langchain-chroma (#3695) chore(deps): Bump llama-index in /examples/langchain-chroma Bumps [llama-index](https://github.com/run-llama/llama_index) from 0.11.12 to 0.11.14. - [Release notes](https://github.com/run-llama/llama_index/releases) - [Changelog](https://github.com/run-llama/llama_index/blob/main/CHANGELOG.md) - [Commits](https://github.com/run-llama/llama_index/compare/v0.11.12...v0.11.14) --- updated-dependencies: - dependency-name: llama-index dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- examples/langchain-chroma/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/langchain-chroma/requirements.txt b/examples/langchain-chroma/requirements.txt index fda5f9d8..d84311b3 100644 --- a/examples/langchain-chroma/requirements.txt +++ b/examples/langchain-chroma/requirements.txt @@ -1,4 +1,4 @@ langchain==0.3.1 openai==1.50.2 chromadb==0.5.11 -llama-index==0.11.12 \ No newline at end of file +llama-index==0.11.14 \ No newline at end of file From 53f406dc35485df76450f65ba11ba548cb86f196 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 1 Oct 2024 10:41:04 +0200 Subject: [PATCH 0226/1530] chore(deps): Bump aiohttp from 3.10.3 to 3.10.8 in /examples/langchain/langchainpy-localai-example (#3705) chore(deps): Bump aiohttp Bumps [aiohttp](https://github.com/aio-libs/aiohttp) from 3.10.3 to 3.10.8. - [Release notes](https://github.com/aio-libs/aiohttp/releases) - [Changelog](https://github.com/aio-libs/aiohttp/blob/master/CHANGES.rst) - [Commits](https://github.com/aio-libs/aiohttp/compare/v3.10.3...v3.10.8) --- updated-dependencies: - dependency-name: aiohttp dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- examples/langchain/langchainpy-localai-example/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/langchain/langchainpy-localai-example/requirements.txt b/examples/langchain/langchainpy-localai-example/requirements.txt index b5f3960e..53812966 100644 --- a/examples/langchain/langchainpy-localai-example/requirements.txt +++ b/examples/langchain/langchainpy-localai-example/requirements.txt @@ -1,4 +1,4 @@ -aiohttp==3.10.3 +aiohttp==3.10.8 aiosignal==1.3.1 async-timeout==4.0.3 attrs==24.2.0 From a30058b80f1b23407188a689ec514385ccfa63f9 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 1 Oct 2024 10:41:16 +0200 Subject: [PATCH 0227/1530] chore(deps): Bump yarl from 1.11.1 to 1.13.1 in /examples/langchain/langchainpy-localai-example (#3706) chore(deps): Bump yarl Bumps [yarl](https://github.com/aio-libs/yarl) from 1.11.1 to 1.13.1. - [Release notes](https://github.com/aio-libs/yarl/releases) - [Changelog](https://github.com/aio-libs/yarl/blob/master/CHANGES.rst) - [Commits](https://github.com/aio-libs/yarl/compare/v1.11.1...v1.13.1) --- updated-dependencies: - dependency-name: yarl dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- examples/langchain/langchainpy-localai-example/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/langchain/langchainpy-localai-example/requirements.txt b/examples/langchain/langchainpy-localai-example/requirements.txt index 53812966..1d48dee8 100644 --- a/examples/langchain/langchainpy-localai-example/requirements.txt +++ b/examples/langchain/langchainpy-localai-example/requirements.txt @@ -30,4 +30,4 @@ tqdm==4.66.5 typing-inspect==0.9.0 typing_extensions==4.12.2 urllib3==2.2.3 -yarl==1.11.1 +yarl==1.13.1 From 139209353f74100e495471dfbc41f9900a2212fd Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 1 Oct 2024 10:41:30 +0200 Subject: [PATCH 0228/1530] chore(deps): Bump llama-index from 0.11.12 to 0.11.14 in /examples/chainlit (#3707) chore(deps): Bump llama-index in /examples/chainlit Bumps [llama-index](https://github.com/run-llama/llama_index) from 0.11.12 to 0.11.14. - [Release notes](https://github.com/run-llama/llama_index/releases) - [Changelog](https://github.com/run-llama/llama_index/blob/main/CHANGELOG.md) - [Commits](https://github.com/run-llama/llama_index/compare/v0.11.12...v0.11.14) --- updated-dependencies: - dependency-name: llama-index dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- examples/chainlit/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/chainlit/requirements.txt b/examples/chainlit/requirements.txt index 92eb113e..ee6c63ac 100644 --- a/examples/chainlit/requirements.txt +++ b/examples/chainlit/requirements.txt @@ -1,4 +1,4 @@ -llama_index==0.11.12 +llama_index==0.11.14 requests==2.32.3 weaviate_client==4.8.1 transformers From f84b55d1efdc1484c876c25b53220da63adce848 Mon Sep 17 00:00:00 2001 From: siddimore Date: Tue, 1 Oct 2024 05:41:20 -0700 Subject: [PATCH 0229/1530] feat: Add Get Token Metrics to GRPC server (#3687) * Add Get Token Metrics to GRPC server Signed-off-by: Siddharth More * Expose LocalAI endpoint Signed-off-by: Siddharth More --------- Signed-off-by: Siddharth More --- backend/backend.proto | 13 ++++ backend/cpp/llama/grpc-server.cpp | 35 +++++++++++ core/backend/token_metrics.go | 44 ++++++++++++++ .../endpoints/localai/get_token_metrics.go | 60 +++++++++++++++++++ core/schema/localai.go | 4 ++ pkg/grpc/backend.go | 2 + pkg/grpc/client.go | 18 ++++++ pkg/grpc/embed.go | 4 ++ 8 files changed, 180 insertions(+) create mode 100644 core/backend/token_metrics.go create mode 100644 core/http/endpoints/localai/get_token_metrics.go diff --git a/backend/backend.proto b/backend/backend.proto index b2d4518e..568655b6 100644 --- a/backend/backend.proto +++ b/backend/backend.proto @@ -26,6 +26,19 @@ service Backend { rpc StoresFind(StoresFindOptions) returns (StoresFindResult) {} rpc Rerank(RerankRequest) returns (RerankResult) {} + + rpc GetMetrics(MetricsRequest) returns (MetricsResponse); +} + +// Define the empty request +message MetricsRequest {} + +message MetricsResponse { + int32 slot_id = 1; + string prompt_json_for_slot = 2; // Stores the prompt as a JSON string. 
+ float tokens_per_second = 3; + int32 tokens_generated = 4; + int32 prompt_tokens_processed = 5; } message RerankRequest { diff --git a/backend/cpp/llama/grpc-server.cpp b/backend/cpp/llama/grpc-server.cpp index 791612db..be99bf76 100644 --- a/backend/cpp/llama/grpc-server.cpp +++ b/backend/cpp/llama/grpc-server.cpp @@ -495,6 +495,16 @@ struct llama_server_context } } + llama_client_slot* get_active_slot() { + for (llama_client_slot& slot : slots) { + // Check if the slot is currently processing + if (slot.is_processing()) { + return &slot; // Return the active slot + } + } + return nullptr; // No active slot found + } + void initialize() { // create slots all_slots_are_idle = true; @@ -2420,6 +2430,31 @@ public: return grpc::Status::OK; } + + grpc::Status GetMetrics(ServerContext* context, const backend::MetricsRequest* request, backend::MetricsResponse* response) { + llama_client_slot* active_slot = llama.get_active_slot(); + + if (active_slot != nullptr) { + // Calculate the tokens per second using existing logic + double tokens_per_second = 1e3 / active_slot->t_token_generation * active_slot->n_decoded; + + // Populate the response with metrics + response->set_slot_id(active_slot->id); + response->set_prompt_json_for_slot(active_slot->prompt.dump()); + response->set_tokens_per_second(tokens_per_second); + response->set_tokens_generated(active_slot->n_decoded); + response->set_prompt_tokens_processed(active_slot->num_prompt_tokens_processed); + } else { + // Handle case when no active slot exists + response->set_slot_id(0); + response->set_prompt_json_for_slot(""); + response->set_tokens_per_second(0); + response->set_tokens_generated(0); + response->set_prompt_tokens_processed(0); + } + + return grpc::Status::OK; + } }; void RunServer(const std::string& server_address) { diff --git a/core/backend/token_metrics.go b/core/backend/token_metrics.go new file mode 100644 index 00000000..cd715108 --- /dev/null +++ b/core/backend/token_metrics.go @@ -0,0 +1,44 @@ 
+package backend + +import ( + "context" + "fmt" + + "github.com/mudler/LocalAI/core/config" + "github.com/mudler/LocalAI/pkg/grpc/proto" + model "github.com/mudler/LocalAI/pkg/model" +) + +func TokenMetrics( + backend, + modelFile string, + loader *model.ModelLoader, + appConfig *config.ApplicationConfig, + backendConfig config.BackendConfig) (*proto.MetricsResponse, error) { + bb := backend + if bb == "" { + return nil, fmt.Errorf("backend is required") + } + + grpcOpts := GRPCModelOpts(backendConfig) + + opts := modelOpts(config.BackendConfig{}, appConfig, []model.Option{ + model.WithBackendString(bb), + model.WithModel(modelFile), + model.WithContext(appConfig.Context), + model.WithAssetDir(appConfig.AssetsDestination), + model.WithLoadGRPCLoadModelOpts(grpcOpts), + }) + model, err := loader.BackendLoader(opts...) + if err != nil { + return nil, err + } + + if model == nil { + return nil, fmt.Errorf("could not loadmodel model") + } + + res, err := model.GetTokenMetrics(context.Background(), &proto.MetricsRequest{}) + + return res, err +} diff --git a/core/http/endpoints/localai/get_token_metrics.go b/core/http/endpoints/localai/get_token_metrics.go new file mode 100644 index 00000000..95e79bac --- /dev/null +++ b/core/http/endpoints/localai/get_token_metrics.go @@ -0,0 +1,60 @@ +package localai + +import ( + "github.com/gofiber/fiber/v2" + "github.com/mudler/LocalAI/core/backend" + "github.com/mudler/LocalAI/core/config" + fiberContext "github.com/mudler/LocalAI/core/http/ctx" + "github.com/mudler/LocalAI/core/schema" + "github.com/rs/zerolog/log" + + "github.com/mudler/LocalAI/pkg/model" +) + +// TokenMetricsEndpoint is an endpoint to get TokensProcessed Per Second for Active SlotID +// +// @Summary Get TokenMetrics for Active Slot. 
+// @Accept json +// @Produce audio/x-wav +// @Success 200 {string} binary "generated audio/wav file" +// @Router /v1/tokenMetrics [get] +// @Router /tokenMetrics [get] +func TokenMetricsEndpoint(cl *config.BackendConfigLoader, ml *model.ModelLoader, appConfig *config.ApplicationConfig) func(c *fiber.Ctx) error { + return func(c *fiber.Ctx) error { + + input := new(schema.TokenMetricsRequest) + + // Get input data from the request body + if err := c.BodyParser(input); err != nil { + return err + } + + modelFile, err := fiberContext.ModelFromContext(c, cl, ml, input.Model, false) + if err != nil { + modelFile = input.Model + log.Warn().Msgf("Model not found in context: %s", input.Model) + } + + cfg, err := cl.LoadBackendConfigFileByName(modelFile, appConfig.ModelPath, + config.LoadOptionDebug(appConfig.Debug), + config.LoadOptionThreads(appConfig.Threads), + config.LoadOptionContextSize(appConfig.ContextSize), + config.LoadOptionF16(appConfig.F16), + ) + + if err != nil { + log.Err(err) + modelFile = input.Model + log.Warn().Msgf("Model not found in context: %s", input.Model) + } else { + modelFile = cfg.Model + } + log.Debug().Msgf("Token Metrics for model: %s", modelFile) + + response, err := backend.TokenMetrics(cfg.Backend, modelFile, ml, appConfig, *cfg) + if err != nil { + return err + } + return c.JSON(response) + } +} diff --git a/core/schema/localai.go b/core/schema/localai.go index 75fa40c7..cdc3e5b0 100644 --- a/core/schema/localai.go +++ b/core/schema/localai.go @@ -10,6 +10,10 @@ type BackendMonitorRequest struct { Model string `json:"model" yaml:"model"` } +type TokenMetricsRequest struct { + Model string `json:"model" yaml:"model"` +} + type BackendMonitorResponse struct { MemoryInfo *gopsutil.MemoryInfoStat MemoryPercent float32 diff --git a/pkg/grpc/backend.go b/pkg/grpc/backend.go index 85c9e5bc..637a6db1 100644 --- a/pkg/grpc/backend.go +++ b/pkg/grpc/backend.go @@ -51,4 +51,6 @@ type Backend interface { StoresFind(ctx context.Context, in 
*pb.StoresFindOptions, opts ...grpc.CallOption) (*pb.StoresFindResult, error) Rerank(ctx context.Context, in *pb.RerankRequest, opts ...grpc.CallOption) (*pb.RerankResult, error) + + GetTokenMetrics(ctx context.Context, in *pb.MetricsRequest, opts ...grpc.CallOption) (*pb.MetricsResponse, error) } diff --git a/pkg/grpc/client.go b/pkg/grpc/client.go index 032c9c00..14481620 100644 --- a/pkg/grpc/client.go +++ b/pkg/grpc/client.go @@ -374,3 +374,21 @@ func (c *Client) Rerank(ctx context.Context, in *pb.RerankRequest, opts ...grpc. client := pb.NewBackendClient(conn) return client.Rerank(ctx, in, opts...) } + +func (c *Client) GetTokenMetrics(ctx context.Context, in *pb.MetricsRequest, opts ...grpc.CallOption) (*pb.MetricsResponse, error) { + if !c.parallel { + c.opMutex.Lock() + defer c.opMutex.Unlock() + } + c.setBusy(true) + defer c.setBusy(false) + c.wdMark() + defer c.wdUnMark() + conn, err := grpc.Dial(c.address, grpc.WithTransportCredentials(insecure.NewCredentials())) + if err != nil { + return nil, err + } + defer conn.Close() + client := pb.NewBackendClient(conn) + return client.GetMetrics(ctx, in, opts...) +} diff --git a/pkg/grpc/embed.go b/pkg/grpc/embed.go index 3155ff59..cf624344 100644 --- a/pkg/grpc/embed.go +++ b/pkg/grpc/embed.go @@ -87,6 +87,10 @@ func (e *embedBackend) Rerank(ctx context.Context, in *pb.RerankRequest, opts .. 
return e.s.Rerank(ctx, in) } +func (e *embedBackend) GetTokenMetrics(ctx context.Context, in *pb.MetricsRequest, opts ...grpc.CallOption) (*pb.MetricsResponse, error) { + return e.s.GetMetrics(ctx, in) +} + type embedBackendServerStream struct { ctx context.Context fn func(s []byte) From 307a835199d0f10b8095ca5665accb9f25468073 Mon Sep 17 00:00:00 2001 From: Dave Date: Tue, 1 Oct 2024 14:55:46 -0400 Subject: [PATCH 0230/1530] groundwork: ListModels Filtering Upgrade (#2773) * seperate the filtering from the middleware changes --------- Signed-off-by: Dave Lee --- core/cli/util.go | 38 +++++- core/config/backend_config.go | 147 ++++++++++++++++++++++-- core/config/backend_config_filter.go | 35 ++++++ core/config/backend_config_loader.go | 20 ++++ core/config/backend_config_test.go | 102 +++++++++++++++- core/http/ctx/fiber.go | 4 +- core/http/endpoints/localai/welcome.go | 12 +- core/http/endpoints/openai/assistant.go | 2 +- core/http/endpoints/openai/list.go | 38 +++--- core/http/routes/ui.go | 6 +- core/services/list_models.go | 64 +++++------ 11 files changed, 387 insertions(+), 81 deletions(-) create mode 100644 core/config/backend_config_filter.go diff --git a/core/cli/util.go b/core/cli/util.go index b3e545d8..57b8ad9e 100644 --- a/core/cli/util.go +++ b/core/cli/util.go @@ -15,8 +15,9 @@ import ( ) type UtilCMD struct { - GGUFInfo GGUFInfoCMD `cmd:"" name:"gguf-info" help:"Get information about a GGUF file"` - HFScan HFScanCMD `cmd:"" name:"hf-scan" help:"Checks installed models for known security issues. WARNING: this is a best-effort feature and may not catch everything!"` + GGUFInfo GGUFInfoCMD `cmd:"" name:"gguf-info" help:"Get information about a GGUF file"` + HFScan HFScanCMD `cmd:"" name:"hf-scan" help:"Checks installed models for known security issues. 
WARNING: this is a best-effort feature and may not catch everything!"` + UsecaseHeuristic UsecaseHeuristicCMD `cmd:"" name:"usecase-heuristic" help:"Checks a specific model config and prints what usecase LocalAI will offer for it."` } type GGUFInfoCMD struct { @@ -30,6 +31,11 @@ type HFScanCMD struct { ToScan []string `arg:""` } +type UsecaseHeuristicCMD struct { + ConfigName string `name:"The config file to check"` + ModelsPath string `env:"LOCALAI_MODELS_PATH,MODELS_PATH" type:"path" default:"${basepath}/models" help:"Path containing models used for inferencing" group:"storage"` +} + func (u *GGUFInfoCMD) Run(ctx *cliContext.Context) error { if u.Args == nil || len(u.Args) == 0 { return fmt.Errorf("no GGUF file provided") @@ -99,3 +105,31 @@ func (hfscmd *HFScanCMD) Run(ctx *cliContext.Context) error { return nil } } + +func (uhcmd *UsecaseHeuristicCMD) Run(ctx *cliContext.Context) error { + if len(uhcmd.ConfigName) == 0 { + log.Error().Msg("ConfigName is a required parameter") + return fmt.Errorf("config name is a required parameter") + } + if len(uhcmd.ModelsPath) == 0 { + log.Error().Msg("ModelsPath is a required parameter") + return fmt.Errorf("model path is a required parameter") + } + bcl := config.NewBackendConfigLoader(uhcmd.ModelsPath) + err := bcl.LoadBackendConfig(uhcmd.ConfigName) + if err != nil { + log.Error().Err(err).Str("ConfigName", uhcmd.ConfigName).Msg("error while loading backend") + return err + } + bc, exists := bcl.GetBackendConfig(uhcmd.ConfigName) + if !exists { + log.Error().Str("ConfigName", uhcmd.ConfigName).Msg("ConfigName not found") + } + for name, uc := range config.GetAllBackendConfigUsecases() { + if bc.HasUsecases(uc) { + log.Info().Str("Usecase", name) + } + } + log.Info().Msg("---") + return nil +} diff --git a/core/config/backend_config.go b/core/config/backend_config.go index 027e18a4..8db94f7c 100644 --- a/core/config/backend_config.go +++ b/core/config/backend_config.go @@ -3,11 +3,13 @@ package config import ( "os" 
"regexp" + "slices" "strings" "github.com/mudler/LocalAI/core/schema" "github.com/mudler/LocalAI/pkg/downloader" "github.com/mudler/LocalAI/pkg/functions" + "gopkg.in/yaml.v3" ) const ( @@ -27,13 +29,15 @@ type BackendConfig struct { schema.PredictionOptions `yaml:"parameters"` Name string `yaml:"name"` - F16 *bool `yaml:"f16"` - Threads *int `yaml:"threads"` - Debug *bool `yaml:"debug"` - Roles map[string]string `yaml:"roles"` - Embeddings *bool `yaml:"embeddings"` - Backend string `yaml:"backend"` - TemplateConfig TemplateConfig `yaml:"template"` + F16 *bool `yaml:"f16"` + Threads *int `yaml:"threads"` + Debug *bool `yaml:"debug"` + Roles map[string]string `yaml:"roles"` + Embeddings *bool `yaml:"embeddings"` + Backend string `yaml:"backend"` + TemplateConfig TemplateConfig `yaml:"template"` + KnownUsecaseStrings []string `yaml:"known_usecases"` + KnownUsecases *BackendConfigUsecases `yaml:"-"` PromptStrings, InputStrings []string `yaml:"-"` InputToken [][]int `yaml:"-"` @@ -194,6 +198,17 @@ type TemplateConfig struct { JoinChatMessagesByCharacter *string `yaml:"join_chat_messages_by_character"` } +func (c *BackendConfig) UnmarshalYAML(value *yaml.Node) error { + type BCAlias BackendConfig + var aux BCAlias + if err := value.Decode(&aux); err != nil { + return err + } + *c = BackendConfig(aux) + c.KnownUsecases = GetUsecasesFromYAML(c.KnownUsecaseStrings) + return nil +} + func (c *BackendConfig) SetFunctionCallString(s string) { c.functionCallString = s } @@ -410,3 +425,121 @@ func (c *BackendConfig) Validate() bool { func (c *BackendConfig) HasTemplate() bool { return c.TemplateConfig.Completion != "" || c.TemplateConfig.Edit != "" || c.TemplateConfig.Chat != "" || c.TemplateConfig.ChatMessage != "" } + +type BackendConfigUsecases int + +const ( + FLAG_ANY BackendConfigUsecases = 0b000000000 + FLAG_CHAT BackendConfigUsecases = 0b000000001 + FLAG_COMPLETION BackendConfigUsecases = 0b000000010 + FLAG_EDIT BackendConfigUsecases = 0b000000100 + FLAG_EMBEDDINGS 
BackendConfigUsecases = 0b000001000 + FLAG_RERANK BackendConfigUsecases = 0b000010000 + FLAG_IMAGE BackendConfigUsecases = 0b000100000 + FLAG_TRANSCRIPT BackendConfigUsecases = 0b001000000 + FLAG_TTS BackendConfigUsecases = 0b010000000 + FLAG_SOUND_GENERATION BackendConfigUsecases = 0b100000000 + + // Common Subsets + FLAG_LLM BackendConfigUsecases = FLAG_CHAT & FLAG_COMPLETION & FLAG_EDIT +) + +func GetAllBackendConfigUsecases() map[string]BackendConfigUsecases { + return map[string]BackendConfigUsecases{ + "FLAG_ANY": FLAG_ANY, + "FLAG_CHAT": FLAG_CHAT, + "FLAG_COMPLETION": FLAG_COMPLETION, + "FLAG_EDIT": FLAG_EDIT, + "FLAG_EMBEDDINGS": FLAG_EMBEDDINGS, + "FLAG_RERANK": FLAG_RERANK, + "FLAG_IMAGE": FLAG_IMAGE, + "FLAG_TRANSCRIPT": FLAG_TRANSCRIPT, + "FLAG_TTS": FLAG_TTS, + "FLAG_SOUND_GENERATION": FLAG_SOUND_GENERATION, + "FLAG_LLM": FLAG_LLM, + } +} + +func GetUsecasesFromYAML(input []string) *BackendConfigUsecases { + if len(input) == 0 { + return nil + } + result := FLAG_ANY + flags := GetAllBackendConfigUsecases() + for _, str := range input { + flag, exists := flags["FLAG_"+strings.ToUpper(str)] + if exists { + result |= flag + } + } + return &result +} + +// HasUsecases examines a BackendConfig and determines which endpoints have a chance of success. +func (c *BackendConfig) HasUsecases(u BackendConfigUsecases) bool { + if (c.KnownUsecases != nil) && ((u & *c.KnownUsecases) == u) { + return true + } + return c.GuessUsecases(u) +} + +// GuessUsecases is a **heuristic based** function, as the backend in question may not be loaded yet, and the config may not record what it's useful at. +// In its current state, this function should ideally check for properties of the config like templates, rather than the direct backend name checks for the lower half. +// This avoids the maintenance burden of updating this list for each new backend - but unfortunately, that's the best option for some services currently. 
+func (c *BackendConfig) GuessUsecases(u BackendConfigUsecases) bool { + if (u & FLAG_CHAT) == FLAG_CHAT { + if c.TemplateConfig.Chat == "" && c.TemplateConfig.ChatMessage == "" { + return false + } + } + if (u & FLAG_COMPLETION) == FLAG_COMPLETION { + if c.TemplateConfig.Completion == "" { + return false + } + } + if (u & FLAG_EDIT) == FLAG_EDIT { + if c.TemplateConfig.Edit == "" { + return false + } + } + if (u & FLAG_EMBEDDINGS) == FLAG_EMBEDDINGS { + if c.Embeddings == nil || !*c.Embeddings { + return false + } + } + if (u & FLAG_IMAGE) == FLAG_IMAGE { + imageBackends := []string{"diffusers", "tinydream", "stablediffusion"} + if !slices.Contains(imageBackends, c.Backend) { + return false + } + + if c.Backend == "diffusers" && c.Diffusers.PipelineType == "" { + return false + } + + } + if (u & FLAG_RERANK) == FLAG_RERANK { + if c.Backend != "rerankers" { + return false + } + } + if (u & FLAG_TRANSCRIPT) == FLAG_TRANSCRIPT { + if c.Backend != "whisper" { + return false + } + } + if (u & FLAG_TTS) == FLAG_TTS { + ttsBackends := []string{"piper", "transformers-musicgen", "parler-tts"} + if !slices.Contains(ttsBackends, c.Backend) { + return false + } + } + + if (u & FLAG_SOUND_GENERATION) == FLAG_SOUND_GENERATION { + if c.Backend != "transformers-musicgen" { + return false + } + } + + return true +} diff --git a/core/config/backend_config_filter.go b/core/config/backend_config_filter.go new file mode 100644 index 00000000..f1eb2488 --- /dev/null +++ b/core/config/backend_config_filter.go @@ -0,0 +1,35 @@ +package config + +import "regexp" + +type BackendConfigFilterFn func(string, *BackendConfig) bool + +func NoFilterFn(_ string, _ *BackendConfig) bool { return true } + +func BuildNameFilterFn(filter string) (BackendConfigFilterFn, error) { + if filter == "" { + return NoFilterFn, nil + } + rxp, err := regexp.Compile(filter) + if err != nil { + return nil, err + } + return func(name string, config *BackendConfig) bool { + if config != nil { + return 
rxp.MatchString(config.Name) + } + return rxp.MatchString(name) + }, nil +} + +func BuildUsecaseFilterFn(usecases BackendConfigUsecases) BackendConfigFilterFn { + if usecases == FLAG_ANY { + return NoFilterFn + } + return func(name string, config *BackendConfig) bool { + if config == nil { + return false // TODO: Potentially make this a param, for now, no known usecase to include + } + return config.HasUsecases(usecases) + } +} diff --git a/core/config/backend_config_loader.go b/core/config/backend_config_loader.go index 45fe259e..7fe49bab 100644 --- a/core/config/backend_config_loader.go +++ b/core/config/backend_config_loader.go @@ -201,6 +201,26 @@ func (bcl *BackendConfigLoader) GetAllBackendConfigs() []BackendConfig { return res } +func (bcl *BackendConfigLoader) GetBackendConfigsByFilter(filter BackendConfigFilterFn) []BackendConfig { + bcl.Lock() + defer bcl.Unlock() + var res []BackendConfig + + if filter == nil { + filter = NoFilterFn + } + + for n, v := range bcl.configs { + if filter(n, &v) { + res = append(res, v) + } + } + + // TODO: I don't think this one needs to Sort on name... but we'll see what breaks. 
+ + return res +} + func (bcl *BackendConfigLoader) RemoveBackendConfig(m string) { bcl.Lock() defer bcl.Unlock() diff --git a/core/config/backend_config_test.go b/core/config/backend_config_test.go index da245933..04eacb7e 100644 --- a/core/config/backend_config_test.go +++ b/core/config/backend_config_test.go @@ -19,12 +19,17 @@ var _ = Describe("Test cases for config related functions", func() { `backend: "../foo-bar" name: "foo" parameters: - model: "foo-bar"`) + model: "foo-bar" +known_usecases: +- chat +- COMPLETION +`) Expect(err).ToNot(HaveOccurred()) config, err := readBackendConfigFromFile(tmp.Name()) Expect(err).To(BeNil()) Expect(config).ToNot(BeNil()) Expect(config.Validate()).To(BeFalse()) + Expect(config.KnownUsecases).ToNot(BeNil()) }) It("Test Validate", func() { tmp, err := os.CreateTemp("", "config.yaml") @@ -61,4 +66,99 @@ parameters: Expect(config.Validate()).To(BeTrue()) }) }) + It("Properly handles backend usecase matching", func() { + + a := BackendConfig{ + Name: "a", + } + Expect(a.HasUsecases(FLAG_ANY)).To(BeTrue()) // FLAG_ANY just means the config _exists_ essentially. 
+ + b := BackendConfig{ + Name: "b", + Backend: "stablediffusion", + } + Expect(b.HasUsecases(FLAG_ANY)).To(BeTrue()) + Expect(b.HasUsecases(FLAG_IMAGE)).To(BeTrue()) + Expect(b.HasUsecases(FLAG_CHAT)).To(BeFalse()) + + c := BackendConfig{ + Name: "c", + Backend: "llama-cpp", + TemplateConfig: TemplateConfig{ + Chat: "chat", + }, + } + Expect(c.HasUsecases(FLAG_ANY)).To(BeTrue()) + Expect(c.HasUsecases(FLAG_IMAGE)).To(BeFalse()) + Expect(c.HasUsecases(FLAG_COMPLETION)).To(BeFalse()) + Expect(c.HasUsecases(FLAG_CHAT)).To(BeTrue()) + + d := BackendConfig{ + Name: "d", + Backend: "llama-cpp", + TemplateConfig: TemplateConfig{ + Chat: "chat", + Completion: "completion", + }, + } + Expect(d.HasUsecases(FLAG_ANY)).To(BeTrue()) + Expect(d.HasUsecases(FLAG_IMAGE)).To(BeFalse()) + Expect(d.HasUsecases(FLAG_COMPLETION)).To(BeTrue()) + Expect(d.HasUsecases(FLAG_CHAT)).To(BeTrue()) + + trueValue := true + e := BackendConfig{ + Name: "e", + Backend: "llama-cpp", + TemplateConfig: TemplateConfig{ + Completion: "completion", + }, + Embeddings: &trueValue, + } + + Expect(e.HasUsecases(FLAG_ANY)).To(BeTrue()) + Expect(e.HasUsecases(FLAG_IMAGE)).To(BeFalse()) + Expect(e.HasUsecases(FLAG_COMPLETION)).To(BeTrue()) + Expect(e.HasUsecases(FLAG_CHAT)).To(BeFalse()) + Expect(e.HasUsecases(FLAG_EMBEDDINGS)).To(BeTrue()) + + f := BackendConfig{ + Name: "f", + Backend: "piper", + } + Expect(f.HasUsecases(FLAG_ANY)).To(BeTrue()) + Expect(f.HasUsecases(FLAG_TTS)).To(BeTrue()) + Expect(f.HasUsecases(FLAG_CHAT)).To(BeFalse()) + + g := BackendConfig{ + Name: "g", + Backend: "whisper", + } + Expect(g.HasUsecases(FLAG_ANY)).To(BeTrue()) + Expect(g.HasUsecases(FLAG_TRANSCRIPT)).To(BeTrue()) + Expect(g.HasUsecases(FLAG_TTS)).To(BeFalse()) + + h := BackendConfig{ + Name: "h", + Backend: "transformers-musicgen", + } + Expect(h.HasUsecases(FLAG_ANY)).To(BeTrue()) + Expect(h.HasUsecases(FLAG_TRANSCRIPT)).To(BeFalse()) + Expect(h.HasUsecases(FLAG_TTS)).To(BeTrue()) + 
Expect(h.HasUsecases(FLAG_SOUND_GENERATION)).To(BeTrue()) + + knownUsecases := FLAG_CHAT | FLAG_COMPLETION + i := BackendConfig{ + Name: "i", + Backend: "whisper", + // Earlier test checks parsing, this just needs to set final values + KnownUsecases: &knownUsecases, + } + Expect(i.HasUsecases(FLAG_ANY)).To(BeTrue()) + Expect(i.HasUsecases(FLAG_TRANSCRIPT)).To(BeTrue()) + Expect(i.HasUsecases(FLAG_TTS)).To(BeFalse()) + Expect(i.HasUsecases(FLAG_COMPLETION)).To(BeTrue()) + Expect(i.HasUsecases(FLAG_CHAT)).To(BeTrue()) + + }) }) diff --git a/core/http/ctx/fiber.go b/core/http/ctx/fiber.go index 94059847..28a35ac4 100644 --- a/core/http/ctx/fiber.go +++ b/core/http/ctx/fiber.go @@ -21,12 +21,12 @@ func ModelFromContext(ctx *fiber.Ctx, cl *config.BackendConfigLoader, loader *mo } // Set model from bearer token, if available - bearer := strings.TrimLeft(ctx.Get("authorization"), "Bearer ") + bearer := strings.TrimLeft(ctx.Get("authorization"), "Bear ") // Reduced duplicate characters of Bearer bearerExists := bearer != "" && loader.ExistsInModelPath(bearer) // If no model was specified, take the first available if modelInput == "" && !bearerExists && firstModel { - models, _ := services.ListModels(cl, loader, "", true) + models, _ := services.ListModels(cl, loader, config.NoFilterFn, services.SKIP_IF_CONFIGURED) if len(models) > 0 { modelInput = models[0] log.Debug().Msgf("No model specified, using: %s", modelInput) diff --git a/core/http/endpoints/localai/welcome.go b/core/http/endpoints/localai/welcome.go index 396c4084..0518ceac 100644 --- a/core/http/endpoints/localai/welcome.go +++ b/core/http/endpoints/localai/welcome.go @@ -13,7 +13,7 @@ import ( func WelcomeEndpoint(appConfig *config.ApplicationConfig, cl *config.BackendConfigLoader, ml *model.ModelLoader, modelStatus func() (map[string]string, map[string]string)) func(*fiber.Ctx) error { return func(c *fiber.Ctx) error { - models, _ := services.ListModels(cl, ml, "", true) + models, _ := services.ListModels(cl, 
ml, config.NoFilterFn, services.SKIP_IF_CONFIGURED) backendConfigs := cl.GetAllBackendConfigs() galleryConfigs := map[string]*gallery.Config{} @@ -32,18 +32,10 @@ func WelcomeEndpoint(appConfig *config.ApplicationConfig, // Get model statuses to display in the UI the operation in progress processingModels, taskTypes := modelStatus() - modelsWithoutConfig := []string{} - - for _, m := range models { - if _, ok := modelsWithBackendConfig[m]; !ok { - modelsWithoutConfig = append(modelsWithoutConfig, m) - } - } - summary := fiber.Map{ "Title": "LocalAI API - " + internal.PrintableVersion(), "Version": internal.PrintableVersion(), - "Models": modelsWithoutConfig, + "Models": models, "ModelsConfig": backendConfigs, "GalleryConfig": galleryConfigs, "IsP2PEnabled": p2p.IsP2PEnabled(), diff --git a/core/http/endpoints/openai/assistant.go b/core/http/endpoints/openai/assistant.go index ff218730..3240e8ee 100644 --- a/core/http/endpoints/openai/assistant.go +++ b/core/http/endpoints/openai/assistant.go @@ -225,7 +225,7 @@ func filterAssistantsAfterID(assistants []Assistant, id string) []Assistant { func modelExists(cl *config.BackendConfigLoader, ml *model.ModelLoader, modelName string) (found bool) { found = false - models, err := services.ListModels(cl, ml, "", true) + models, err := services.ListModels(cl, ml, config.NoFilterFn, services.SKIP_IF_CONFIGURED) if err != nil { return } diff --git a/core/http/endpoints/openai/list.go b/core/http/endpoints/openai/list.go index d446b100..80dcb3e4 100644 --- a/core/http/endpoints/openai/list.go +++ b/core/http/endpoints/openai/list.go @@ -18,32 +18,32 @@ func ListModelsEndpoint(bcl *config.BackendConfigLoader, ml *model.ModelLoader) filter := c.Query("filter") // By default, exclude any loose files that are already referenced by a configuration file. 
- excludeConfigured := c.QueryBool("excludeConfigured", true) + var policy services.LooseFilePolicy + if c.QueryBool("excludeConfigured", true) { + policy = services.SKIP_IF_CONFIGURED + } else { + policy = services.ALWAYS_INCLUDE // This replicates current behavior. TODO: give more options to the user? + } - dataModels, err := modelList(bcl, ml, filter, excludeConfigured) + filterFn, err := config.BuildNameFilterFn(filter) if err != nil { return err } + + modelNames, err := services.ListModels(bcl, ml, filterFn, policy) + if err != nil { + return err + } + + // Map from a slice of names to a slice of OpenAIModel response objects + dataModels := []schema.OpenAIModel{} + for _, m := range modelNames { + dataModels = append(dataModels, schema.OpenAIModel{ID: m, Object: "model"}) + } + return c.JSON(schema.ModelsDataResponse{ Object: "list", Data: dataModels, }) } } - -func modelList(bcl *config.BackendConfigLoader, ml *model.ModelLoader, filter string, excludeConfigured bool) ([]schema.OpenAIModel, error) { - - models, err := services.ListModels(bcl, ml, filter, excludeConfigured) - if err != nil { - return nil, err - } - - dataModels := []schema.OpenAIModel{} - - // Then iterate through the loose files: - for _, m := range models { - dataModels = append(dataModels, schema.OpenAIModel{ID: m, Object: "model"}) - } - - return dataModels, nil -} diff --git a/core/http/routes/ui.go b/core/http/routes/ui.go index 7b2c6ae7..cfe9368c 100644 --- a/core/http/routes/ui.go +++ b/core/http/routes/ui.go @@ -303,7 +303,7 @@ func RegisterUIRoutes(app *fiber.App, // Show the Chat page app.Get("/chat/:model", func(c *fiber.Ctx) error { - backendConfigs, _ := services.ListModels(cl, ml, "", true) + backendConfigs, _ := services.ListModels(cl, ml, config.NoFilterFn, services.SKIP_IF_CONFIGURED) summary := fiber.Map{ "Title": "LocalAI - Chat with " + c.Params("model"), @@ -318,7 +318,7 @@ func RegisterUIRoutes(app *fiber.App, }) app.Get("/talk/", func(c *fiber.Ctx) error { - 
backendConfigs, _ := services.ListModels(cl, ml, "", true) + backendConfigs, _ := services.ListModels(cl, ml, config.NoFilterFn, services.SKIP_IF_CONFIGURED) if len(backendConfigs) == 0 { // If no model is available redirect to the index which suggests how to install models @@ -339,7 +339,7 @@ func RegisterUIRoutes(app *fiber.App, app.Get("/chat/", func(c *fiber.Ctx) error { - backendConfigs, _ := services.ListModels(cl, ml, "", true) + backendConfigs, _ := services.ListModels(cl, ml, config.NoFilterFn, services.SKIP_IF_CONFIGURED) if len(backendConfigs) == 0 { // If no model is available redirect to the index which suggests how to install models diff --git a/core/services/list_models.go b/core/services/list_models.go index 4b578e25..c310ac15 100644 --- a/core/services/list_models.go +++ b/core/services/list_models.go @@ -1,55 +1,47 @@ package services import ( - "regexp" - "github.com/mudler/LocalAI/core/config" "github.com/mudler/LocalAI/pkg/model" ) -func ListModels(bcl *config.BackendConfigLoader, ml *model.ModelLoader, filter string, excludeConfigured bool) ([]string, error) { +type LooseFilePolicy int - models, err := ml.ListFilesInModelPath() - if err != nil { - return nil, err - } +const ( + SKIP_IF_CONFIGURED LooseFilePolicy = iota + SKIP_ALWAYS + ALWAYS_INCLUDE + LOOSE_ONLY +) - var mm map[string]interface{} = map[string]interface{}{} +func ListModels(bcl *config.BackendConfigLoader, ml *model.ModelLoader, filter config.BackendConfigFilterFn, looseFilePolicy LooseFilePolicy) ([]string, error) { + + var skipMap map[string]interface{} = map[string]interface{}{} dataModels := []string{} - var filterFn func(name string) bool - - // If filter is not specified, do not filter the list by model name - if filter == "" { - filterFn = func(_ string) bool { return true } - } else { - // If filter _IS_ specified, we compile it to a regex which is used to create the filterFn - rxp, err := regexp.Compile(filter) - if err != nil { - return nil, err - } - filterFn = 
func(name string) bool { - return rxp.MatchString(name) - } - } - - // Start with the known configurations - for _, c := range bcl.GetAllBackendConfigs() { - if excludeConfigured { - mm[c.Model] = nil - } - - if filterFn(c.Name) { + // Start with known configurations + if looseFilePolicy != LOOSE_ONLY { + for _, c := range bcl.GetBackendConfigsByFilter(filter) { + if looseFilePolicy == SKIP_IF_CONFIGURED { + skipMap[c.Model] = nil + } dataModels = append(dataModels, c.Name) } } - // Then iterate through the loose files: - for _, m := range models { - // And only adds them if they shouldn't be skipped. - if _, exists := mm[m]; !exists && filterFn(m) { - dataModels = append(dataModels, m) + // Then iterate through the loose files if requested. + if looseFilePolicy != SKIP_ALWAYS { + + models, err := ml.ListFilesInModelPath() + if err != nil { + return nil, err + } + for _, m := range models { + // And only adds them if they shouldn't be skipped. + if _, exists := skipMap[m]; !exists && filter(m, nil) { + dataModels = append(dataModels, m) + } } } From 88b99d30bbd0700238673359c086b32064bd8c19 Mon Sep 17 00:00:00 2001 From: Ettore Di Giacinto Date: Tue, 1 Oct 2024 22:47:51 +0200 Subject: [PATCH 0231/1530] Update README.md update hot topics --- README.md | 1 + 1 file changed, 1 insertion(+) diff --git a/README.md b/README.md index 5b9a2c43..28f5bd22 100644 --- a/README.md +++ b/README.md @@ -85,6 +85,7 @@ docker run -ti --name local-ai -p 8080:8080 localai/localai:latest-aio-cpu Hot topics (looking for contributors): +- Realtime API https://github.com/mudler/LocalAI/issues/3714 - 🔥🔥 Distributed, P2P Global community pools: https://github.com/mudler/LocalAI/issues/3113 - WebUI improvements: https://github.com/mudler/LocalAI/issues/2156 - Backends v2: https://github.com/mudler/LocalAI/issues/1126 From 2cc3b7128ea78a16c54fa08d1cbd7bd4d819117c Mon Sep 17 00:00:00 2001 From: "LocalAI [bot]" <139863280+localai-bot@users.noreply.github.com> Date: Wed, 2 Oct 2024 00:45:33 +0200 
Subject: [PATCH 0232/1530] chore: :arrow_up: Update ggerganov/whisper.cpp to `2ef717b293fe93872cc3a03ca77942936a281959` (#3712) :arrow_up: Update ggerganov/whisper.cpp Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com> Co-authored-by: mudler <2420543+mudler@users.noreply.github.com> --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index 6c6dbd21..f803ed49 100644 --- a/Makefile +++ b/Makefile @@ -16,7 +16,7 @@ RWKV_VERSION?=661e7ae26d442f5cfebd2a0881b44e8c55949ec6 # whisper.cpp version WHISPER_REPO?=https://github.com/ggerganov/whisper.cpp -WHISPER_CPP_VERSION?=8feb375fbdf0277ad36958c218c6bf48fa0ba75a +WHISPER_CPP_VERSION?=2ef717b293fe93872cc3a03ca77942936a281959 # bert.cpp version BERT_REPO?=https://github.com/go-skynet/go-bert.cpp From db704199dc0b69c248f1f41ab1cfb524e8682555 Mon Sep 17 00:00:00 2001 From: "LocalAI [bot]" <139863280+localai-bot@users.noreply.github.com> Date: Wed, 2 Oct 2024 00:45:45 +0200 Subject: [PATCH 0233/1530] chore: :arrow_up: Update ggerganov/llama.cpp to `3f1ae2e32cde00c39b96be6d01c2997c29bae555` (#3713) :arrow_up: Update ggerganov/llama.cpp Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com> Co-authored-by: mudler <2420543+mudler@users.noreply.github.com> --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index f803ed49..27232b9a 100644 --- a/Makefile +++ b/Makefile @@ -8,7 +8,7 @@ DETECT_LIBS?=true # llama.cpp versions GOLLAMA_REPO?=https://github.com/go-skynet/go-llama.cpp GOLLAMA_VERSION?=2b57a8ae43e4699d3dc5d1496a1ccd42922993be -CPPLLAMA_VERSION?=6f1d9d71f4c568778a7637ff6582e6f6ba5fb9d3 +CPPLLAMA_VERSION?=3f1ae2e32cde00c39b96be6d01c2997c29bae555 # go-rwkv version RWKV_REPO?=https://github.com/donomii/go-rwkv.cpp From 0965c6cd68e3a813947c50d4c0ed3e44b14a9935 Mon Sep 17 00:00:00 2001 From: Ettore Di Giacinto Date: Wed, 2 Oct 2024 08:55:58 +0200 Subject: [PATCH 
0234/1530] feat: track internally started models by ID (#3693) * chore(refactor): track internally started models by ID Signed-off-by: Ettore Di Giacinto * Just extend options, no need to copy Signed-off-by: Ettore Di Giacinto * Improve debugging for rerankers failures Signed-off-by: Ettore Di Giacinto * Simplify model loading with rerankers Signed-off-by: Ettore Di Giacinto * Be more consistent when generating model options Signed-off-by: Ettore Di Giacinto * Uncommitted code Signed-off-by: Ettore Di Giacinto * Make deleteProcess more idiomatic Signed-off-by: Ettore Di Giacinto * Adapt CLI for sound generation Signed-off-by: Ettore Di Giacinto * Fixup threads definition Signed-off-by: Ettore Di Giacinto * Handle corner case where c.Seed is nil Signed-off-by: Ettore Di Giacinto * Consistently use ModelOptions Signed-off-by: Ettore Di Giacinto * Adapt new code to refactoring Signed-off-by: Ettore Di Giacinto --------- Signed-off-by: Ettore Di Giacinto Co-authored-by: Dave --- core/backend/embeddings.go | 11 +- core/backend/image.go | 15 +-- core/backend/llm.go | 13 +-- core/backend/options.go | 101 +++++++++++++++--- core/backend/rerank.go | 16 +-- core/backend/soundgeneration.go | 13 +-- core/backend/token_metrics.go | 13 +-- core/backend/transcript.go | 12 +-- core/backend/tts.go | 7 +- core/cli/soundgeneration.go | 3 +- .../endpoints/elevenlabs/soundgeneration.go | 2 +- core/http/endpoints/jina/rerank.go | 4 +- .../endpoints/localai/get_token_metrics.go | 2 +- core/startup/startup.go | 8 +- pkg/model/initializers.go | 56 ++++------ pkg/model/loader.go | 8 +- pkg/model/loader_test.go | 22 ++-- pkg/model/options.go | 14 +-- pkg/model/process.go | 28 +++-- tests/e2e-aio/e2e_test.go | 6 +- 20 files changed, 169 insertions(+), 185 deletions(-) diff --git a/core/backend/embeddings.go b/core/backend/embeddings.go index 9f0f8be9..264d947b 100644 --- a/core/backend/embeddings.go +++ b/core/backend/embeddings.go @@ -10,20 +10,11 @@ import ( ) func ModelEmbedding(s string, 
tokens []int, loader *model.ModelLoader, backendConfig config.BackendConfig, appConfig *config.ApplicationConfig) (func() ([]float32, error), error) { - modelFile := backendConfig.Model - - grpcOpts := GRPCModelOpts(backendConfig) var inferenceModel interface{} var err error - opts := modelOpts(backendConfig, appConfig, []model.Option{ - model.WithLoadGRPCLoadModelOpts(grpcOpts), - model.WithThreads(uint32(*backendConfig.Threads)), - model.WithAssetDir(appConfig.AssetsDestination), - model.WithModel(modelFile), - model.WithContext(appConfig.Context), - }) + opts := ModelOptions(backendConfig, appConfig, []model.Option{}) if backendConfig.Backend == "" { inferenceModel, err = loader.GreedyLoader(opts...) diff --git a/core/backend/image.go b/core/backend/image.go index 5c2a950c..72c0007c 100644 --- a/core/backend/image.go +++ b/core/backend/image.go @@ -8,19 +8,8 @@ import ( ) func ImageGeneration(height, width, mode, step, seed int, positive_prompt, negative_prompt, src, dst string, loader *model.ModelLoader, backendConfig config.BackendConfig, appConfig *config.ApplicationConfig) (func() error, error) { - threads := backendConfig.Threads - if *threads == 0 && appConfig.Threads != 0 { - threads = &appConfig.Threads - } - gRPCOpts := GRPCModelOpts(backendConfig) - opts := modelOpts(backendConfig, appConfig, []model.Option{ - model.WithBackendString(backendConfig.Backend), - model.WithAssetDir(appConfig.AssetsDestination), - model.WithThreads(uint32(*threads)), - model.WithContext(appConfig.Context), - model.WithModel(backendConfig.Model), - model.WithLoadGRPCLoadModelOpts(gRPCOpts), - }) + + opts := ModelOptions(backendConfig, appConfig, []model.Option{}) inferenceModel, err := loader.BackendLoader( opts..., diff --git a/core/backend/llm.go b/core/backend/llm.go index cac9beba..d946d3f8 100644 --- a/core/backend/llm.go +++ b/core/backend/llm.go @@ -33,22 +33,11 @@ type TokenUsage struct { func ModelInference(ctx context.Context, s string, messages []schema.Message, 
images, videos, audios []string, loader *model.ModelLoader, c config.BackendConfig, o *config.ApplicationConfig, tokenCallback func(string, TokenUsage) bool) (func() (LLMResponse, error), error) { modelFile := c.Model - threads := c.Threads - if *threads == 0 && o.Threads != 0 { - threads = &o.Threads - } - grpcOpts := GRPCModelOpts(c) var inferenceModel grpc.Backend var err error - opts := modelOpts(c, o, []model.Option{ - model.WithLoadGRPCLoadModelOpts(grpcOpts), - model.WithThreads(uint32(*threads)), // some models uses this to allocate threads during startup - model.WithAssetDir(o.AssetsDestination), - model.WithModel(modelFile), - model.WithContext(o.Context), - }) + opts := ModelOptions(c, o, []model.Option{}) if c.Backend != "" { opts = append(opts, model.WithBackendString(c.Backend)) diff --git a/core/backend/options.go b/core/backend/options.go index d431aab6..90d563e0 100644 --- a/core/backend/options.go +++ b/core/backend/options.go @@ -11,32 +11,65 @@ import ( "github.com/rs/zerolog/log" ) -func modelOpts(c config.BackendConfig, so *config.ApplicationConfig, opts []model.Option) []model.Option { +func ModelOptions(c config.BackendConfig, so *config.ApplicationConfig, opts []model.Option) []model.Option { + name := c.Name + if name == "" { + name = c.Model + } + + defOpts := []model.Option{ + model.WithBackendString(c.Backend), + model.WithModel(c.Model), + model.WithAssetDir(so.AssetsDestination), + model.WithContext(so.Context), + model.WithModelID(name), + } + + threads := 1 + + if c.Threads != nil { + threads = *c.Threads + } + + if so.Threads != 0 { + threads = so.Threads + } + + c.Threads = &threads + + grpcOpts := grpcModelOpts(c) + defOpts = append(defOpts, model.WithLoadGRPCLoadModelOpts(grpcOpts)) + if so.SingleBackend { - opts = append(opts, model.WithSingleActiveBackend()) + defOpts = append(defOpts, model.WithSingleActiveBackend()) } if so.ParallelBackendRequests { - opts = append(opts, model.EnableParallelRequests) + defOpts = 
append(defOpts, model.EnableParallelRequests) } if c.GRPC.Attempts != 0 { - opts = append(opts, model.WithGRPCAttempts(c.GRPC.Attempts)) + defOpts = append(defOpts, model.WithGRPCAttempts(c.GRPC.Attempts)) } if c.GRPC.AttemptsSleepTime != 0 { - opts = append(opts, model.WithGRPCAttemptsDelay(c.GRPC.AttemptsSleepTime)) + defOpts = append(defOpts, model.WithGRPCAttemptsDelay(c.GRPC.AttemptsSleepTime)) } for k, v := range so.ExternalGRPCBackends { - opts = append(opts, model.WithExternalBackend(k, v)) + defOpts = append(defOpts, model.WithExternalBackend(k, v)) } - return opts + return append(defOpts, opts...) } func getSeed(c config.BackendConfig) int32 { - seed := int32(*c.Seed) + var seed int32 = config.RAND_SEED + + if c.Seed != nil { + seed = int32(*c.Seed) + } + if seed == config.RAND_SEED { seed = rand.Int31() } @@ -44,11 +77,47 @@ func getSeed(c config.BackendConfig) int32 { return seed } -func GRPCModelOpts(c config.BackendConfig) *pb.ModelOptions { +func grpcModelOpts(c config.BackendConfig) *pb.ModelOptions { b := 512 if c.Batch != 0 { b = c.Batch } + + f16 := false + if c.F16 != nil { + f16 = *c.F16 + } + + embeddings := false + if c.Embeddings != nil { + embeddings = *c.Embeddings + } + + lowVRAM := false + if c.LowVRAM != nil { + lowVRAM = *c.LowVRAM + } + + mmap := false + if c.MMap != nil { + mmap = *c.MMap + } + + ctxSize := 1024 + if c.ContextSize != nil { + ctxSize = *c.ContextSize + } + + mmlock := false + if c.MMlock != nil { + mmlock = *c.MMlock + } + + nGPULayers := 9999999 + if c.NGPULayers != nil { + nGPULayers = *c.NGPULayers + } + return &pb.ModelOptions{ CUDA: c.CUDA || c.Diffusers.CUDA, SchedulerType: c.Diffusers.SchedulerType, @@ -56,14 +125,14 @@ func GRPCModelOpts(c config.BackendConfig) *pb.ModelOptions { CFGScale: c.Diffusers.CFGScale, LoraAdapter: c.LoraAdapter, LoraScale: c.LoraScale, - F16Memory: *c.F16, + F16Memory: f16, LoraBase: c.LoraBase, IMG2IMG: c.Diffusers.IMG2IMG, CLIPModel: c.Diffusers.ClipModel, CLIPSubfolder: 
c.Diffusers.ClipSubFolder, CLIPSkip: int32(c.Diffusers.ClipSkip), ControlNet: c.Diffusers.ControlNet, - ContextSize: int32(*c.ContextSize), + ContextSize: int32(ctxSize), Seed: getSeed(c), NBatch: int32(b), NoMulMatQ: c.NoMulMatQ, @@ -85,16 +154,16 @@ func GRPCModelOpts(c config.BackendConfig) *pb.ModelOptions { YarnBetaSlow: c.YarnBetaSlow, NGQA: c.NGQA, RMSNormEps: c.RMSNormEps, - MLock: *c.MMlock, + MLock: mmlock, RopeFreqBase: c.RopeFreqBase, RopeScaling: c.RopeScaling, Type: c.ModelType, RopeFreqScale: c.RopeFreqScale, NUMA: c.NUMA, - Embeddings: *c.Embeddings, - LowVRAM: *c.LowVRAM, - NGPULayers: int32(*c.NGPULayers), - MMap: *c.MMap, + Embeddings: embeddings, + LowVRAM: lowVRAM, + NGPULayers: int32(nGPULayers), + MMap: mmap, MainGPU: c.MainGPU, Threads: int32(*c.Threads), TensorSplit: c.TensorSplit, diff --git a/core/backend/rerank.go b/core/backend/rerank.go index a7573ade..f600e2e6 100644 --- a/core/backend/rerank.go +++ b/core/backend/rerank.go @@ -9,21 +9,9 @@ import ( model "github.com/mudler/LocalAI/pkg/model" ) -func Rerank(backend, modelFile string, request *proto.RerankRequest, loader *model.ModelLoader, appConfig *config.ApplicationConfig, backendConfig config.BackendConfig) (*proto.RerankResult, error) { - bb := backend - if bb == "" { - return nil, fmt.Errorf("backend is required") - } +func Rerank(modelFile string, request *proto.RerankRequest, loader *model.ModelLoader, appConfig *config.ApplicationConfig, backendConfig config.BackendConfig) (*proto.RerankResult, error) { - grpcOpts := GRPCModelOpts(backendConfig) - - opts := modelOpts(config.BackendConfig{}, appConfig, []model.Option{ - model.WithBackendString(bb), - model.WithModel(modelFile), - model.WithContext(appConfig.Context), - model.WithAssetDir(appConfig.AssetsDestination), - model.WithLoadGRPCLoadModelOpts(grpcOpts), - }) + opts := ModelOptions(backendConfig, appConfig, []model.Option{model.WithModel(modelFile)}) rerankModel, err := loader.BackendLoader(opts...) 
if err != nil { return nil, err diff --git a/core/backend/soundgeneration.go b/core/backend/soundgeneration.go index b6a1c827..b1b458b4 100644 --- a/core/backend/soundgeneration.go +++ b/core/backend/soundgeneration.go @@ -13,7 +13,6 @@ import ( ) func SoundGeneration( - backend string, modelFile string, text string, duration *float32, @@ -25,18 +24,8 @@ func SoundGeneration( appConfig *config.ApplicationConfig, backendConfig config.BackendConfig, ) (string, *proto.Result, error) { - if backend == "" { - return "", nil, fmt.Errorf("backend is a required parameter") - } - grpcOpts := GRPCModelOpts(backendConfig) - opts := modelOpts(config.BackendConfig{}, appConfig, []model.Option{ - model.WithBackendString(backend), - model.WithModel(modelFile), - model.WithContext(appConfig.Context), - model.WithAssetDir(appConfig.AssetsDestination), - model.WithLoadGRPCLoadModelOpts(grpcOpts), - }) + opts := ModelOptions(backendConfig, appConfig, []model.Option{model.WithModel(modelFile)}) soundGenModel, err := loader.BackendLoader(opts...) if err != nil { diff --git a/core/backend/token_metrics.go b/core/backend/token_metrics.go index cd715108..acd25663 100644 --- a/core/backend/token_metrics.go +++ b/core/backend/token_metrics.go @@ -10,24 +10,13 @@ import ( ) func TokenMetrics( - backend, modelFile string, loader *model.ModelLoader, appConfig *config.ApplicationConfig, backendConfig config.BackendConfig) (*proto.MetricsResponse, error) { - bb := backend - if bb == "" { - return nil, fmt.Errorf("backend is required") - } - grpcOpts := GRPCModelOpts(backendConfig) - - opts := modelOpts(config.BackendConfig{}, appConfig, []model.Option{ - model.WithBackendString(bb), + opts := ModelOptions(backendConfig, appConfig, []model.Option{ model.WithModel(modelFile), - model.WithContext(appConfig.Context), - model.WithAssetDir(appConfig.AssetsDestination), - model.WithLoadGRPCLoadModelOpts(grpcOpts), }) model, err := loader.BackendLoader(opts...) 
if err != nil { diff --git a/core/backend/transcript.go b/core/backend/transcript.go index 6ebc7c10..c6ad9b59 100644 --- a/core/backend/transcript.go +++ b/core/backend/transcript.go @@ -14,13 +14,11 @@ import ( func ModelTranscription(audio, language string, translate bool, ml *model.ModelLoader, backendConfig config.BackendConfig, appConfig *config.ApplicationConfig) (*schema.TranscriptionResult, error) { - opts := modelOpts(backendConfig, appConfig, []model.Option{ - model.WithBackendString(model.WhisperBackend), - model.WithModel(backendConfig.Model), - model.WithContext(appConfig.Context), - model.WithThreads(uint32(*backendConfig.Threads)), - model.WithAssetDir(appConfig.AssetsDestination), - }) + if backendConfig.Backend == "" { + backendConfig.Backend = model.WhisperBackend + } + + opts := ModelOptions(backendConfig, appConfig, []model.Option{}) transcriptionModel, err := ml.BackendLoader(opts...) if err != nil { diff --git a/core/backend/tts.go b/core/backend/tts.go index 2401748c..bac2e900 100644 --- a/core/backend/tts.go +++ b/core/backend/tts.go @@ -28,14 +28,9 @@ func ModelTTS( bb = model.PiperBackend } - grpcOpts := GRPCModelOpts(backendConfig) - - opts := modelOpts(config.BackendConfig{}, appConfig, []model.Option{ + opts := ModelOptions(config.BackendConfig{}, appConfig, []model.Option{ model.WithBackendString(bb), model.WithModel(modelFile), - model.WithContext(appConfig.Context), - model.WithAssetDir(appConfig.AssetsDestination), - model.WithLoadGRPCLoadModelOpts(grpcOpts), }) ttsModel, err := loader.BackendLoader(opts...) 
if err != nil { diff --git a/core/cli/soundgeneration.go b/core/cli/soundgeneration.go index 5711b199..82bc0346 100644 --- a/core/cli/soundgeneration.go +++ b/core/cli/soundgeneration.go @@ -85,13 +85,14 @@ func (t *SoundGenerationCMD) Run(ctx *cliContext.Context) error { options := config.BackendConfig{} options.SetDefaults() + options.Backend = t.Backend var inputFile *string if t.InputFile != "" { inputFile = &t.InputFile } - filePath, _, err := backend.SoundGeneration(t.Backend, t.Model, text, + filePath, _, err := backend.SoundGeneration(t.Model, text, parseToFloat32Ptr(t.Duration), parseToFloat32Ptr(t.Temperature), &t.DoSample, inputFile, parseToInt32Ptr(t.InputFileSampleDivisor), ml, opts, options) diff --git a/core/http/endpoints/elevenlabs/soundgeneration.go b/core/http/endpoints/elevenlabs/soundgeneration.go index 619544d8..345df35b 100644 --- a/core/http/endpoints/elevenlabs/soundgeneration.go +++ b/core/http/endpoints/elevenlabs/soundgeneration.go @@ -55,7 +55,7 @@ func SoundGenerationEndpoint(cl *config.BackendConfigLoader, ml *model.ModelLoad } // TODO: Support uploading files? 
- filePath, _, err := backend.SoundGeneration(cfg.Backend, modelFile, input.Text, input.Duration, input.Temperature, input.DoSample, nil, nil, ml, appConfig, *cfg) + filePath, _, err := backend.SoundGeneration(modelFile, input.Text, input.Duration, input.Temperature, input.DoSample, nil, nil, ml, appConfig, *cfg) if err != nil { return err } diff --git a/core/http/endpoints/jina/rerank.go b/core/http/endpoints/jina/rerank.go index 04fdf031..58c3972d 100644 --- a/core/http/endpoints/jina/rerank.go +++ b/core/http/endpoints/jina/rerank.go @@ -45,13 +45,13 @@ func JINARerankEndpoint(cl *config.BackendConfigLoader, ml *model.ModelLoader, a config.LoadOptionContextSize(appConfig.ContextSize), config.LoadOptionF16(appConfig.F16), ) - if err != nil { modelFile = input.Model log.Warn().Msgf("Model not found in context: %s", input.Model) } else { modelFile = cfg.Model } + log.Debug().Msgf("Request for model: %s", modelFile) if input.Backend != "" { @@ -64,7 +64,7 @@ func JINARerankEndpoint(cl *config.BackendConfigLoader, ml *model.ModelLoader, a Documents: req.Documents, } - results, err := backend.Rerank(cfg.Backend, modelFile, request, ml, appConfig, *cfg) + results, err := backend.Rerank(modelFile, request, ml, appConfig, *cfg) if err != nil { return err } diff --git a/core/http/endpoints/localai/get_token_metrics.go b/core/http/endpoints/localai/get_token_metrics.go index 95e79bac..e0e6943f 100644 --- a/core/http/endpoints/localai/get_token_metrics.go +++ b/core/http/endpoints/localai/get_token_metrics.go @@ -51,7 +51,7 @@ func TokenMetricsEndpoint(cl *config.BackendConfigLoader, ml *model.ModelLoader, } log.Debug().Msgf("Token Metrics for model: %s", modelFile) - response, err := backend.TokenMetrics(cfg.Backend, modelFile, ml, appConfig, *cfg) + response, err := backend.TokenMetrics(modelFile, ml, appConfig, *cfg) if err != nil { return err } diff --git a/core/startup/startup.go b/core/startup/startup.go index b7b9ce8f..17e54bc0 100644 --- a/core/startup/startup.go 
+++ b/core/startup/startup.go @@ -160,13 +160,7 @@ func Startup(opts ...config.AppOption) (*config.BackendConfigLoader, *model.Mode log.Debug().Msgf("Auto loading model %s into memory from file: %s", m, cfg.Model) - grpcOpts := backend.GRPCModelOpts(*cfg) - o := []model.Option{ - model.WithModel(cfg.Model), - model.WithAssetDir(options.AssetsDestination), - model.WithThreads(uint32(options.Threads)), - model.WithLoadGRPCLoadModelOpts(grpcOpts), - } + o := backend.ModelOptions(*cfg, options, []model.Option{}) var backendErr error if cfg.Backend != "" { diff --git a/pkg/model/initializers.go b/pkg/model/initializers.go index d0f47373..6f56b453 100644 --- a/pkg/model/initializers.go +++ b/pkg/model/initializers.go @@ -268,10 +268,10 @@ func selectGRPCProcess(backend, assetDir string, f16 bool) string { // starts the grpcModelProcess for the backend, and returns a grpc client // It also loads the model -func (ml *ModelLoader) grpcModel(backend string, o *Options) func(string, string) (*Model, error) { - return func(modelName, modelFile string) (*Model, error) { +func (ml *ModelLoader) grpcModel(backend string, o *Options) func(string, string, string) (*Model, error) { + return func(modelID, modelName, modelFile string) (*Model, error) { - log.Debug().Msgf("Loading Model %s with gRPC (file: %s) (backend: %s): %+v", modelName, modelFile, backend, *o) + log.Debug().Msgf("Loading Model %s with gRPC (file: %s) (backend: %s): %+v", modelID, modelFile, backend, *o) var client *Model @@ -304,7 +304,7 @@ func (ml *ModelLoader) grpcModel(backend string, o *Options) func(string, string return nil, fmt.Errorf("failed allocating free ports: %s", err.Error()) } // Make sure the process is executable - process, err := ml.startProcess(uri, o.model, serverAddress) + process, err := ml.startProcess(uri, modelID, serverAddress) if err != nil { log.Error().Err(err).Str("path", uri).Msg("failed to launch ") return nil, err @@ -312,11 +312,11 @@ func (ml *ModelLoader) grpcModel(backend 
string, o *Options) func(string, string log.Debug().Msgf("GRPC Service Started") - client = NewModel(modelName, serverAddress, process) + client = NewModel(modelID, serverAddress, process) } else { log.Debug().Msg("external backend is uri") // address - client = NewModel(modelName, uri, nil) + client = NewModel(modelID, uri, nil) } } else { grpcProcess := backendPath(o.assetDir, backend) @@ -347,14 +347,14 @@ func (ml *ModelLoader) grpcModel(backend string, o *Options) func(string, string args, grpcProcess = library.LoadLDSO(o.assetDir, args, grpcProcess) // Make sure the process is executable in any circumstance - process, err := ml.startProcess(grpcProcess, o.model, serverAddress, args...) + process, err := ml.startProcess(grpcProcess, modelID, serverAddress, args...) if err != nil { return nil, err } log.Debug().Msgf("GRPC Service Started") - client = NewModel(modelName, serverAddress, process) + client = NewModel(modelID, serverAddress, process) } log.Debug().Msgf("Wait for the service to start up") @@ -407,11 +407,7 @@ func (ml *ModelLoader) ListAvailableBackends(assetdir string) ([]string, error) func (ml *ModelLoader) BackendLoader(opts ...Option) (client grpc.Backend, err error) { o := NewOptions(opts...) 
- if o.model != "" { - log.Info().Msgf("Loading model '%s' with backend %s", o.model, o.backendString) - } else { - log.Info().Msgf("Loading model with backend %s", o.backendString) - } + log.Info().Msgf("Loading model '%s' with backend %s", o.modelID, o.backendString) backend := strings.ToLower(o.backendString) if realBackend, exists := Aliases[backend]; exists { @@ -420,10 +416,10 @@ func (ml *ModelLoader) BackendLoader(opts ...Option) (client grpc.Backend, err e } if o.singleActiveBackend { - log.Debug().Msgf("Stopping all backends except '%s'", o.model) - err := ml.StopGRPC(allExcept(o.model)) + log.Debug().Msgf("Stopping all backends except '%s'", o.modelID) + err := ml.StopGRPC(allExcept(o.modelID)) if err != nil { - log.Error().Err(err).Str("keptModel", o.model).Msg("error while shutting down all backends except for the keptModel") + log.Error().Err(err).Str("keptModel", o.modelID).Msg("error while shutting down all backends except for the keptModel") } } @@ -437,7 +433,7 @@ func (ml *ModelLoader) BackendLoader(opts ...Option) (client grpc.Backend, err e backendToConsume = backend } - model, err := ml.LoadModel(o.model, ml.grpcModel(backendToConsume, o)) + model, err := ml.LoadModel(o.modelID, o.model, ml.grpcModel(backendToConsume, o)) if err != nil { return nil, err } @@ -450,18 +446,18 @@ func (ml *ModelLoader) GreedyLoader(opts ...Option) (grpc.Backend, error) { // Return earlier if we have a model already loaded // (avoid looping through all the backends) - if m := ml.CheckIsLoaded(o.model); m != nil { - log.Debug().Msgf("Model '%s' already loaded", o.model) + if m := ml.CheckIsLoaded(o.modelID); m != nil { + log.Debug().Msgf("Model '%s' already loaded", o.modelID) return m.GRPC(o.parallelRequests, ml.wd), nil } // If we can have only one backend active, kill all the others (except external backends) if o.singleActiveBackend { - log.Debug().Msgf("Stopping all backends except '%s'", o.model) - err := ml.StopGRPC(allExcept(o.model)) + 
log.Debug().Msgf("Stopping all backends except '%s'", o.modelID) + err := ml.StopGRPC(allExcept(o.modelID)) if err != nil { - log.Error().Err(err).Str("keptModel", o.model).Msg("error while shutting down all backends except for the keptModel - greedyloader continuing") + log.Error().Err(err).Str("keptModel", o.modelID).Msg("error while shutting down all backends except for the keptModel - greedyloader continuing") } } @@ -480,23 +476,13 @@ func (ml *ModelLoader) GreedyLoader(opts ...Option) (grpc.Backend, error) { log.Debug().Msgf("Loading from the following backends (in order): %+v", autoLoadBackends) - if o.model != "" { - log.Info().Msgf("Trying to load the model '%s' with the backend '%s'", o.model, autoLoadBackends) - } + log.Info().Msgf("Trying to load the model '%s' with the backend '%s'", o.modelID, autoLoadBackends) for _, key := range autoLoadBackends { log.Info().Msgf("[%s] Attempting to load", key) - options := []Option{ + options := append(opts, []Option{ WithBackendString(key), - WithModel(o.model), - WithLoadGRPCLoadModelOpts(o.gRPCOptions), - WithThreads(o.threads), - WithAssetDir(o.assetDir), - } - - for k, v := range o.externalBackends { - options = append(options, WithExternalBackend(k, v)) - } + }...) model, modelerr := ml.BackendLoader(options...) 
if modelerr == nil && model != nil { diff --git a/pkg/model/loader.go b/pkg/model/loader.go index 68ac1a31..97e62fe4 100644 --- a/pkg/model/loader.go +++ b/pkg/model/loader.go @@ -114,9 +114,9 @@ func (ml *ModelLoader) ListModels() []Model { return models } -func (ml *ModelLoader) LoadModel(modelName string, loader func(string, string) (*Model, error)) (*Model, error) { +func (ml *ModelLoader) LoadModel(modelID, modelName string, loader func(string, string, string) (*Model, error)) (*Model, error) { // Check if we already have a loaded model - if model := ml.CheckIsLoaded(modelName); model != nil { + if model := ml.CheckIsLoaded(modelID); model != nil { return model, nil } @@ -126,7 +126,7 @@ func (ml *ModelLoader) LoadModel(modelName string, loader func(string, string) ( ml.mu.Lock() defer ml.mu.Unlock() - model, err := loader(modelName, modelFile) + model, err := loader(modelID, modelName, modelFile) if err != nil { return nil, err } @@ -135,7 +135,7 @@ func (ml *ModelLoader) LoadModel(modelName string, loader func(string, string) ( return nil, fmt.Errorf("loader didn't return a model") } - ml.models[modelName] = model + ml.models[modelID] = model return model, nil } diff --git a/pkg/model/loader_test.go b/pkg/model/loader_test.go index d0ad4e0c..83e47ec6 100644 --- a/pkg/model/loader_test.go +++ b/pkg/model/loader_test.go @@ -65,22 +65,22 @@ var _ = Describe("ModelLoader", func() { It("should load a model and keep it in memory", func() { mockModel = model.NewModel("foo", "test.model", nil) - mockLoader := func(modelName, modelFile string) (*model.Model, error) { + mockLoader := func(modelID, modelName, modelFile string) (*model.Model, error) { return mockModel, nil } - model, err := modelLoader.LoadModel("test.model", mockLoader) + model, err := modelLoader.LoadModel("foo", "test.model", mockLoader) Expect(err).To(BeNil()) Expect(model).To(Equal(mockModel)) - Expect(modelLoader.CheckIsLoaded("test.model")).To(Equal(mockModel)) + 
Expect(modelLoader.CheckIsLoaded("foo")).To(Equal(mockModel)) }) It("should return an error if loading the model fails", func() { - mockLoader := func(modelName, modelFile string) (*model.Model, error) { + mockLoader := func(modelID, modelName, modelFile string) (*model.Model, error) { return nil, errors.New("failed to load model") } - model, err := modelLoader.LoadModel("test.model", mockLoader) + model, err := modelLoader.LoadModel("foo", "test.model", mockLoader) Expect(err).To(HaveOccurred()) Expect(model).To(BeNil()) }) @@ -88,18 +88,16 @@ var _ = Describe("ModelLoader", func() { Context("ShutdownModel", func() { It("should shutdown a loaded model", func() { - mockModel = model.NewModel("foo", "test.model", nil) - - mockLoader := func(modelName, modelFile string) (*model.Model, error) { - return mockModel, nil + mockLoader := func(modelID, modelName, modelFile string) (*model.Model, error) { + return model.NewModel("foo", "test.model", nil), nil } - _, err := modelLoader.LoadModel("test.model", mockLoader) + _, err := modelLoader.LoadModel("foo", "test.model", mockLoader) Expect(err).To(BeNil()) - err = modelLoader.ShutdownModel("test.model") + err = modelLoader.ShutdownModel("foo") Expect(err).To(BeNil()) - Expect(modelLoader.CheckIsLoaded("test.model")).To(BeNil()) + Expect(modelLoader.CheckIsLoaded("foo")).To(BeNil()) }) }) }) diff --git a/pkg/model/options.go b/pkg/model/options.go index a3f4c855..e7fd06de 100644 --- a/pkg/model/options.go +++ b/pkg/model/options.go @@ -9,7 +9,7 @@ import ( type Options struct { backendString string model string - threads uint32 + modelID string assetDir string context context.Context @@ -68,12 +68,6 @@ func WithLoadGRPCLoadModelOpts(opts *pb.ModelOptions) Option { } } -func WithThreads(threads uint32) Option { - return func(o *Options) { - o.threads = threads - } -} - func WithAssetDir(assetDir string) Option { return func(o *Options) { o.assetDir = assetDir @@ -92,6 +86,12 @@ func WithSingleActiveBackend() Option { } } 
+func WithModelID(id string) Option { + return func(o *Options) { + o.modelID = id + } +} + func NewOptions(opts ...Option) *Options { o := &Options{ gRPCOptions: &pb.ModelOptions{}, diff --git a/pkg/model/process.go b/pkg/model/process.go index 48631d79..3e16ddaf 100644 --- a/pkg/model/process.go +++ b/pkg/model/process.go @@ -16,16 +16,26 @@ import ( ) func (ml *ModelLoader) deleteProcess(s string) error { - if m, exists := ml.models[s]; exists { - process := m.Process() - if process != nil { - if err := process.Stop(); err != nil { - log.Error().Err(err).Msgf("(deleteProcess) error while deleting process %s", s) - } - } + defer delete(ml.models, s) + + m, exists := ml.models[s] + if !exists { + // Nothing to do + return nil } - delete(ml.models, s) - return nil + + process := m.Process() + if process == nil { + // Nothing to do as there is no process + return nil + } + + err := process.Stop() + if err != nil { + log.Error().Err(err).Msgf("(deleteProcess) error while deleting process %s", s) + } + + return err } func (ml *ModelLoader) StopGRPC(filter GRPCProcessFilter) error { diff --git a/tests/e2e-aio/e2e_test.go b/tests/e2e-aio/e2e_test.go index 36d127d2..a9c55497 100644 --- a/tests/e2e-aio/e2e_test.go +++ b/tests/e2e-aio/e2e_test.go @@ -260,11 +260,9 @@ var _ = Describe("E2E test", func() { resp, err := http.Post(rerankerEndpoint, "application/json", bytes.NewReader(serialized)) Expect(err).To(BeNil()) Expect(resp).ToNot(BeNil()) - Expect(resp.StatusCode).To(Equal(200)) - body, err := io.ReadAll(resp.Body) - Expect(err).To(BeNil()) - Expect(body).ToNot(BeNil()) + Expect(err).ToNot(HaveOccurred()) + Expect(resp.StatusCode).To(Equal(200), fmt.Sprintf("body: %s, response: %+v", body, resp)) deserializedResponse := schema.JINARerankResponse{} err = json.Unmarshal(body, &deserializedResponse) From 5488fc3bc14598ffb36a2d98f0b66705b45ebe0c Mon Sep 17 00:00:00 2001 From: Shraddha Date: Wed, 2 Oct 2024 12:26:18 +0530 Subject: [PATCH 0235/1530] feat: tokenization 
endpoint (#3710) endpoint to access the tokenizer Signed-off-by: shraddhazpy Co-authored-by: Ettore Di Giacinto Co-authored-by: Dave --- core/backend/tokenize.go | 50 +++++++++++++++++++++ core/http/endpoints/localai/tokenize.go | 58 +++++++++++++++++++++++++ core/http/routes/localai.go | 3 ++ core/schema/tokenize.go | 10 +++++ 4 files changed, 121 insertions(+) create mode 100644 core/backend/tokenize.go create mode 100644 core/http/endpoints/localai/tokenize.go create mode 100644 core/schema/tokenize.go diff --git a/core/backend/tokenize.go b/core/backend/tokenize.go new file mode 100644 index 00000000..3c78b17f --- /dev/null +++ b/core/backend/tokenize.go @@ -0,0 +1,50 @@ +package backend + +import ( + "github.com/mudler/LocalAI/core/config" + "github.com/mudler/LocalAI/core/schema" + "github.com/mudler/LocalAI/pkg/grpc" + model "github.com/mudler/LocalAI/pkg/model" +) + +func ModelTokenize(s string, loader *model.ModelLoader, backendConfig config.BackendConfig, appConfig *config.ApplicationConfig) (schema.TokenizeResponse, error) { + + modelFile := backendConfig.Model + + grpcOpts := GRPCModelOpts(backendConfig) + + var inferenceModel grpc.Backend + var err error + + opts := modelOpts(backendConfig, appConfig, []model.Option{ + model.WithLoadGRPCLoadModelOpts(grpcOpts), + model.WithThreads(uint32(*backendConfig.Threads)), + model.WithAssetDir(appConfig.AssetsDestination), + model.WithModel(modelFile), + model.WithContext(appConfig.Context), + }) + + if backendConfig.Backend == "" { + inferenceModel, err = loader.GreedyLoader(opts...) + } else { + opts = append(opts, model.WithBackendString(backendConfig.Backend)) + inferenceModel, err = loader.BackendLoader(opts...) 
+ } + if err != nil { + return schema.TokenizeResponse{}, err + } + + predictOptions := gRPCPredictOpts(backendConfig, loader.ModelPath) + predictOptions.Prompt = s + + // tokenize the string + resp, err := inferenceModel.TokenizeString(appConfig.Context, predictOptions) + if err != nil { + return schema.TokenizeResponse{}, err + } + + return schema.TokenizeResponse{ + Tokens: resp.Tokens, + }, nil + +} diff --git a/core/http/endpoints/localai/tokenize.go b/core/http/endpoints/localai/tokenize.go new file mode 100644 index 00000000..da110bf8 --- /dev/null +++ b/core/http/endpoints/localai/tokenize.go @@ -0,0 +1,58 @@ +package localai + +import ( + "github.com/gofiber/fiber/v2" + "github.com/mudler/LocalAI/core/backend" + "github.com/mudler/LocalAI/core/config" + fiberContext "github.com/mudler/LocalAI/core/http/ctx" + "github.com/mudler/LocalAI/core/schema" + "github.com/mudler/LocalAI/pkg/model" + "github.com/rs/zerolog/log" +) + +// TokenizeEndpoint exposes a REST API to tokenize the content +// @Summary Tokenize the input. 
+// @Success 200 {object} schema.TokenizeResponse "Response" +// @Router /v1/tokenize [post] +func TokenizeEndpoint(cl *config.BackendConfigLoader, ml *model.ModelLoader, appConfig *config.ApplicationConfig) func(c *fiber.Ctx) error { + return func(c *fiber.Ctx) error { + + input := new(schema.TokenizeRequest) + + // Get input data from the request body + if err := c.BodyParser(input); err != nil { + return err + } + + modelFile, err := fiberContext.ModelFromContext(c, cl, ml, input.Model, false) + if err != nil { + modelFile = input.Model + log.Warn().Msgf("Model not found in context: %s", input.Model) + } + + cfg, err := cl.LoadBackendConfigFileByName(modelFile, appConfig.ModelPath, + config.LoadOptionDebug(appConfig.Debug), + config.LoadOptionThreads(appConfig.Threads), + config.LoadOptionContextSize(appConfig.ContextSize), + config.LoadOptionF16(appConfig.F16), + ) + + if err != nil { + log.Err(err) + modelFile = input.Model + log.Warn().Msgf("Model not found in context: %s", input.Model) + } else { + modelFile = cfg.Model + } + log.Debug().Msgf("Request for model: %s", modelFile) + + tokenResponse, err := backend.ModelTokenize(input.Content, ml, *cfg, appConfig) + if err != nil { + return err + } + + c.JSON(tokenResponse) + return nil + + } +} diff --git a/core/http/routes/localai.go b/core/http/routes/localai.go index 2f65e779..f2f0dfa4 100644 --- a/core/http/routes/localai.go +++ b/core/http/routes/localai.go @@ -63,4 +63,7 @@ func RegisterLocalAIRoutes(app *fiber.App, app.Get("/system", localai.SystemInformations(ml, appConfig)) + // misc + app.Post("/v1/tokenize", localai.TokenizeEndpoint(cl, ml, appConfig)) + } diff --git a/core/schema/tokenize.go b/core/schema/tokenize.go new file mode 100644 index 00000000..3770cc5a --- /dev/null +++ b/core/schema/tokenize.go @@ -0,0 +1,10 @@ +package schema + +type TokenizeRequest struct { + Content string `json:"content"` + Model string `json:"model"` +} + +type TokenizeResponse struct { + Tokens []int32 
`json:"tokens"` +} From 3acd767ac4f08242e345a255f0f5b11a2f70fe1d Mon Sep 17 00:00:00 2001 From: Ettore Di Giacinto Date: Wed, 2 Oct 2024 08:59:06 +0200 Subject: [PATCH 0236/1530] chore: simplify model loading (#3715) Signed-off-by: Ettore Di Giacinto --- core/backend/tokenize.go | 8 +------- 1 file changed, 1 insertion(+), 7 deletions(-) diff --git a/core/backend/tokenize.go b/core/backend/tokenize.go index 3c78b17f..c8ec8d1c 100644 --- a/core/backend/tokenize.go +++ b/core/backend/tokenize.go @@ -11,17 +11,11 @@ func ModelTokenize(s string, loader *model.ModelLoader, backendConfig config.Bac modelFile := backendConfig.Model - grpcOpts := GRPCModelOpts(backendConfig) - var inferenceModel grpc.Backend var err error - opts := modelOpts(backendConfig, appConfig, []model.Option{ - model.WithLoadGRPCLoadModelOpts(grpcOpts), - model.WithThreads(uint32(*backendConfig.Threads)), - model.WithAssetDir(appConfig.AssetsDestination), + opts := ModelOptions(backendConfig, appConfig, []model.Option{ model.WithModel(modelFile), - model.WithContext(appConfig.Context), }) if backendConfig.Backend == "" { From e5586e87813b5ce2cc87b019f200262bc8daaae8 Mon Sep 17 00:00:00 2001 From: Ettore Di Giacinto Date: Wed, 2 Oct 2024 20:20:50 +0200 Subject: [PATCH 0237/1530] chore: get model also from query (#3716) Signed-off-by: Ettore Di Giacinto --- core/http/ctx/fiber.go | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/core/http/ctx/fiber.go b/core/http/ctx/fiber.go index 28a35ac4..254f0704 100644 --- a/core/http/ctx/fiber.go +++ b/core/http/ctx/fiber.go @@ -19,7 +19,9 @@ func ModelFromContext(ctx *fiber.Ctx, cl *config.BackendConfigLoader, loader *mo if ctx.Params("model") != "" { modelInput = ctx.Params("model") } - + if ctx.Query("model") != "" { + modelInput = ctx.Query("model") + } // Set model from bearer token, if available bearer := strings.TrimLeft(ctx.Get("authorization"), "Bear ") // Reduced duplicate characters of Bearer bearerExists := bearer != "" && 
loader.ExistsInModelPath(bearer) From 4686877c6dc216a87e835bfb0db2b2fcda7e8300 Mon Sep 17 00:00:00 2001 From: Ettore Di Giacinto Date: Wed, 2 Oct 2024 20:37:40 +0200 Subject: [PATCH 0238/1530] fix(initializer): correctly reap dangling processes (#3717) Signed-off-by: Ettore Di Giacinto --- pkg/model/initializers.go | 12 +++++++++--- pkg/model/loader.go | 2 +- pkg/model/process.go | 4 ++++ 3 files changed, 14 insertions(+), 4 deletions(-) diff --git a/pkg/model/initializers.go b/pkg/model/initializers.go index 6f56b453..9ecd77a6 100644 --- a/pkg/model/initializers.go +++ b/pkg/model/initializers.go @@ -376,7 +376,9 @@ func (ml *ModelLoader) grpcModel(backend string, o *Options) func(string, string if !ready { log.Debug().Msgf("GRPC Service NOT ready") - ml.deleteProcess(o.model) + if process := client.Process(); process != nil { + process.Stop() + } return nil, fmt.Errorf("grpc service not ready") } @@ -388,11 +390,15 @@ func (ml *ModelLoader) grpcModel(backend string, o *Options) func(string, string res, err := client.GRPC(o.parallelRequests, ml.wd).LoadModel(o.context, &options) if err != nil { - ml.deleteProcess(o.model) + if process := client.Process(); process != nil { + process.Stop() + } return nil, fmt.Errorf("could not load model: %w", err) } if !res.Success { - ml.deleteProcess(o.model) + if process := client.Process(); process != nil { + process.Stop() + } return nil, fmt.Errorf("could not load model (no success): %s", res.Message) } diff --git a/pkg/model/loader.go b/pkg/model/loader.go index 97e62fe4..0dcc9915 100644 --- a/pkg/model/loader.go +++ b/pkg/model/loader.go @@ -128,7 +128,7 @@ func (ml *ModelLoader) LoadModel(modelID, modelName string, loader func(string, defer ml.mu.Unlock() model, err := loader(modelID, modelName, modelFile) if err != nil { - return nil, err + return nil, fmt.Errorf("failed to load model with internal loader: %s", err) } if model == nil { diff --git a/pkg/model/process.go b/pkg/model/process.go index 3e16ddaf..c27fbda3 
100644 --- a/pkg/model/process.go +++ b/pkg/model/process.go @@ -18,14 +18,18 @@ import ( func (ml *ModelLoader) deleteProcess(s string) error { defer delete(ml.models, s) + log.Debug().Msgf("Deleting process %s", s) + m, exists := ml.models[s] if !exists { + log.Error().Msgf("Model does not exist %s", s) // Nothing to do return nil } process := m.Process() if process == nil { + log.Error().Msgf("No process for %s", s) // Nothing to do as there is no process return nil } From afaff175d0e7960aabb987d34e96682ad584b2e1 Mon Sep 17 00:00:00 2001 From: "LocalAI [bot]" <139863280+localai-bot@users.noreply.github.com> Date: Thu, 3 Oct 2024 09:37:05 +0200 Subject: [PATCH 0239/1530] chore: :arrow_up: Update ggerganov/llama.cpp to `a39ab216aa624308fda7fa84439c6b61dc98b87a` (#3718) :arrow_up: Update ggerganov/llama.cpp Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com> Co-authored-by: mudler <2420543+mudler@users.noreply.github.com> --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index 27232b9a..6b2e7fc5 100644 --- a/Makefile +++ b/Makefile @@ -8,7 +8,7 @@ DETECT_LIBS?=true # llama.cpp versions GOLLAMA_REPO?=https://github.com/go-skynet/go-llama.cpp GOLLAMA_VERSION?=2b57a8ae43e4699d3dc5d1496a1ccd42922993be -CPPLLAMA_VERSION?=3f1ae2e32cde00c39b96be6d01c2997c29bae555 +CPPLLAMA_VERSION?=a39ab216aa624308fda7fa84439c6b61dc98b87a # go-rwkv version RWKV_REPO?=https://github.com/donomii/go-rwkv.cpp From b5115903bfe35bec8c5fc7c73f77b038859f3c14 Mon Sep 17 00:00:00 2001 From: "LocalAI [bot]" <139863280+localai-bot@users.noreply.github.com> Date: Thu, 3 Oct 2024 09:39:49 +0200 Subject: [PATCH 0240/1530] chore: :arrow_up: Update ggerganov/whisper.cpp to `ede1718f6d45aa3f7ad4a1e169dfbc9d51570c4e` (#3719) :arrow_up: Update ggerganov/whisper.cpp Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com> Co-authored-by: mudler <2420543+mudler@users.noreply.github.com> --- 
Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index 6b2e7fc5..af9f1c8c 100644 --- a/Makefile +++ b/Makefile @@ -16,7 +16,7 @@ RWKV_VERSION?=661e7ae26d442f5cfebd2a0881b44e8c55949ec6 # whisper.cpp version WHISPER_REPO?=https://github.com/ggerganov/whisper.cpp -WHISPER_CPP_VERSION?=2ef717b293fe93872cc3a03ca77942936a281959 +WHISPER_CPP_VERSION?=ede1718f6d45aa3f7ad4a1e169dfbc9d51570c4e # bert.cpp version BERT_REPO?=https://github.com/go-skynet/go-bert.cpp From d06a052d54699660ca8b853492ae06654dc36ca2 Mon Sep 17 00:00:00 2001 From: Ettore Di Giacinto Date: Thu, 3 Oct 2024 18:38:41 +0200 Subject: [PATCH 0241/1530] chore(federated): display a message when nodes are not available (#3721) Signed-off-by: Ettore Di Giacinto --- core/p2p/federated_server.go | 44 +++++++++++++++++++++++++++++++++++- 1 file changed, 43 insertions(+), 1 deletion(-) diff --git a/core/p2p/federated_server.go b/core/p2p/federated_server.go index d0a8c2f8..d80af082 100644 --- a/core/p2p/federated_server.go +++ b/core/p2p/federated_server.go @@ -7,6 +7,7 @@ import ( "context" "errors" "fmt" + "io" "net" "github.com/mudler/edgevpn/pkg/node" @@ -41,7 +42,7 @@ func (fs *FederatedServer) proxy(ctx context.Context, node *node.Node) error { log.Error().Err(err).Msg("Error listening") return err } - // ll.Info("Binding local port on", srcaddr) + go func() { <-ctx.Done() l.Close() @@ -82,6 +83,7 @@ func (fs *FederatedServer) proxy(ctx context.Context, node *node.Node) error { if workerID == "" { log.Error().Msg("No available nodes yet") + fs.sendHTMLResponse(conn, 503, "Sorry, waiting for nodes to connect") return } @@ -89,6 +91,7 @@ func (fs *FederatedServer) proxy(ctx context.Context, node *node.Node) error { nodeData, exists := GetNode(fs.service, workerID) if !exists { log.Error().Msgf("Node %s not found", workerID) + fs.sendHTMLResponse(conn, 404, "Node not found") return } @@ -100,3 +103,42 @@ func (fs *FederatedServer) proxy(ctx context.Context, node 
*node.Node) error { } } } + +// sendHTMLResponse sends a basic HTML response with a status code and a message. +// This is extracted to make the HTML content maintainable. +func (fs *FederatedServer) sendHTMLResponse(conn net.Conn, statusCode int, message string) { + defer conn.Close() + + // Define the HTML content separately for easier maintenance. + htmlContent := fmt.Sprintf("

%s

\r\n", message) + + // Create the HTTP response with dynamic status code and content. + response := fmt.Sprintf( + "HTTP/1.1 %d %s\r\n"+ + "Content-Type: text/html\r\n"+ + "Connection: close\r\n"+ + "\r\n"+ + "%s", + statusCode, getHTTPStatusText(statusCode), htmlContent, + ) + + // Write the response to the client connection. + _, writeErr := io.WriteString(conn, response) + if writeErr != nil { + log.Error().Err(writeErr).Msg("Error writing response to client") + } +} + +// getHTTPStatusText returns a textual representation of HTTP status codes. +func getHTTPStatusText(statusCode int) string { + switch statusCode { + case 503: + return "Service Unavailable" + case 404: + return "Not Found" + case 200: + return "OK" + default: + return "Unknown Status" + } +} From 4b131a70904fd39499f6de652ad83dc0f8f41c77 Mon Sep 17 00:00:00 2001 From: JJ Asghar Date: Thu, 3 Oct 2024 13:03:35 -0500 Subject: [PATCH 0242/1530] Update CONTRIBUTING.md (#3723) Updated some formatting in the doc. Signed-off-by: JJ Asghar --- CONTRIBUTING.md | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 593ad0ed..9fb20012 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -15,8 +15,6 @@ Thank you for your interest in contributing to LocalAI! We appreciate your time - [Documentation](#documentation) - [Community and Communication](#community-and-communication) - - ## Getting Started ### Prerequisites @@ -54,7 +52,7 @@ If you find a bug, have a feature request, or encounter any issues, please check ## Coding Guidelines -- No specific coding guidelines at the moment. Please make sure the code can be tested. The most popular lint tools like []`golangci-lint`](https://golangci-lint.run) can help you here. +- No specific coding guidelines at the moment. Please make sure the code can be tested. The most popular lint tools like [`golangci-lint`](https://golangci-lint.run) can help you here. 
## Testing @@ -84,5 +82,3 @@ We are welcome the contribution of the documents, please open new PR or create a - You can reach out via the Github issue tracker. - Open a new discussion at [Discussion](https://github.com/go-skynet/LocalAI/discussions) - Join the Discord channel [Discord](https://discord.gg/uJAeKSAGDy) - ---- From a778668bcda6d949fdf5557d83e5e835cc7d8769 Mon Sep 17 00:00:00 2001 From: Ettore Di Giacinto Date: Thu, 3 Oct 2024 23:17:32 +0200 Subject: [PATCH 0243/1530] models(gallery): add salamandra-7b-instruct (#3726) Signed-off-by: Ettore Di Giacinto --- gallery/index.yaml | 24 ++++++++++++++++++++++++ 1 file changed, 24 insertions(+) diff --git a/gallery/index.yaml b/gallery/index.yaml index 0924e5cf..289c2aff 100644 --- a/gallery/index.yaml +++ b/gallery/index.yaml @@ -1,4 +1,28 @@ --- +- name: "salamandra-7b-instruct" + icon: https://huggingface.co/BSC-LT/salamandra-7b-instruct/resolve/main/images/salamandra_header.png + # Uses chatml + url: "github:mudler/LocalAI/gallery/chatml.yaml@master" + license: apache-2.0 + urls: + - https://huggingface.co/BSC-LT/salamandra-7b-instruct + - https://huggingface.co/cstr/salamandra-7b-instruct-GGUF + tags: + - llm + - gguf + - gpu + - cpu + - salamandra + description: | + Transformer-based decoder-only language model that has been pre-trained on 7.8 trillion tokens of highly curated data. The pre-training corpus contains text in 35 European languages and code. + Salamandra comes in three different sizes — 2B, 7B and 40B parameters — with their respective base and instruction-tuned variants. This model card corresponds to the 7B instructed version. 
+ overrides: + parameters: + model: salamandra-7b-instruct.Q4_K_M-f32.gguf + files: + - filename: salamandra-7b-instruct.Q4_K_M-f32.gguf + sha256: bac8e8c1d1d9d53cbdb148b8ff9ad378ddb392429207099e85b5aae3a43bff3d + uri: huggingface://cstr/salamandra-7b-instruct-GGUF/salamandra-7b-instruct.Q4_K_M-f32.gguf ## llama3.2 - &llama32 url: "github:mudler/LocalAI/gallery/llama3.1-instruct.yaml@master" From 43144c474336d4b02ab543d2ead18ba758ad5be8 Mon Sep 17 00:00:00 2001 From: "LocalAI [bot]" <139863280+localai-bot@users.noreply.github.com> Date: Fri, 4 Oct 2024 09:09:41 +0200 Subject: [PATCH 0244/1530] chore: :arrow_up: Update ggerganov/llama.cpp to `d5ed2b929d85bbd7dbeecb690880f07d9d7a6077` (#3725) :arrow_up: Update ggerganov/llama.cpp Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com> Co-authored-by: mudler <2420543+mudler@users.noreply.github.com> --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index af9f1c8c..a7d9847d 100644 --- a/Makefile +++ b/Makefile @@ -8,7 +8,7 @@ DETECT_LIBS?=true # llama.cpp versions GOLLAMA_REPO?=https://github.com/go-skynet/go-llama.cpp GOLLAMA_VERSION?=2b57a8ae43e4699d3dc5d1496a1ccd42922993be -CPPLLAMA_VERSION?=a39ab216aa624308fda7fa84439c6b61dc98b87a +CPPLLAMA_VERSION?=d5ed2b929d85bbd7dbeecb690880f07d9d7a6077 # go-rwkv version RWKV_REPO?=https://github.com/donomii/go-rwkv.cpp From 04c0841ca9e085dfd835b16684a8b82e57232068 Mon Sep 17 00:00:00 2001 From: "LocalAI [bot]" <139863280+localai-bot@users.noreply.github.com> Date: Fri, 4 Oct 2024 09:10:07 +0200 Subject: [PATCH 0245/1530] chore: :arrow_up: Update ggerganov/whisper.cpp to `ccc2547210e09e3a1785817383ab770389bb442b` (#3724) :arrow_up: Update ggerganov/whisper.cpp Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com> Co-authored-by: mudler <2420543+mudler@users.noreply.github.com> --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git 
a/Makefile b/Makefile index a7d9847d..d7a8bda5 100644 --- a/Makefile +++ b/Makefile @@ -16,7 +16,7 @@ RWKV_VERSION?=661e7ae26d442f5cfebd2a0881b44e8c55949ec6 # whisper.cpp version WHISPER_REPO?=https://github.com/ggerganov/whisper.cpp -WHISPER_CPP_VERSION?=ede1718f6d45aa3f7ad4a1e169dfbc9d51570c4e +WHISPER_CPP_VERSION?=ccc2547210e09e3a1785817383ab770389bb442b # bert.cpp version BERT_REPO?=https://github.com/go-skynet/go-bert.cpp From 648ffdf449d1cc114bb0d8fb4ceb03b6ba3de4ca Mon Sep 17 00:00:00 2001 From: Ettore Di Giacinto Date: Fri, 4 Oct 2024 18:32:29 +0200 Subject: [PATCH 0246/1530] feat(multimodal): allow to template placeholders (#3728) feat(multimodal): allow to template image placeholders Signed-off-by: Ettore Di Giacinto --- core/config/backend_config.go | 4 ++++ core/http/endpoints/openai/request.go | 21 ++++++++++++++++++--- pkg/model/initializers.go | 2 +- pkg/templates/multimodal.go | 24 ++++++++++++++++++++++++ pkg/templates/multimodal_test.go | 19 +++++++++++++++++++ 5 files changed, 66 insertions(+), 4 deletions(-) create mode 100644 pkg/templates/multimodal.go create mode 100644 pkg/templates/multimodal_test.go diff --git a/core/config/backend_config.go b/core/config/backend_config.go index 8db94f7c..79e134d8 100644 --- a/core/config/backend_config.go +++ b/core/config/backend_config.go @@ -196,6 +196,10 @@ type TemplateConfig struct { // JoinChatMessagesByCharacter is a string that will be used to join chat messages together. 
// It defaults to \n JoinChatMessagesByCharacter *string `yaml:"join_chat_messages_by_character"` + + Video string `yaml:"video"` + Image string `yaml:"image"` + Audio string `yaml:"audio"` } func (c *BackendConfig) UnmarshalYAML(value *yaml.Node) error { diff --git a/core/http/endpoints/openai/request.go b/core/http/endpoints/openai/request.go index d6182a39..a418433e 100644 --- a/core/http/endpoints/openai/request.go +++ b/core/http/endpoints/openai/request.go @@ -12,6 +12,7 @@ import ( "github.com/mudler/LocalAI/core/schema" "github.com/mudler/LocalAI/pkg/functions" "github.com/mudler/LocalAI/pkg/model" + "github.com/mudler/LocalAI/pkg/templates" "github.com/mudler/LocalAI/pkg/utils" "github.com/rs/zerolog/log" ) @@ -168,8 +169,13 @@ func updateRequestConfig(config *config.BackendConfig, input *schema.OpenAIReque continue CONTENT } input.Messages[i].StringVideos = append(input.Messages[i].StringVideos, base64) // TODO: make sure that we only return base64 stuff + + t := "[vid-{{.ID}}]{{.Text}}" + if config.TemplateConfig.Video != "" { + t = config.TemplateConfig.Video + } // set a placeholder for each image - input.Messages[i].StringContent = fmt.Sprintf("[vid-%d]", vidIndex) + input.Messages[i].StringContent + input.Messages[i].StringContent, _ = templates.TemplateMultiModal(t, vidIndex, input.Messages[i].StringContent) vidIndex++ case "audio_url", "audio": // Decode content as base64 either if it's an URL or base64 text @@ -180,7 +186,11 @@ func updateRequestConfig(config *config.BackendConfig, input *schema.OpenAIReque } input.Messages[i].StringAudios = append(input.Messages[i].StringAudios, base64) // TODO: make sure that we only return base64 stuff // set a placeholder for each image - input.Messages[i].StringContent = fmt.Sprintf("[audio-%d]", audioIndex) + input.Messages[i].StringContent + t := "[audio-{{.ID}}]{{.Text}}" + if config.TemplateConfig.Audio != "" { + t = config.TemplateConfig.Audio + } + input.Messages[i].StringContent, _ = 
templates.TemplateMultiModal(t, audioIndex, input.Messages[i].StringContent) audioIndex++ case "image_url", "image": // Decode content as base64 either if it's an URL or base64 text @@ -189,9 +199,14 @@ func updateRequestConfig(config *config.BackendConfig, input *schema.OpenAIReque log.Error().Msgf("Failed encoding image: %s", err) continue CONTENT } + + t := "[img-{{.ID}}]{{.Text}}" + if config.TemplateConfig.Image != "" { + t = config.TemplateConfig.Image + } input.Messages[i].StringImages = append(input.Messages[i].StringImages, base64) // TODO: make sure that we only return base64 stuff // set a placeholder for each image - input.Messages[i].StringContent = fmt.Sprintf("[img-%d]", imgIndex) + input.Messages[i].StringContent + input.Messages[i].StringContent, _ = templates.TemplateMultiModal(t, imgIndex, input.Messages[i].StringContent) imgIndex++ } } diff --git a/pkg/model/initializers.go b/pkg/model/initializers.go index 9ecd77a6..1171de4d 100644 --- a/pkg/model/initializers.go +++ b/pkg/model/initializers.go @@ -314,7 +314,7 @@ func (ml *ModelLoader) grpcModel(backend string, o *Options) func(string, string client = NewModel(modelID, serverAddress, process) } else { - log.Debug().Msg("external backend is uri") + log.Debug().Msg("external backend is a uri") // address client = NewModel(modelID, uri, nil) } diff --git a/pkg/templates/multimodal.go b/pkg/templates/multimodal.go new file mode 100644 index 00000000..cc56c492 --- /dev/null +++ b/pkg/templates/multimodal.go @@ -0,0 +1,24 @@ +package templates + +import ( + "bytes" + "text/template" +) + +func TemplateMultiModal(templateString string, templateID int, text string) (string, error) { + // compile the template + tmpl, err := template.New("template").Parse(templateString) + if err != nil { + return "", err + } + result := bytes.NewBuffer(nil) + // execute the template + err = tmpl.Execute(result, struct { + ID int + Text string + }{ + ID: templateID, + Text: text, + }) + return result.String(), err +} 
diff --git a/pkg/templates/multimodal_test.go b/pkg/templates/multimodal_test.go new file mode 100644 index 00000000..d1a8bd5b --- /dev/null +++ b/pkg/templates/multimodal_test.go @@ -0,0 +1,19 @@ +package templates_test + +import ( + . "github.com/mudler/LocalAI/pkg/templates" // Update with your module path + + // Update with your module path + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" +) + +var _ = Describe("EvaluateTemplate", func() { + Context("templating simple strings for multimodal chat", func() { + It("should template messages correctly", func() { + result, err := TemplateMultiModal("[img-{{.ID}}]{{.Text}}", 1, "bar") + Expect(err).NotTo(HaveOccurred()) + Expect(result).To(Equal("[img-1]bar")) + }) + }) +}) From 408dfe62ee5d6d378e99ee7a094a119671ec208e Mon Sep 17 00:00:00 2001 From: Ettore Di Giacinto Date: Fri, 4 Oct 2024 19:52:43 +0200 Subject: [PATCH 0247/1530] Update README.md Signed-off-by: Ettore Di Giacinto --- README.md | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/README.md b/README.md index 28f5bd22..44beeb71 100644 --- a/README.md +++ b/README.md @@ -68,9 +68,7 @@ docker run -ti --name local-ai -p 8080:8080 localai/localai:latest-aio-cpu [💻 Getting started](https://localai.io/basics/getting_started/index.html) -## 🔥🔥 Hot topics / Roadmap - -[Roadmap](https://github.com/mudler/LocalAI/issues?q=is%3Aissue+is%3Aopen+label%3Aroadmap) +## 📰 Latest project news - Aug 2024: 🆕 FLUX-1, [P2P Explorer](https://explorer.localai.io) - July 2024: 🔥🔥 🆕 P2P Dashboard, LocalAI Federated mode and AI Swarms: https://github.com/mudler/LocalAI/pull/2723 @@ -83,8 +81,11 @@ docker run -ti --name local-ai -p 8080:8080 localai/localai:latest-aio-cpu - May 2024: Chat, TTS, and Image generation in the WebUI: https://github.com/mudler/LocalAI/pull/2222 - April 2024: Reranker API: https://github.com/mudler/LocalAI/pull/2121 -Hot topics (looking for contributors): +Roadmap items: [List of 
issues](https://github.com/mudler/LocalAI/issues?q=is%3Aissue+is%3Aopen+label%3Aroadmap) +## 🔥🔥 Hot topics (looking for help): + +- Multimodal with vLLM and Video understanding: https://github.com/mudler/LocalAI/pull/3729 - Realtime API https://github.com/mudler/LocalAI/issues/3714 - 🔥🔥 Distributed, P2P Global community pools: https://github.com/mudler/LocalAI/issues/3113 - WebUI improvements: https://github.com/mudler/LocalAI/issues/2156 From 2553de01878b76f94daa29d0cb135c0bc0730ea4 Mon Sep 17 00:00:00 2001 From: Ettore Di Giacinto Date: Fri, 4 Oct 2024 23:42:05 +0200 Subject: [PATCH 0248/1530] feat(vllm): add support for image-to-text and video-to-text (#3729) * feat(vllm): add support for image-to-text Related to https://github.com/mudler/LocalAI/issues/3670 Signed-off-by: Ettore Di Giacinto * feat(vllm): add support for video-to-text Closes: https://github.com/mudler/LocalAI/issues/2318 Signed-off-by: Ettore Di Giacinto * feat(vllm): support CPU installations Signed-off-by: Ettore Di Giacinto * feat(vllm): add bnb Signed-off-by: Ettore Di Giacinto * chore: add docs reference Signed-off-by: Ettore Di Giacinto * Apply suggestions from code review Signed-off-by: Ettore Di Giacinto --------- Signed-off-by: Ettore Di Giacinto Signed-off-by: Ettore Di Giacinto --- backend/python/vllm/backend.py | 73 +++++++++++++++++-- backend/python/vllm/install.sh | 16 +++- backend/python/vllm/requirements-cublas11.txt | 3 +- backend/python/vllm/requirements-cublas12.txt | 3 +- backend/python/vllm/requirements-hipblas.txt | 3 +- backend/python/vllm/requirements-intel.txt | 3 +- 6 files changed, 91 insertions(+), 10 deletions(-) diff --git a/backend/python/vllm/backend.py b/backend/python/vllm/backend.py index 2cf15c1c..dfbb1503 100644 --- a/backend/python/vllm/backend.py +++ b/backend/python/vllm/backend.py @@ -5,6 +5,8 @@ import argparse import signal import sys import os +from typing import List +from PIL import Image import backend_pb2 import backend_pb2_grpc @@ -15,6 +17,8 @@ 
from vllm.engine.async_llm_engine import AsyncLLMEngine from vllm.sampling_params import SamplingParams from vllm.utils import random_uuid from vllm.transformers_utils.tokenizer import get_tokenizer +from vllm.multimodal.utils import fetch_image +from vllm.assets.video import VideoAsset _ONE_DAY_IN_SECONDS = 60 * 60 * 24 @@ -105,6 +109,7 @@ class BackendServicer(backend_pb2_grpc.BackendServicer): try: self.llm = AsyncLLMEngine.from_engine_args(engine_args) except Exception as err: + print(f"Unexpected {err=}, {type(err)=}", file=sys.stderr) return backend_pb2.Result(success=False, message=f"Unexpected {err=}, {type(err)=}") try: @@ -117,7 +122,7 @@ class BackendServicer(backend_pb2_grpc.BackendServicer): ) except Exception as err: return backend_pb2.Result(success=False, message=f"Unexpected {err=}, {type(err)=}") - + print("Model loaded successfully", file=sys.stderr) return backend_pb2.Result(message="Model loaded successfully", success=True) async def Predict(self, request, context): @@ -196,15 +201,33 @@ class BackendServicer(backend_pb2_grpc.BackendServicer): if request.Seed != 0: sampling_params.seed = request.Seed + # Extract image paths and process images prompt = request.Prompt - - # If tokenizer template is enabled and messages are provided instead of prompt apply the tokenizer template + + image_paths = request.Images + image_data = [self.load_image(img_path) for img_path in image_paths] + + videos_path = request.Videos + video_data = [self.load_video(video_path) for video_path in videos_path] + + # If tokenizer template is enabled and messages are provided instead of prompt, apply the tokenizer template if not request.Prompt and request.UseTokenizerTemplate and request.Messages: prompt = self.tokenizer.apply_chat_template(request.Messages, tokenize=False, add_generation_prompt=True) - # Generate text + # Generate text using the LLM engine request_id = random_uuid() - outputs = self.llm.generate(prompt, sampling_params, request_id) + print(f"Generating 
text with request_id: {request_id}", file=sys.stderr) + outputs = self.llm.generate( + { + "prompt": prompt, + "multi_modal_data": { + "image": image_data if image_data else None, + "video": video_data if video_data else None, + } if image_data or video_data else None, + }, + sampling_params=sampling_params, + request_id=request_id, + ) # Stream the results generated_text = "" @@ -227,9 +250,49 @@ class BackendServicer(backend_pb2_grpc.BackendServicer): if streaming: return + # Remove the image files from /tmp folder + for img_path in image_paths: + try: + os.remove(img_path) + except Exception as e: + print(f"Error removing image file: {img_path}, {e}", file=sys.stderr) + # Sending the final generated text yield backend_pb2.Reply(message=bytes(generated_text, encoding='utf-8')) + def load_image(self, image_path: str): + """ + Load an image from the given file path. + + Args: + image_path (str): The path to the image file. + + Returns: + Image: The loaded image. + """ + try: + return Image.open(image_path) + except Exception as e: + print(f"Error loading image {image_path}: {e}", file=sys.stderr) + return self.load_video(image_path) + + def load_video(self, video_path: str): + """ + Load a video from the given file path. + + Args: + video_path (str): The path to the image file. + + Returns: + Video: The loaded video. 
+ """ + try: + video = VideoAsset(name=video_path).np_ndarrays + return video + except Exception as e: + print(f"Error loading video {image_path}: {e}", file=sys.stderr) + return None + async def serve(address): # Start asyncio gRPC server server = grpc.aio.server(migration_thread_pool=futures.ThreadPoolExecutor(max_workers=MAX_WORKERS)) diff --git a/backend/python/vllm/install.sh b/backend/python/vllm/install.sh index 78a3d5ba..022cf8bf 100755 --- a/backend/python/vllm/install.sh +++ b/backend/python/vllm/install.sh @@ -13,4 +13,18 @@ if [ "x${BUILD_PROFILE}" == "xintel" ]; then EXTRA_PIP_INSTALL_FLAGS+=" --upgrade --index-strategy=unsafe-first-match" fi -installRequirements +if [ "x${BUILD_TYPE}" == "x" ]; then + ensureVenv + # https://docs.vllm.ai/en/v0.6.1/getting_started/cpu-installation.html + if [ ! -d vllm ]; then + git clone https://github.com/vllm-project/vllm + fi + pushd vllm + uv pip install wheel packaging ninja "setuptools>=49.4.0" numpy typing-extensions pillow setuptools-scm grpcio==1.66.2 protobuf bitsandbytes + uv pip install -v -r requirements-cpu.txt --extra-index-url https://download.pytorch.org/whl/cpu + VLLM_TARGET_DEVICE=cpu python setup.py install + popd + rm -rf vllm + else + installRequirements +fi diff --git a/backend/python/vllm/requirements-cublas11.txt b/backend/python/vllm/requirements-cublas11.txt index 43817727..c448a91d 100644 --- a/backend/python/vllm/requirements-cublas11.txt +++ b/backend/python/vllm/requirements-cublas11.txt @@ -1,4 +1,5 @@ --extra-index-url https://download.pytorch.org/whl/cu118 accelerate torch -transformers \ No newline at end of file +transformers +bitsandbytes \ No newline at end of file diff --git a/backend/python/vllm/requirements-cublas12.txt b/backend/python/vllm/requirements-cublas12.txt index 765a1ef5..e007f094 100644 --- a/backend/python/vllm/requirements-cublas12.txt +++ b/backend/python/vllm/requirements-cublas12.txt @@ -1,3 +1,4 @@ accelerate torch -transformers \ No newline at end of file 
+transformers +bitsandbytes \ No newline at end of file diff --git a/backend/python/vllm/requirements-hipblas.txt b/backend/python/vllm/requirements-hipblas.txt index c73d8141..9dff852d 100644 --- a/backend/python/vllm/requirements-hipblas.txt +++ b/backend/python/vllm/requirements-hipblas.txt @@ -1,4 +1,5 @@ --extra-index-url https://download.pytorch.org/whl/rocm6.0 accelerate torch -transformers \ No newline at end of file +transformers +bitsandbytes \ No newline at end of file diff --git a/backend/python/vllm/requirements-intel.txt b/backend/python/vllm/requirements-intel.txt index 1f82c46e..95443368 100644 --- a/backend/python/vllm/requirements-intel.txt +++ b/backend/python/vllm/requirements-intel.txt @@ -4,4 +4,5 @@ accelerate torch transformers optimum[openvino] -setuptools==75.1.0 # https://github.com/mudler/LocalAI/issues/2406 \ No newline at end of file +setuptools==75.1.0 # https://github.com/mudler/LocalAI/issues/2406 +bitsandbytes \ No newline at end of file From aa0564a1c6cc0476e72741ead09ef0d65a6ff0d2 Mon Sep 17 00:00:00 2001 From: "LocalAI [bot]" <139863280+localai-bot@users.noreply.github.com> Date: Sat, 5 Oct 2024 00:09:02 +0200 Subject: [PATCH 0249/1530] chore: :arrow_up: Update ggerganov/llama.cpp to `71967c2a6d30da9f61580d3e2d4cb00e0223b6fa` (#3731) :arrow_up: Update ggerganov/llama.cpp Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com> Co-authored-by: mudler <2420543+mudler@users.noreply.github.com> --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index d7a8bda5..329c0ee4 100644 --- a/Makefile +++ b/Makefile @@ -8,7 +8,7 @@ DETECT_LIBS?=true # llama.cpp versions GOLLAMA_REPO?=https://github.com/go-skynet/go-llama.cpp GOLLAMA_VERSION?=2b57a8ae43e4699d3dc5d1496a1ccd42922993be -CPPLLAMA_VERSION?=d5ed2b929d85bbd7dbeecb690880f07d9d7a6077 +CPPLLAMA_VERSION?=71967c2a6d30da9f61580d3e2d4cb00e0223b6fa # go-rwkv version 
RWKV_REPO?=https://github.com/donomii/go-rwkv.cpp From 905473c739ada8d2b9baa24119d3f2707e8d0215 Mon Sep 17 00:00:00 2001 From: "LocalAI [bot]" <139863280+localai-bot@users.noreply.github.com> Date: Sat, 5 Oct 2024 00:09:24 +0200 Subject: [PATCH 0250/1530] chore: :arrow_up: Update ggerganov/whisper.cpp to `2944cb72d95282378037cb0eb45c9e2b2529ff2c` (#3730) :arrow_up: Update ggerganov/whisper.cpp Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com> Co-authored-by: mudler <2420543+mudler@users.noreply.github.com> --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index 329c0ee4..28f5086a 100644 --- a/Makefile +++ b/Makefile @@ -16,7 +16,7 @@ RWKV_VERSION?=661e7ae26d442f5cfebd2a0881b44e8c55949ec6 # whisper.cpp version WHISPER_REPO?=https://github.com/ggerganov/whisper.cpp -WHISPER_CPP_VERSION?=ccc2547210e09e3a1785817383ab770389bb442b +WHISPER_CPP_VERSION?=2944cb72d95282378037cb0eb45c9e2b2529ff2c # bert.cpp version BERT_REPO?=https://github.com/go-skynet/go-bert.cpp From e28e80857b1aebbb8bb57c88664e9cdcddb60150 Mon Sep 17 00:00:00 2001 From: Ettore Di Giacinto Date: Sat, 5 Oct 2024 10:41:35 +0200 Subject: [PATCH 0251/1530] feat(shutdown): allow force shutdown of backends (#3733) We default to a soft kill, however, we might want to force killing backends after a while to avoid hanging requests (which may hallucinate indefinetly) Signed-off-by: Ettore Di Giacinto --- pkg/model/loader.go | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/pkg/model/loader.go b/pkg/model/loader.go index 0dcc9915..52f479c5 100644 --- a/pkg/model/loader.go +++ b/pkg/model/loader.go @@ -157,6 +157,11 @@ func (ml *ModelLoader) ShutdownModel(modelName string) error { } time.Sleep(dur) retries++ + + if retries > 10 && os.Getenv("LOCALAI_FORCE_BACKEND_SHUTDOWN") == "true" { + log.Warn().Msgf("Model %s is still busy after %d retries. 
Forcing shutdown.", modelName, retries) + break + } } return ml.deleteProcess(modelName) From 092bb0bd6b071daeb5d3b57fd0c0cb2853389efb Mon Sep 17 00:00:00 2001 From: Ettore Di Giacinto Date: Sat, 5 Oct 2024 15:14:27 +0200 Subject: [PATCH 0252/1530] fix(base-grpc): close channel in base grpc server (#3734) If the LLM does not implement any logic for PredictStream, we close the channel immediately to not leave the process hanging. Signed-off-by: Ettore Di Giacinto --- pkg/grpc/base/base.go | 1 + 1 file changed, 1 insertion(+) diff --git a/pkg/grpc/base/base.go b/pkg/grpc/base/base.go index 95dca561..3356f86b 100644 --- a/pkg/grpc/base/base.go +++ b/pkg/grpc/base/base.go @@ -41,6 +41,7 @@ func (llm *Base) Predict(opts *pb.PredictOptions) (string, error) { } func (llm *Base) PredictStream(opts *pb.PredictOptions, results chan string) error { + close(results) return fmt.Errorf("unimplemented") } From a9abfa2b61ffceeab13e967aac494e087b090bcb Mon Sep 17 00:00:00 2001 From: "LocalAI [bot]" <139863280+localai-bot@users.noreply.github.com> Date: Sun, 6 Oct 2024 10:09:57 +0200 Subject: [PATCH 0253/1530] chore: :arrow_up: Update ggerganov/whisper.cpp to `6a94163b913d8e974e60d9ac56c8930d19f45773` (#3735) :arrow_up: Update ggerganov/whisper.cpp Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com> Co-authored-by: mudler <2420543+mudler@users.noreply.github.com> --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index 28f5086a..fe39f33a 100644 --- a/Makefile +++ b/Makefile @@ -16,7 +16,7 @@ RWKV_VERSION?=661e7ae26d442f5cfebd2a0881b44e8c55949ec6 # whisper.cpp version WHISPER_REPO?=https://github.com/ggerganov/whisper.cpp -WHISPER_CPP_VERSION?=2944cb72d95282378037cb0eb45c9e2b2529ff2c +WHISPER_CPP_VERSION?=6a94163b913d8e974e60d9ac56c8930d19f45773 # bert.cpp version BERT_REPO?=https://github.com/go-skynet/go-bert.cpp From 1b8a66300119b3c961bc8ddb2f930a463c260119 Mon Sep 17 00:00:00 2001 From: 
"LocalAI [bot]" <139863280+localai-bot@users.noreply.github.com> Date: Sun, 6 Oct 2024 10:10:13 +0200 Subject: [PATCH 0254/1530] chore: :arrow_up: Update ggerganov/llama.cpp to `8c475b97b8ba7d678d4c9904b1161bd8811a9b44` (#3736) :arrow_up: Update ggerganov/llama.cpp Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com> Co-authored-by: mudler <2420543+mudler@users.noreply.github.com> --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index fe39f33a..0196364e 100644 --- a/Makefile +++ b/Makefile @@ -8,7 +8,7 @@ DETECT_LIBS?=true # llama.cpp versions GOLLAMA_REPO?=https://github.com/go-skynet/go-llama.cpp GOLLAMA_VERSION?=2b57a8ae43e4699d3dc5d1496a1ccd42922993be -CPPLLAMA_VERSION?=71967c2a6d30da9f61580d3e2d4cb00e0223b6fa +CPPLLAMA_VERSION?=8c475b97b8ba7d678d4c9904b1161bd8811a9b44 # go-rwkv version RWKV_REPO?=https://github.com/donomii/go-rwkv.cpp From c8bfb72104106555e4db783cef247644dc657dad Mon Sep 17 00:00:00 2001 From: "LocalAI [bot]" <139863280+localai-bot@users.noreply.github.com> Date: Sun, 6 Oct 2024 23:40:25 +0200 Subject: [PATCH 0255/1530] chore: :arrow_up: Update ggerganov/llama.cpp to `d5cb86844f26f600c48bf3643738ea68138f961d` (#3738) :arrow_up: Update ggerganov/llama.cpp Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com> Co-authored-by: mudler <2420543+mudler@users.noreply.github.com> --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index 0196364e..c0ee021e 100644 --- a/Makefile +++ b/Makefile @@ -8,7 +8,7 @@ DETECT_LIBS?=true # llama.cpp versions GOLLAMA_REPO?=https://github.com/go-skynet/go-llama.cpp GOLLAMA_VERSION?=2b57a8ae43e4699d3dc5d1496a1ccd42922993be -CPPLLAMA_VERSION?=8c475b97b8ba7d678d4c9904b1161bd8811a9b44 +CPPLLAMA_VERSION?=d5cb86844f26f600c48bf3643738ea68138f961d # go-rwkv version RWKV_REPO?=https://github.com/donomii/go-rwkv.cpp From 
55af0b1c688f3a4d0e2e60e78e66803c8feacc0b Mon Sep 17 00:00:00 2001 From: "LocalAI [bot]" <139863280+localai-bot@users.noreply.github.com> Date: Mon, 7 Oct 2024 09:33:28 +0200 Subject: [PATCH 0256/1530] chore: :arrow_up: Update ggerganov/whisper.cpp to `9f346d00840bcd7af62794871109841af40cecfb` (#3739) :arrow_up: Update ggerganov/whisper.cpp Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com> Co-authored-by: mudler <2420543+mudler@users.noreply.github.com> --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index c0ee021e..86437b67 100644 --- a/Makefile +++ b/Makefile @@ -16,7 +16,7 @@ RWKV_VERSION?=661e7ae26d442f5cfebd2a0881b44e8c55949ec6 # whisper.cpp version WHISPER_REPO?=https://github.com/ggerganov/whisper.cpp -WHISPER_CPP_VERSION?=6a94163b913d8e974e60d9ac56c8930d19f45773 +WHISPER_CPP_VERSION?=9f346d00840bcd7af62794871109841af40cecfb # bert.cpp version BERT_REPO?=https://github.com/go-skynet/go-bert.cpp From 03bbbea039fbef0053e2d6ba41fd8ceb0c89b5c2 Mon Sep 17 00:00:00 2001 From: Ettore Di Giacinto Date: Mon, 7 Oct 2024 09:46:44 +0200 Subject: [PATCH 0257/1530] models(gallery): add mn-backyardai-party-12b-v1-iq-arm-imatrix (#3740) Signed-off-by: Ettore Di Giacinto --- gallery/index.yaml | 20 ++++++++++++++++++++ 1 file changed, 20 insertions(+) diff --git a/gallery/index.yaml b/gallery/index.yaml index 289c2aff..8b449dcb 100644 --- a/gallery/index.yaml +++ b/gallery/index.yaml @@ -1734,6 +1734,26 @@ - filename: MagnusIntellectus-12B-v1.i1-Q4_K_M.gguf sha256: c97107983b4edc5b6f2a592d227ca2dd4196e2af3d3bc0fe6b7a8954a1fb5870 uri: huggingface://mradermacher/MagnusIntellectus-12B-v1-i1-GGUF/MagnusIntellectus-12B-v1.i1-Q4_K_M.gguf +- !!merge <<: *mistral03 + url: "github:mudler/LocalAI/gallery/chatml.yaml@master" + name: "mn-backyardai-party-12b-v1-iq-arm-imatrix" + icon: https://huggingface.co/Sao10K/MN-BackyardAI-Party-12B-v1/resolve/main/party1.png + urls: + - 
https://huggingface.co/Sao10K/MN-BackyardAI-Party-12B-v1 + - https://huggingface.co/Lewdiculous/MN-BackyardAI-Party-12B-v1-GGUF-IQ-ARM-Imatrix + description: | + This is a group-chat based roleplaying model, based off of 12B-Lyra-v4a2, a variant of Lyra-v4 that is currently private. + + It is trained on an entirely human-based dataset, based on forum / internet group roleplaying styles. The only augmentation done with LLMs is to the character sheets, to fit to the system prompt, to fit various character sheets within context. + + This model is still capable of 1 on 1 roleplay, though I recommend using ChatML when doing that instead. + overrides: + parameters: + model: MN-BackyardAI-Party-12B-v1-Q4_K_M-imat.gguf + files: + - filename: MN-BackyardAI-Party-12B-v1-Q4_K_M-imat.gguf + sha256: cea68768dff58b553974b755bb40ef790ab8b86866d9b5c46bc2e6c3311b876a + uri: huggingface://Lewdiculous/MN-BackyardAI-Party-12B-v1-GGUF-IQ-ARM-Imatrix/MN-BackyardAI-Party-12B-v1-Q4_K_M-imat.gguf - &mudler ### START mudler's LocalAI specific-models url: "github:mudler/LocalAI/gallery/mudler.yaml@master" From debc0974a69d5bfd8c0afb47ead9503b4507208c Mon Sep 17 00:00:00 2001 From: Ettore Di Giacinto Date: Mon, 7 Oct 2024 09:51:57 +0200 Subject: [PATCH 0258/1530] models(gallery): add t.e-8.1-iq-imatrix-request (#3741) Signed-off-by: Ettore Di Giacinto --- gallery/index.yaml | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) diff --git a/gallery/index.yaml b/gallery/index.yaml index 8b449dcb..c2764ba4 100644 --- a/gallery/index.yaml +++ b/gallery/index.yaml @@ -319,6 +319,23 @@ - filename: calme-2.2-qwen2.5-72b.i1-Q4_K_M.gguf sha256: 5fdfa599724d7c78502c477ced1d294e92781b91d3265bd0748fbf15a6fefde6 uri: huggingface://mradermacher/calme-2.2-qwen2.5-72b-i1-GGUF/calme-2.2-qwen2.5-72b.i1-Q4_K_M.gguf +- !!merge <<: *qwen25 + name: "t.e-8.1-iq-imatrix-request" + # chatml + url: "github:mudler/LocalAI/gallery/chatml.yaml@master" + icon: 
https://cdn-uploads.huggingface.co/production/uploads/65d4cf2693a0a3744a27536c/K1aNPf32z-6tYZdcSQBzF.png + urls: + - https://huggingface.co/Cran-May/T.E-8.1 + - https://huggingface.co/Lewdiculous/T.E-8.1-GGUF-IQ-Imatrix-Request + description: | + Trained for roleplay uses. + overrides: + parameters: + model: T.E-8.1-Q4_K_M-imat.gguf + files: + - filename: T.E-8.1-Q4_K_M-imat.gguf + sha256: 1b7892b82c01ea4cbebe34cd00f9836cbbc369fc3247c1f44a92842201e7ec0b + uri: huggingface://Lewdiculous/T.E-8.1-GGUF-IQ-Imatrix-Request/T.E-8.1-Q4_K_M-imat.gguf - &smollm ## SmolLM url: "github:mudler/LocalAI/gallery/chatml.yaml@master" From cfd61122562aa5485ce9fafaca08addc96aac69d Mon Sep 17 00:00:00 2001 From: Ettore Di Giacinto Date: Mon, 7 Oct 2024 09:52:23 +0200 Subject: [PATCH 0259/1530] models(gallery): add violet_twilight-v0.2-iq-imatrix (#3742) Signed-off-by: Ettore Di Giacinto --- gallery/index.yaml | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/gallery/index.yaml b/gallery/index.yaml index c2764ba4..22978dd2 100644 --- a/gallery/index.yaml +++ b/gallery/index.yaml @@ -1161,6 +1161,22 @@ - filename: Llama-3.1-8B-ArliAI-RPMax-v1.1-Q4_K_M.gguf sha256: 0a601c7341228d9160332965298d799369a1dc2b7080771fb8051bdeb556b30c uri: huggingface://bartowski/Llama-3.1-8B-ArliAI-RPMax-v1.1-GGUF/Llama-3.1-8B-ArliAI-RPMax-v1.1-Q4_K_M.gguf +- !!merge <<: *llama31 + name: "violet_twilight-v0.2-iq-imatrix" + url: "github:mudler/LocalAI/gallery/chatml.yaml@master" + icon: https://cdn-uploads.huggingface.co/production/uploads/64adfd277b5ff762771e4571/P962FQhRG4I8nbU_DJolY.png + urls: + - https://huggingface.co/Epiculous/Violet_Twilight-v0.2 + - https://huggingface.co/Lewdiculous/Violet_Twilight-v0.2-GGUF-IQ-Imatrix + description: | + Now for something a bit different, Violet_Twilight-v0.2! This model is a SLERP merge of Azure_Dusk-v0.2 and Crimson_Dawn-v0.2! 
+ overrides: + parameters: + model: Violet_Twilight-v0.2-Q4_K_M-imat.gguf + files: + - filename: Violet_Twilight-v0.2-Q4_K_M-imat.gguf + sha256: 0793d196a00cd6fd4e67b8c585b27a94d397e33d427e4ad4aa9a16b7abc339cd + uri: huggingface://Lewdiculous/Violet_Twilight-v0.2-GGUF-IQ-Imatrix/Violet_Twilight-v0.2-Q4_K_M-imat.gguf - &deepseek ## Deepseek url: "github:mudler/LocalAI/gallery/deepseek.yaml@master" From 04f284d202caf7b27c1a170cb5d7b1013dddb396 Mon Sep 17 00:00:00 2001 From: Ettore Di Giacinto Date: Mon, 7 Oct 2024 09:56:33 +0200 Subject: [PATCH 0260/1530] models(gallery): add gemma-2-9b-it-abliterated (#3743) Signed-off-by: Ettore Di Giacinto --- gallery/index.yaml | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git a/gallery/index.yaml b/gallery/index.yaml index 22978dd2..42f375b3 100644 --- a/gallery/index.yaml +++ b/gallery/index.yaml @@ -2383,6 +2383,24 @@ - filename: Gemma-2-2B-ArliAI-RPMax-v1.1-Q4_K_M.gguf sha256: 89fe35345754d7e9de8d0c0d5bf35b2be9b12a09811b365b712b8b27112f7712 uri: huggingface://bartowski/Gemma-2-2B-ArliAI-RPMax-v1.1-GGUF/Gemma-2-2B-ArliAI-RPMax-v1.1-Q4_K_M.gguf +- !!merge <<: *gemma + name: "gemma-2-9b-it-abliterated" + urls: + - https://huggingface.co/IlyaGusev/gemma-2-9b-it-abliterated + - https://huggingface.co/bartowski/gemma-2-9b-it-abliterated-GGUF + description: | + Abliterated version of google/gemma-2-9b-it. + + The abliteration script (link) is based on code from the blog post and heavily uses TransformerLens. The only major difference from the code used for Llama is scaling the embedding layer back. + + Orthogonalization did not produce the same results as regular interventions since there are RMSNorm layers before merging activations into the residual stream. However, the final model still seems to be uncensored. 
+ overrides: + parameters: + model: gemma-2-9b-it-abliterated-Q4_K_M.gguf + files: + - filename: gemma-2-9b-it-abliterated-Q4_K_M.gguf + sha256: 88d84ac9796732c10f6c58e0feb4db8e04c05d74bdb7047a5e37906a589896e1 + uri: huggingface://bartowski/gemma-2-9b-it-abliterated-GGUF/gemma-2-9b-it-abliterated-Q4_K_M.gguf - &llama3 url: "github:mudler/LocalAI/gallery/llama3-instruct.yaml@master" icon: https://cdn-uploads.huggingface.co/production/uploads/642cc1c253e76b4c2286c58e/aJJxKus1wP5N-euvHEUq7.png From fbca9f82fd7962bc6c330544cedb0d5c7c288be3 Mon Sep 17 00:00:00 2001 From: Ettore Di Giacinto Date: Mon, 7 Oct 2024 11:22:55 +0200 Subject: [PATCH 0261/1530] fix(vllm): bump cmake - vllm requires it (#3744) * fix(vllm): bump cmake - vllm requires it Signed-off-by: Ettore Di Giacinto * chore(tests): try to increase coqui timeout Signed-off-by: Ettore Di Giacinto --------- Signed-off-by: Ettore Di Giacinto --- Dockerfile | 14 ++++++++++---- backend/python/coqui/test.py | 2 +- 2 files changed, 11 insertions(+), 5 deletions(-) diff --git a/Dockerfile b/Dockerfile index 8c657469..532e3d55 100644 --- a/Dockerfile +++ b/Dockerfile @@ -9,6 +9,7 @@ FROM ${BASE_IMAGE} AS requirements-core USER root ARG GO_VERSION=1.22.6 +ARG CMAKE_VERSION=3.26.4 ARG TARGETARCH ARG TARGETVARIANT @@ -21,8 +22,7 @@ RUN apt-get update && \ build-essential \ ccache \ ca-certificates \ - cmake \ - curl \ + curl libssl-dev \ git \ unzip upx-ucl && \ apt-get clean && \ @@ -32,6 +32,9 @@ RUN apt-get update && \ RUN curl -L -s https://go.dev/dl/go${GO_VERSION}.linux-${TARGETARCH}.tar.gz | tar -C /usr/local -xz ENV PATH=$PATH:/root/go/bin:/usr/local/go/bin +# Install CMake (the version in 22.04 is too old) +RUN curl -L -s https://github.com/Kitware/CMake/releases/download/v${CMAKE_VERSION}/cmake-${CMAKE_VERSION}.tar.gz -o cmake.tar.gz && tar xvf cmake.tar.gz && cd cmake-${CMAKE_VERSION} && ./configure && make && make install + # Install grpc compilers RUN go install 
google.golang.org/protobuf/cmd/protoc-gen-go@v1.34.2 && \ go install google.golang.org/grpc/cmd/protoc-gen-go-grpc@1958fcbe2ca8bd93af633f11e97d44e567e945af @@ -188,6 +191,7 @@ FROM ${GRPC_BASE_IMAGE} AS grpc # This is a bit of a hack, but it's required in order to be able to effectively cache this layer in CI ARG GRPC_MAKEFLAGS="-j4 -Otarget" ARG GRPC_VERSION=v1.65.0 +ARG CMAKE_VERSION=3.26.4 ENV MAKEFLAGS=${GRPC_MAKEFLAGS} @@ -196,12 +200,14 @@ WORKDIR /build RUN apt-get update && \ apt-get install -y --no-install-recommends \ ca-certificates \ - build-essential \ - cmake \ + build-essential curl libssl-dev \ git && \ apt-get clean && \ rm -rf /var/lib/apt/lists/* +# Install CMake (the version in 22.04 is too old) +RUN curl -L -s https://github.com/Kitware/CMake/releases/download/v${CMAKE_VERSION}/cmake-${CMAKE_VERSION}.tar.gz -o cmake.tar.gz && tar xvf cmake.tar.gz && cd cmake-${CMAKE_VERSION} && ./configure && make && make install + # We install GRPC to a different prefix here so that we can copy in only the build artifacts later # saves several hundred MB on the final docker image size vs copying in the entire GRPC source tree # and running make install in the target container diff --git a/backend/python/coqui/test.py b/backend/python/coqui/test.py index d1418fa3..e0b1a0bd 100644 --- a/backend/python/coqui/test.py +++ b/backend/python/coqui/test.py @@ -19,7 +19,7 @@ class TestBackendServicer(unittest.TestCase): This method sets up the gRPC service by starting the server """ self.service = subprocess.Popen(["python3", "backend.py", "--addr", "localhost:50051"]) - time.sleep(10) + time.sleep(30) def tearDown(self) -> None: """ From d19bea4af24a449be438bdf3e6ee5095ece68137 Mon Sep 17 00:00:00 2001 From: Ettore Di Giacinto Date: Mon, 7 Oct 2024 12:27:37 +0200 Subject: [PATCH 0262/1530] chore(vllm): do not install from source (#3745) chore(vllm): do not install from source by default Signed-off-by: Ettore Di Giacinto --- backend/python/openvoice/test.py | 2 +- 
backend/python/vllm/install.sh | 4 +++- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/backend/python/openvoice/test.py b/backend/python/openvoice/test.py index 262917b3..82f08785 100644 --- a/backend/python/openvoice/test.py +++ b/backend/python/openvoice/test.py @@ -19,7 +19,7 @@ class TestBackendServicer(unittest.TestCase): This method sets up the gRPC service by starting the server """ self.service = subprocess.Popen(["python3", "backend.py", "--addr", "localhost:50051"]) - time.sleep(10) + time.sleep(30) def tearDown(self) -> None: """ diff --git a/backend/python/vllm/install.sh b/backend/python/vllm/install.sh index 022cf8bf..9078b81b 100755 --- a/backend/python/vllm/install.sh +++ b/backend/python/vllm/install.sh @@ -13,7 +13,9 @@ if [ "x${BUILD_PROFILE}" == "xintel" ]; then EXTRA_PIP_INSTALL_FLAGS+=" --upgrade --index-strategy=unsafe-first-match" fi -if [ "x${BUILD_TYPE}" == "x" ]; then +# We don't embed this into the images as it is a large dependency and not always needed. +# Besides, the speed inference are not actually usable in the current state for production use-cases. +if [ "x${BUILD_TYPE}" == "x" ] && [ "x${FROM_SOURCE}" == "xtrue" ]; then ensureVenv # https://docs.vllm.ai/en/v0.6.1/getting_started/cpu-installation.html if [ ! 
-d vllm ]; then From e06daf437aa1b8d71ea9a40ffa20a2e88b438f11 Mon Sep 17 00:00:00 2001 From: Ettore Di Giacinto Date: Mon, 7 Oct 2024 16:42:56 +0200 Subject: [PATCH 0263/1530] chore(Dockerfile): default to cmake from package manager (#3746) Signed-off-by: Ettore Di Giacinto --- Dockerfile | 30 ++++++++++++++++++++++++++---- 1 file changed, 26 insertions(+), 4 deletions(-) diff --git a/Dockerfile b/Dockerfile index 532e3d55..a2709877 100644 --- a/Dockerfile +++ b/Dockerfile @@ -10,6 +10,7 @@ USER root ARG GO_VERSION=1.22.6 ARG CMAKE_VERSION=3.26.4 +ARG CMAKE_FROM_SOURCE=false ARG TARGETARCH ARG TARGETVARIANT @@ -28,13 +29,23 @@ RUN apt-get update && \ apt-get clean && \ rm -rf /var/lib/apt/lists/* +# Install CMake (the version in 22.04 is too old) +RUN < Date: Mon, 7 Oct 2024 21:04:18 +0000 Subject: [PATCH 0264/1530] chore(deps): Bump langchain from 0.3.1 to 0.3.2 in /examples/functions (#3755) Bumps [langchain](https://github.com/langchain-ai/langchain) from 0.3.1 to 0.3.2. - [Release notes](https://github.com/langchain-ai/langchain/releases) - [Commits](https://github.com/langchain-ai/langchain/compare/langchain==0.3.1...langchain==0.3.2) --- updated-dependencies: - dependency-name: langchain dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- examples/functions/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/functions/requirements.txt b/examples/functions/requirements.txt index 952f9d62..49bfcadb 100644 --- a/examples/functions/requirements.txt +++ b/examples/functions/requirements.txt @@ -1,2 +1,2 @@ -langchain==0.3.1 +langchain==0.3.2 openai==1.50.2 From 377cdcabbf4407b2ff1a7c0fa9c63ac02906cd23 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 8 Oct 2024 00:05:53 +0000 Subject: [PATCH 0265/1530] chore(deps): Bump openai from 1.50.2 to 1.51.1 in /examples/functions (#3754) Bumps [openai](https://github.com/openai/openai-python) from 1.50.2 to 1.51.1. - [Release notes](https://github.com/openai/openai-python/releases) - [Changelog](https://github.com/openai/openai-python/blob/main/CHANGELOG.md) - [Commits](https://github.com/openai/openai-python/compare/v1.50.2...v1.51.1) --- updated-dependencies: - dependency-name: openai dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- examples/functions/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/functions/requirements.txt b/examples/functions/requirements.txt index 49bfcadb..9e988435 100644 --- a/examples/functions/requirements.txt +++ b/examples/functions/requirements.txt @@ -1,2 +1,2 @@ langchain==0.3.2 -openai==1.50.2 +openai==1.51.1 From d9b63fae7ccac3e01512e9736f6eec780c17ec1c Mon Sep 17 00:00:00 2001 From: Ettore Di Giacinto Date: Tue, 8 Oct 2024 09:24:19 +0200 Subject: [PATCH 0266/1530] chore(tests): improve rwkv tests and consume TEST_FLAKES (#3765) chores(tests): improve rwkv tests and consume TEST_FLAKES consistently use TEST_FLAKES and reduce flakiness of rwkv tests by being case insensitive Signed-off-by: Ettore Di Giacinto --- Makefile | 14 +++++++------- core/http/app_test.go | 5 +++-- 2 files changed, 10 insertions(+), 9 deletions(-) diff --git a/Makefile b/Makefile index 86437b67..af0e2e84 100644 --- a/Makefile +++ b/Makefile @@ -470,13 +470,13 @@ run-e2e-image: run-e2e-aio: protogen-go @echo 'Running e2e AIO tests' - $(GOCMD) run github.com/onsi/ginkgo/v2/ginkgo --flake-attempts 5 -v -r ./tests/e2e-aio + $(GOCMD) run github.com/onsi/ginkgo/v2/ginkgo --flake-attempts $(TEST_FLAKES) -v -r ./tests/e2e-aio test-e2e: @echo 'Running e2e tests' BUILD_TYPE=$(BUILD_TYPE) \ LOCALAI_API=http://$(E2E_BRIDGE_IP):5390/v1 \ - $(GOCMD) run github.com/onsi/ginkgo/v2/ginkgo --flake-attempts 5 -v -r ./tests/e2e + $(GOCMD) run github.com/onsi/ginkgo/v2/ginkgo --flake-attempts $(TEST_FLAKES) -v -r ./tests/e2e teardown-e2e: rm -rf $(TEST_DIR) || true @@ -484,24 +484,24 @@ teardown-e2e: test-llama: prepare-test TEST_DIR=$(abspath ./)/test-dir/ FIXTURES=$(abspath ./)/tests/fixtures CONFIG_FILE=$(abspath ./)/test-models/config.yaml MODELS_PATH=$(abspath ./)/test-models \ - $(GOCMD) run github.com/onsi/ginkgo/v2/ginkgo 
--label-filter="llama" --flake-attempts 5 -v -r $(TEST_PATHS) + $(GOCMD) run github.com/onsi/ginkgo/v2/ginkgo --label-filter="llama" --flake-attempts $(TEST_FLAKES) -v -r $(TEST_PATHS) test-llama-gguf: prepare-test TEST_DIR=$(abspath ./)/test-dir/ FIXTURES=$(abspath ./)/tests/fixtures CONFIG_FILE=$(abspath ./)/test-models/config.yaml MODELS_PATH=$(abspath ./)/test-models \ - $(GOCMD) run github.com/onsi/ginkgo/v2/ginkgo --label-filter="llama-gguf" --flake-attempts 5 -v -r $(TEST_PATHS) + $(GOCMD) run github.com/onsi/ginkgo/v2/ginkgo --label-filter="llama-gguf" --flake-attempts $(TEST_FLAKES) -v -r $(TEST_PATHS) test-tts: prepare-test TEST_DIR=$(abspath ./)/test-dir/ FIXTURES=$(abspath ./)/tests/fixtures CONFIG_FILE=$(abspath ./)/test-models/config.yaml MODELS_PATH=$(abspath ./)/test-models \ - $(GOCMD) run github.com/onsi/ginkgo/v2/ginkgo --label-filter="tts" --flake-attempts 1 -v -r $(TEST_PATHS) + $(GOCMD) run github.com/onsi/ginkgo/v2/ginkgo --label-filter="tts" --flake-attempts $(TEST_FLAKES) -v -r $(TEST_PATHS) test-stablediffusion: prepare-test TEST_DIR=$(abspath ./)/test-dir/ FIXTURES=$(abspath ./)/tests/fixtures CONFIG_FILE=$(abspath ./)/test-models/config.yaml MODELS_PATH=$(abspath ./)/test-models \ - $(GOCMD) run github.com/onsi/ginkgo/v2/ginkgo --label-filter="stablediffusion" --flake-attempts 1 -v -r $(TEST_PATHS) + $(GOCMD) run github.com/onsi/ginkgo/v2/ginkgo --label-filter="stablediffusion" --flake-attempts $(TEST_FLAKES) -v -r $(TEST_PATHS) test-stores: backend-assets/grpc/local-store mkdir -p tests/integration/backend-assets/grpc cp -f backend-assets/grpc/local-store tests/integration/backend-assets/grpc/ - $(GOCMD) run github.com/onsi/ginkgo/v2/ginkgo --label-filter="stores" --flake-attempts 1 -v -r tests/integration + $(GOCMD) run github.com/onsi/ginkgo/v2/ginkgo --label-filter="stores" --flake-attempts $(TEST_FLAKES) -v -r tests/integration test-container: docker build --target requirements -t local-ai-test-container . 
diff --git a/core/http/app_test.go b/core/http/app_test.go index bbe52c34..871a1a32 100644 --- a/core/http/app_test.go +++ b/core/http/app_test.go @@ -12,6 +12,7 @@ import ( "os" "path/filepath" "runtime" + "strings" "github.com/mudler/LocalAI/core/config" . "github.com/mudler/LocalAI/core/http" @@ -950,7 +951,7 @@ var _ = Describe("API test", func() { openai.ChatCompletionRequest{Model: "rwkv_test", Messages: []openai.ChatCompletionMessage{{Content: "Can you count up to five?", Role: "user"}}}) Expect(err).ToNot(HaveOccurred()) Expect(len(resp.Choices) > 0).To(BeTrue()) - Expect(resp.Choices[0].Message.Content).To(Or(ContainSubstring("Sure"), ContainSubstring("five"))) + Expect(strings.ToLower(resp.Choices[0].Message.Content)).To(Or(ContainSubstring("sure"), ContainSubstring("five"))) stream, err := client.CreateChatCompletionStream(context.TODO(), openai.ChatCompletionRequest{Model: "rwkv_test", Messages: []openai.ChatCompletionMessage{{Content: "Can you count up to five?", Role: "user"}}}) Expect(err).ToNot(HaveOccurred()) @@ -969,7 +970,7 @@ var _ = Describe("API test", func() { tokens++ } Expect(text).ToNot(BeEmpty()) - Expect(text).To(Or(ContainSubstring("Sure"), ContainSubstring("five"))) + Expect(strings.ToLower(text)).To(Or(ContainSubstring("sure"), ContainSubstring("five"))) Expect(tokens).ToNot(Or(Equal(1), Equal(0))) }) From 6b8a4023531bac865bdef25aac35b1ae3b203dbe Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 8 Oct 2024 09:24:56 +0200 Subject: [PATCH 0267/1530] chore(deps): Bump openai from 1.45.1 to 1.51.1 in /examples/langchain/langchainpy-localai-example (#3748) chore(deps): Bump openai Bumps [openai](https://github.com/openai/openai-python) from 1.45.1 to 1.51.1. 
- [Release notes](https://github.com/openai/openai-python/releases) - [Changelog](https://github.com/openai/openai-python/blob/main/CHANGELOG.md) - [Commits](https://github.com/openai/openai-python/compare/v1.45.1...v1.51.1) --- updated-dependencies: - dependency-name: openai dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- examples/langchain/langchainpy-localai-example/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/langchain/langchainpy-localai-example/requirements.txt b/examples/langchain/langchainpy-localai-example/requirements.txt index 1d48dee8..fdab70e9 100644 --- a/examples/langchain/langchainpy-localai-example/requirements.txt +++ b/examples/langchain/langchainpy-localai-example/requirements.txt @@ -18,7 +18,7 @@ multidict==6.0.5 mypy-extensions==1.0.0 numexpr==2.10.1 numpy==2.1.1 -openai==1.45.1 +openai==1.51.1 openapi-schema-pydantic==1.2.4 packaging>=23.2 pydantic==2.9.2 From b653883c0a88d167b157e049d7dc7005e42fa221 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 8 Oct 2024 09:32:08 +0200 Subject: [PATCH 0268/1530] chore(deps): Bump multidict from 6.0.5 to 6.1.0 in /examples/langchain/langchainpy-localai-example (#3749) chore(deps): Bump multidict Bumps [multidict](https://github.com/aio-libs/multidict) from 6.0.5 to 6.1.0. - [Release notes](https://github.com/aio-libs/multidict/releases) - [Changelog](https://github.com/aio-libs/multidict/blob/master/CHANGES.rst) - [Commits](https://github.com/aio-libs/multidict/compare/v6.0.5...v6.1.0) --- updated-dependencies: - dependency-name: multidict dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- examples/langchain/langchainpy-localai-example/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/langchain/langchainpy-localai-example/requirements.txt b/examples/langchain/langchainpy-localai-example/requirements.txt index fdab70e9..d726173c 100644 --- a/examples/langchain/langchainpy-localai-example/requirements.txt +++ b/examples/langchain/langchainpy-localai-example/requirements.txt @@ -14,7 +14,7 @@ langchain==0.3.1 langchain-community==0.3.1 marshmallow==3.22.0 marshmallow-enum==1.5.1 -multidict==6.0.5 +multidict==6.1.0 mypy-extensions==1.0.0 numexpr==2.10.1 numpy==2.1.1 From 287200e687b81b02763869be4525e5703609c5cf Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 8 Oct 2024 09:32:18 +0200 Subject: [PATCH 0269/1530] chore(deps): Bump aiohttp from 3.10.8 to 3.10.9 in /examples/langchain/langchainpy-localai-example (#3750) chore(deps): Bump aiohttp Bumps [aiohttp](https://github.com/aio-libs/aiohttp) from 3.10.8 to 3.10.9. - [Release notes](https://github.com/aio-libs/aiohttp/releases) - [Changelog](https://github.com/aio-libs/aiohttp/blob/master/CHANGES.rst) - [Commits](https://github.com/aio-libs/aiohttp/compare/v3.10.8...v3.10.9) --- updated-dependencies: - dependency-name: aiohttp dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- examples/langchain/langchainpy-localai-example/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/langchain/langchainpy-localai-example/requirements.txt b/examples/langchain/langchainpy-localai-example/requirements.txt index d726173c..d9f26aae 100644 --- a/examples/langchain/langchainpy-localai-example/requirements.txt +++ b/examples/langchain/langchainpy-localai-example/requirements.txt @@ -1,4 +1,4 @@ -aiohttp==3.10.8 +aiohttp==3.10.9 aiosignal==1.3.1 async-timeout==4.0.3 attrs==24.2.0 From 5f58841a3a84e29a44f054b20f59842f1fde362c Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 8 Oct 2024 09:32:30 +0200 Subject: [PATCH 0270/1530] chore(deps): Bump llama-index from 0.11.14 to 0.11.16 in /examples/chainlit (#3753) chore(deps): Bump llama-index in /examples/chainlit Bumps [llama-index](https://github.com/run-llama/llama_index) from 0.11.14 to 0.11.16. - [Release notes](https://github.com/run-llama/llama_index/releases) - [Changelog](https://github.com/run-llama/llama_index/blob/main/CHANGELOG.md) - [Commits](https://github.com/run-llama/llama_index/compare/v0.11.14...v0.11.16) --- updated-dependencies: - dependency-name: llama-index dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- examples/chainlit/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/chainlit/requirements.txt b/examples/chainlit/requirements.txt index ee6c63ac..c21991df 100644 --- a/examples/chainlit/requirements.txt +++ b/examples/chainlit/requirements.txt @@ -1,4 +1,4 @@ -llama_index==0.11.14 +llama_index==0.11.16 requests==2.32.3 weaviate_client==4.8.1 transformers From f9069daf03c7e8bdd8e3cbf3ece900b388dd4b55 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 8 Oct 2024 09:32:40 +0200 Subject: [PATCH 0271/1530] chore(deps): Bump streamlit from 1.38.0 to 1.39.0 in /examples/streamlit-bot (#3757) chore(deps): Bump streamlit in /examples/streamlit-bot Bumps [streamlit](https://github.com/streamlit/streamlit) from 1.38.0 to 1.39.0. - [Release notes](https://github.com/streamlit/streamlit/releases) - [Commits](https://github.com/streamlit/streamlit/compare/1.38.0...1.39.0) --- updated-dependencies: - dependency-name: streamlit dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- examples/streamlit-bot/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/streamlit-bot/requirements.txt b/examples/streamlit-bot/requirements.txt index fa8c4118..275060a2 100644 --- a/examples/streamlit-bot/requirements.txt +++ b/examples/streamlit-bot/requirements.txt @@ -1,2 +1,2 @@ -streamlit==1.38.0 +streamlit==1.39.0 requests \ No newline at end of file From 0f44c3f69c23f822796e95103d8f4be6483d4bca Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 8 Oct 2024 09:32:59 +0200 Subject: [PATCH 0272/1530] chore(deps): Bump debugpy from 1.8.2 to 1.8.6 in /examples/langchain/langchainpy-localai-example (#3751) chore(deps): Bump debugpy Bumps [debugpy](https://github.com/microsoft/debugpy) from 1.8.2 to 1.8.6. - [Release notes](https://github.com/microsoft/debugpy/releases) - [Commits](https://github.com/microsoft/debugpy/compare/v1.8.2...v1.8.6) --- updated-dependencies: - dependency-name: debugpy dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- examples/langchain/langchainpy-localai-example/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/langchain/langchainpy-localai-example/requirements.txt b/examples/langchain/langchainpy-localai-example/requirements.txt index d9f26aae..2c83dc70 100644 --- a/examples/langchain/langchainpy-localai-example/requirements.txt +++ b/examples/langchain/langchainpy-localai-example/requirements.txt @@ -6,7 +6,7 @@ certifi==2024.8.30 charset-normalizer==3.3.2 colorama==0.4.6 dataclasses-json==0.6.7 -debugpy==1.8.2 +debugpy==1.8.6 frozenlist==1.4.1 greenlet==3.1.1 idna==3.10 From 5674e671d0ec48c3fec75c8cfadfa026730d371a Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 8 Oct 2024 09:33:13 +0200 Subject: [PATCH 0273/1530] chore(deps): Bump langchain from 0.3.1 to 0.3.2 in /examples/langchain/langchainpy-localai-example (#3752) chore(deps): Bump langchain Bumps [langchain](https://github.com/langchain-ai/langchain) from 0.3.1 to 0.3.2. - [Release notes](https://github.com/langchain-ai/langchain/releases) - [Commits](https://github.com/langchain-ai/langchain/compare/langchain==0.3.1...langchain==0.3.2) --- updated-dependencies: - dependency-name: langchain dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- examples/langchain/langchainpy-localai-example/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/langchain/langchainpy-localai-example/requirements.txt b/examples/langchain/langchainpy-localai-example/requirements.txt index 2c83dc70..a50fd991 100644 --- a/examples/langchain/langchainpy-localai-example/requirements.txt +++ b/examples/langchain/langchainpy-localai-example/requirements.txt @@ -10,7 +10,7 @@ debugpy==1.8.6 frozenlist==1.4.1 greenlet==3.1.1 idna==3.10 -langchain==0.3.1 +langchain==0.3.2 langchain-community==0.3.1 marshmallow==3.22.0 marshmallow-enum==1.5.1 From 45006500001cb268b83f6922df7818ead835fe7a Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 8 Oct 2024 09:33:25 +0200 Subject: [PATCH 0274/1530] chore(deps): Bump openai from 1.50.2 to 1.51.1 in /examples/langchain-chroma (#3758) chore(deps): Bump openai in /examples/langchain-chroma Bumps [openai](https://github.com/openai/openai-python) from 1.50.2 to 1.51.1. - [Release notes](https://github.com/openai/openai-python/releases) - [Changelog](https://github.com/openai/openai-python/blob/main/CHANGELOG.md) - [Commits](https://github.com/openai/openai-python/compare/v1.50.2...v1.51.1) --- updated-dependencies: - dependency-name: openai dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- examples/langchain-chroma/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/langchain-chroma/requirements.txt b/examples/langchain-chroma/requirements.txt index d84311b3..2b711fb8 100644 --- a/examples/langchain-chroma/requirements.txt +++ b/examples/langchain-chroma/requirements.txt @@ -1,4 +1,4 @@ langchain==0.3.1 -openai==1.50.2 +openai==1.51.1 chromadb==0.5.11 llama-index==0.11.14 \ No newline at end of file From f9c58a01d35fed15106f7dcbbb128531dabda10b Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 8 Oct 2024 09:34:05 +0200 Subject: [PATCH 0275/1530] chore(deps): Bump llama-index from 0.11.14 to 0.11.16 in /examples/langchain-chroma (#3760) chore(deps): Bump llama-index in /examples/langchain-chroma Bumps [llama-index](https://github.com/run-llama/llama_index) from 0.11.14 to 0.11.16. - [Release notes](https://github.com/run-llama/llama_index/releases) - [Changelog](https://github.com/run-llama/llama_index/blob/main/CHANGELOG.md) - [Commits](https://github.com/run-llama/llama_index/compare/v0.11.14...v0.11.16) --- updated-dependencies: - dependency-name: llama-index dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- examples/langchain-chroma/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/langchain-chroma/requirements.txt b/examples/langchain-chroma/requirements.txt index 2b711fb8..302eb041 100644 --- a/examples/langchain-chroma/requirements.txt +++ b/examples/langchain-chroma/requirements.txt @@ -1,4 +1,4 @@ langchain==0.3.1 openai==1.51.1 chromadb==0.5.11 -llama-index==0.11.14 \ No newline at end of file +llama-index==0.11.16 \ No newline at end of file From d5e1958a1ffb4183cd8c7a917b00f7e12995c68a Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 8 Oct 2024 09:34:23 +0200 Subject: [PATCH 0276/1530] chore(deps): Bump nginx from 1.27.0 to 1.27.2 in /examples/k8sgpt (#3761) Bumps nginx from 1.27.0 to 1.27.2. --- updated-dependencies: - dependency-name: nginx dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- examples/k8sgpt/broken-pod.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/k8sgpt/broken-pod.yaml b/examples/k8sgpt/broken-pod.yaml index 9d5289de..aa3cc81c 100644 --- a/examples/k8sgpt/broken-pod.yaml +++ b/examples/k8sgpt/broken-pod.yaml @@ -5,7 +5,7 @@ metadata: spec: containers: - name: broken-pod - image: nginx:1.27.0 + image: nginx:1.27.2 livenessProbe: httpGet: path: / From 2023627d7fc366340a0c3bf7be0c232210b2e393 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 8 Oct 2024 09:34:43 +0200 Subject: [PATCH 0277/1530] chore(deps): Bump appleboy/ssh-action from 1.0.3 to 1.1.0 (#3762) Bumps [appleboy/ssh-action](https://github.com/appleboy/ssh-action) from 1.0.3 to 1.1.0. 
- [Release notes](https://github.com/appleboy/ssh-action/releases) - [Changelog](https://github.com/appleboy/ssh-action/blob/master/.goreleaser.yaml) - [Commits](https://github.com/appleboy/ssh-action/compare/v1.0.3...v1.1.0) --- updated-dependencies: - dependency-name: appleboy/ssh-action dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/deploy-explorer.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/deploy-explorer.yaml b/.github/workflows/deploy-explorer.yaml index 7b5c0484..b8f3f7e9 100644 --- a/.github/workflows/deploy-explorer.yaml +++ b/.github/workflows/deploy-explorer.yaml @@ -33,7 +33,7 @@ jobs: run: | CGO_ENABLED=0 make build-api - name: rm - uses: appleboy/ssh-action@v1.0.3 + uses: appleboy/ssh-action@v1.1.0 with: host: ${{ secrets.EXPLORER_SSH_HOST }} username: ${{ secrets.EXPLORER_SSH_USERNAME }} @@ -53,7 +53,7 @@ jobs: rm: true target: ./local-ai - name: restarting - uses: appleboy/ssh-action@v1.0.3 + uses: appleboy/ssh-action@v1.1.0 with: host: ${{ secrets.EXPLORER_SSH_HOST }} username: ${{ secrets.EXPLORER_SSH_USERNAME }} From f1d16a45c5f6079c18c9b62e638a306d3675dc4f Mon Sep 17 00:00:00 2001 From: "LocalAI [bot]" <139863280+localai-bot@users.noreply.github.com> Date: Tue, 8 Oct 2024 09:35:01 +0200 Subject: [PATCH 0278/1530] chore: :arrow_up: Update ggerganov/llama.cpp to `6374743747b14db4eb73ce82ae449a2978bc3b47` (#3763) :arrow_up: Update ggerganov/llama.cpp Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com> Co-authored-by: mudler <2420543+mudler@users.noreply.github.com> --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index af0e2e84..7cf65f14 100644 --- a/Makefile +++ b/Makefile @@ -8,7 +8,7 @@ DETECT_LIBS?=true # llama.cpp versions 
GOLLAMA_REPO?=https://github.com/go-skynet/go-llama.cpp GOLLAMA_VERSION?=2b57a8ae43e4699d3dc5d1496a1ccd42922993be -CPPLLAMA_VERSION?=d5cb86844f26f600c48bf3643738ea68138f961d +CPPLLAMA_VERSION?=6374743747b14db4eb73ce82ae449a2978bc3b47 # go-rwkv version RWKV_REPO?=https://github.com/donomii/go-rwkv.cpp From 8586a0167af1651e6d2af892c786ade8b579783c Mon Sep 17 00:00:00 2001 From: "LocalAI [bot]" <139863280+localai-bot@users.noreply.github.com> Date: Tue, 8 Oct 2024 09:35:18 +0200 Subject: [PATCH 0279/1530] chore: :arrow_up: Update ggerganov/whisper.cpp to `ebca09a3d1033417b0c630bbbe607b0f185b1488` (#3764) :arrow_up: Update ggerganov/whisper.cpp Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com> Co-authored-by: mudler <2420543+mudler@users.noreply.github.com> --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index 7cf65f14..9259b05b 100644 --- a/Makefile +++ b/Makefile @@ -16,7 +16,7 @@ RWKV_VERSION?=661e7ae26d442f5cfebd2a0881b44e8c55949ec6 # whisper.cpp version WHISPER_REPO?=https://github.com/ggerganov/whisper.cpp -WHISPER_CPP_VERSION?=9f346d00840bcd7af62794871109841af40cecfb +WHISPER_CPP_VERSION?=ebca09a3d1033417b0c630bbbe607b0f185b1488 # bert.cpp version BERT_REPO?=https://github.com/go-skynet/go-bert.cpp From 0fdc6a92f62b97bf2043b1f882da07d3a15aaa3b Mon Sep 17 00:00:00 2001 From: Ettore Di Giacinto Date: Tue, 8 Oct 2024 18:38:27 +0200 Subject: [PATCH 0280/1530] models(gallery): add moe-girl-1ba-7bt-i1 (#3766) Signed-off-by: Ettore Di Giacinto --- gallery/index.yaml | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) diff --git a/gallery/index.yaml b/gallery/index.yaml index 42f375b3..2928c3bf 100644 --- a/gallery/index.yaml +++ b/gallery/index.yaml @@ -1,4 +1,21 @@ --- +- name: "moe-girl-1ba-7bt-i1" + icon: https://cdn-uploads.huggingface.co/production/uploads/634262af8d8089ebaefd410e/kTXXSSSqpb21rfyOX7FUa.jpeg + # chatml + url: 
"github:mudler/LocalAI/gallery/chatml.yaml@master" + urls: + - https://huggingface.co/allura-org/MoE-Girl-1BA-7BT + - https://huggingface.co/mradermacher/MoE-Girl-1BA-7BT-i1-GGUF + description: | + A finetune of OLMoE by AllenAI designed for roleplaying (and maybe general usecases if you try hard enough). + PLEASE do not expect godliness out of this, it's a model with 1 billion active parameters. Expect something more akin to Gemma 2 2B, not Llama 3 8B. + overrides: + parameters: + model: MoE-Girl-1BA-7BT.i1-Q4_K_M.gguf + files: + - filename: MoE-Girl-1BA-7BT.i1-Q4_K_M.gguf + sha256: e6ef9c311c73573b243de6ff7538b386f430af30b2be0a96a5745c17137ad432 + uri: huggingface://mradermacher/MoE-Girl-1BA-7BT-i1-GGUF/MoE-Girl-1BA-7BT.i1-Q4_K_M.gguf - name: "salamandra-7b-instruct" icon: https://huggingface.co/BSC-LT/salamandra-7b-instruct/resolve/main/images/salamandra_header.png # Uses chatml From 78459889d8faae3d067d9506a30faaaf0d776448 Mon Sep 17 00:00:00 2001 From: Ettore Di Giacinto Date: Tue, 8 Oct 2024 18:51:30 +0200 Subject: [PATCH 0281/1530] models(gallery): add archfunctions models (#3767) Signed-off-by: Ettore Di Giacinto --- gallery/index.yaml | 53 ++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 53 insertions(+) diff --git a/gallery/index.yaml b/gallery/index.yaml index 2928c3bf..2ae53736 100644 --- a/gallery/index.yaml +++ b/gallery/index.yaml @@ -353,6 +353,59 @@ - filename: T.E-8.1-Q4_K_M-imat.gguf sha256: 1b7892b82c01ea4cbebe34cd00f9836cbbc369fc3247c1f44a92842201e7ec0b uri: huggingface://Lewdiculous/T.E-8.1-GGUF-IQ-Imatrix-Request/T.E-8.1-Q4_K_M-imat.gguf +- &archfunct + license: apache-2.0 + tags: + - llm + - gguf + - gpu + - qwen + - qwen2.5 + - cpu + - function-calling + name: "arch-function-1.5b" + uri: "github:mudler/LocalAI/gallery/arch-function.yaml@master" + urls: + - https://huggingface.co/katanemolabs/Arch-Function-1.5B + - https://huggingface.co/mradermacher/Arch-Function-1.5B-GGUF + description: | + The Katanemo Arch-Function 
collection of large language models (LLMs) is a collection state-of-the-art (SOTA) LLMs specifically designed for function calling tasks. The models are designed to understand complex function signatures, identify required parameters, and produce accurate function call outputs based on natural language prompts. Achieving performance on par with GPT-4, these models set a new benchmark in the domain of function-oriented tasks, making them suitable for scenarios where automated API interaction and function execution is crucial. + In summary, the Katanemo Arch-Function collection demonstrates: + State-of-the-art performance in function calling + Accurate parameter identification and suggestion, even in ambiguous or incomplete inputs + High generalization across multiple function calling use cases, from API interactions to automated backend tasks. + Optimized low-latency, high-throughput performance, making it suitable for real-time, production environments. + overrides: + parameters: + model: Arch-Function-1.5B.Q4_K_M.gguf + files: + - filename: Arch-Function-1.5B.Q4_K_M.gguf + sha256: 5ac54d2d50cca0ee0335ca2c9b688204c0829cd3a73de3ee3fda108281ad9691 + uri: huggingface://mradermacher/Arch-Function-1.5B-GGUF/Arch-Function-1.5B.Q4_K_M.gguf +- !!merge <<: *archfunct + name: "arch-function-7b" + urls: + - https://huggingface.co/katanemolabs/Arch-Function-7B + - https://huggingface.co/mradermacher/Arch-Function-7B-GGUF + overrides: + parameters: + model: Arch-Function-7B.Q4_K_M.gguf + files: + - filename: Arch-Function-7B.Q4_K_M.gguf + sha256: 6e38661321d79d02b8cf57c79d97c6c0e19adb9ffa66083cc440c24e257234b6 + uri: huggingface://mradermacher/Arch-Function-7B-GGUF/Arch-Function-7B.Q4_K_M.gguf +- !!merge <<: *archfunct + name: "arch-function-3b" + urls: + - https://huggingface.co/katanemolabs/Arch-Function-3B + - https://huggingface.co/mradermacher/Arch-Function-3B-GGUF + overrides: + parameters: + model: Arch-Function-3B.Q4_K_M.gguf + files: + - filename: 
Arch-Function-3B.Q4_K_M.gguf + sha256: 9945cb8d070498d163e5df90c1987f591d35e4fd2222a6c51bcfff848c4b573b + uri: huggingface://mradermacher/Arch-Function-3B-GGUF/Arch-Function-3B.Q4_K_M.gguf - &smollm ## SmolLM url: "github:mudler/LocalAI/gallery/chatml.yaml@master" From 62165d556cea8a355adbdfc4d3b23eccc90f6180 Mon Sep 17 00:00:00 2001 From: Ettore Di Giacinto Date: Tue, 8 Oct 2024 18:52:21 +0200 Subject: [PATCH 0282/1530] models(gallery): add archfunctions template Signed-off-by: Ettore Di Giacinto --- gallery/arch-function.yaml | 66 ++++++++++++++++++++++++++++++++++++++ 1 file changed, 66 insertions(+) create mode 100644 gallery/arch-function.yaml diff --git a/gallery/arch-function.yaml b/gallery/arch-function.yaml new file mode 100644 index 00000000..dc8f290a --- /dev/null +++ b/gallery/arch-function.yaml @@ -0,0 +1,66 @@ +--- +name: "chatml" + +config_file: | + mmap: true + function: + disable_no_action: true + grammar: + mixed_mode: false + disable: true + parallel_calls: true + expect_strings_after_json: true + json_regex_match: + - "(?s)(.*?)" + - "(?s)(.*)" + capture_llm_results: + - (?s)(.*?) + replace_llm_results: + - key: (?s)(.*?) + value: "" + template: + chat_message: | + <|im_start|>{{ .RoleName }} + {{ if .FunctionCall -}} + Function call: + {{ else if eq .RoleName "tool" -}} + Function response: + {{ end -}} + {{ if .Content -}} + {{.Content }} + {{ end -}} + {{ if .FunctionCall -}} + {{toJson .FunctionCall}} + {{ end -}}<|im_end|> + function: | + <|im_start|>system + # Tools + + You may call one or more functions to assist with the user query. 
+ + You are provided with function signatures within XML tags: + + {{range .Functions}} + {'type': 'function', 'function': {'name': '{{.Name}}', 'description': '{{.Description}}', 'parameters': {{toJson .Parameters}} }} + {{end}} + + For each function call, return a json object with function name and arguments within XML tags: + + {"name": , "arguments": } + + <|im_end|> + {{.Input -}} + <|im_start|>assistant + chat: | + {{.Input -}} + <|im_start|>assistant + completion: | + {{.Input}} + context_size: 4096 + f16: true + stopwords: + - '<|im_end|>' + - '' + - '' + - "<|eot_id|>" + - "<|end_of_text|>" \ No newline at end of file From 825e85bcc54d592186631e2ebe2679a416be03bc Mon Sep 17 00:00:00 2001 From: "LocalAI [bot]" <139863280+localai-bot@users.noreply.github.com> Date: Tue, 8 Oct 2024 23:41:05 +0200 Subject: [PATCH 0283/1530] chore: :arrow_up: Update ggerganov/llama.cpp to `dca1d4b58a7f1acf1bd253be84e50d6367f492fd` (#3769) :arrow_up: Update ggerganov/llama.cpp Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com> Co-authored-by: mudler <2420543+mudler@users.noreply.github.com> --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index 9259b05b..389da752 100644 --- a/Makefile +++ b/Makefile @@ -8,7 +8,7 @@ DETECT_LIBS?=true # llama.cpp versions GOLLAMA_REPO?=https://github.com/go-skynet/go-llama.cpp GOLLAMA_VERSION?=2b57a8ae43e4699d3dc5d1496a1ccd42922993be -CPPLLAMA_VERSION?=6374743747b14db4eb73ce82ae449a2978bc3b47 +CPPLLAMA_VERSION?=dca1d4b58a7f1acf1bd253be84e50d6367f492fd # go-rwkv version RWKV_REPO?=https://github.com/donomii/go-rwkv.cpp From 759d35e6b51f0b9772621d8a80696c39995ce115 Mon Sep 17 00:00:00 2001 From: "LocalAI [bot]" <139863280+localai-bot@users.noreply.github.com> Date: Wed, 9 Oct 2024 09:42:44 +0200 Subject: [PATCH 0284/1530] chore: :arrow_up: Update ggerganov/whisper.cpp to `fdbfb460ed546452a5d53611bba66d10d842e719` (#3768) :arrow_up: Update ggerganov/whisper.cpp 
Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com> Co-authored-by: mudler <2420543+mudler@users.noreply.github.com> --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index 389da752..c4215b84 100644 --- a/Makefile +++ b/Makefile @@ -16,7 +16,7 @@ RWKV_VERSION?=661e7ae26d442f5cfebd2a0881b44e8c55949ec6 # whisper.cpp version WHISPER_REPO?=https://github.com/ggerganov/whisper.cpp -WHISPER_CPP_VERSION?=ebca09a3d1033417b0c630bbbe607b0f185b1488 +WHISPER_CPP_VERSION?=fdbfb460ed546452a5d53611bba66d10d842e719 # bert.cpp version BERT_REPO?=https://github.com/go-skynet/go-bert.cpp From 1a3b3d3e6723f8d1314f2c626b219e52a2b7b7b8 Mon Sep 17 00:00:00 2001 From: Ettore Di Giacinto Date: Wed, 9 Oct 2024 16:58:34 +0200 Subject: [PATCH 0285/1530] models(gallery): add versatillama-llama-3.2-3b-instruct-abliterated (#3771) Signed-off-by: Ettore Di Giacinto --- gallery/arch-function.yaml | 2 +- gallery/index.yaml | 15 +++++++++++++++ 2 files changed, 16 insertions(+), 1 deletion(-) diff --git a/gallery/arch-function.yaml b/gallery/arch-function.yaml index dc8f290a..a527d0f7 100644 --- a/gallery/arch-function.yaml +++ b/gallery/arch-function.yaml @@ -63,4 +63,4 @@ config_file: | - '' - '' - "<|eot_id|>" - - "<|end_of_text|>" \ No newline at end of file + - "<|end_of_text|>" diff --git a/gallery/index.yaml b/gallery/index.yaml index 2ae53736..ee303751 100644 --- a/gallery/index.yaml +++ b/gallery/index.yaml @@ -100,6 +100,21 @@ - filename: llama-3.2-1b-instruct-q8_0.gguf sha256: ba345c83bf5cc679c653b853c46517eea5a34f03ed2205449db77184d9ae62a9 uri: huggingface://hugging-quants/Llama-3.2-1B-Instruct-Q8_0-GGUF/llama-3.2-1b-instruct-q8_0.gguf +## Uncensored +- !!merge <<: *llama32 + icon: https://cdn-uploads.huggingface.co/production/uploads/66c9d7a26f2335ba288810a4/4YDg-rcEXCK0fdTS1fBzE.webp + name: "versatillama-llama-3.2-3b-instruct-abliterated" + urls: + - 
https://huggingface.co/QuantFactory/VersatiLlama-Llama-3.2-3B-Instruct-Abliterated-GGUF + description: | + Small but Smart Fine-Tuned on Vast dataset of Conversations. Able to Generate Human like text with high performance within its size. It is Very Versatile when compared for it's size and Parameters and offers capability almost as good as Llama 3.1 8B Instruct. + overrides: + parameters: + model: VersatiLlama-Llama-3.2-3B-Instruct-Abliterated.Q4_K_M.gguf + files: + - filename: VersatiLlama-Llama-3.2-3B-Instruct-Abliterated.Q4_K_M.gguf + sha256: 15b9e4a987f50d7594d030815c7166a996e20db46fe1e20da03e96955020312c + uri: huggingface://QuantFactory/VersatiLlama-Llama-3.2-3B-Instruct-Abliterated-GGUF/VersatiLlama-Llama-3.2-3B-Instruct-Abliterated.Q4_K_M.gguf - &qwen25 ## Qwen2.5 name: "qwen2.5-14b-instruct" From a9e42a76fa1590c62e208fcacd5fe4c021dfba4f Mon Sep 17 00:00:00 2001 From: Ettore Di Giacinto Date: Wed, 9 Oct 2024 17:05:50 +0200 Subject: [PATCH 0286/1530] models(gallery): add llama3.2-3b-enigma (#3772) Signed-off-by: Ettore Di Giacinto --- gallery/index.yaml | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/gallery/index.yaml b/gallery/index.yaml index ee303751..e1eb4e07 100644 --- a/gallery/index.yaml +++ b/gallery/index.yaml @@ -115,6 +115,20 @@ - filename: VersatiLlama-Llama-3.2-3B-Instruct-Abliterated.Q4_K_M.gguf sha256: 15b9e4a987f50d7594d030815c7166a996e20db46fe1e20da03e96955020312c uri: huggingface://QuantFactory/VersatiLlama-Llama-3.2-3B-Instruct-Abliterated-GGUF/VersatiLlama-Llama-3.2-3B-Instruct-Abliterated.Q4_K_M.gguf +- !!merge <<: *llama32 + name: "llama3.2-3b-enigma" + icon: https://cdn-uploads.huggingface.co/production/uploads/64f267a8a4f79a118e0fcc89/it7MY5MyLCLpFQev5dUis.jpeg + urls: + - https://huggingface.co/QuantFactory/Llama3.2-3B-Enigma-GGUF + description: | + Enigma is a code-instruct model built on Llama 3.2 3b. It is a high quality code instruct model with the Llama 3.2 Instruct chat format. 
The model is finetuned on synthetic code-instruct data generated with Llama 3.1 405b and supplemented with generalist synthetic data. It uses the Llama 3.2 Instruct prompt format. + overrides: + parameters: + model: Llama3.2-3B-Enigma.Q4_K_M.gguf + files: + - filename: Llama3.2-3B-Enigma.Q4_K_M.gguf + sha256: 4304e6ee1e348b228470700ec1e9423f5972333d376295195ce6cd5c70cae5e4 + uri: huggingface://QuantFactory/Llama3.2-3B-Enigma-GGUF/Llama3.2-3B-Enigma.Q4_K_M.gguf - &qwen25 ## Qwen2.5 name: "qwen2.5-14b-instruct" From 8c7439b96ebfd6f75548021a3d361ae44d21bd23 Mon Sep 17 00:00:00 2001 From: Ettore Di Giacinto Date: Wed, 9 Oct 2024 17:08:13 +0200 Subject: [PATCH 0287/1530] models(gallery): add llama3.2-3b-esper2 (#3773) Signed-off-by: Ettore Di Giacinto --- gallery/index.yaml | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/gallery/index.yaml b/gallery/index.yaml index e1eb4e07..c4166848 100644 --- a/gallery/index.yaml +++ b/gallery/index.yaml @@ -129,6 +129,20 @@ - filename: Llama3.2-3B-Enigma.Q4_K_M.gguf sha256: 4304e6ee1e348b228470700ec1e9423f5972333d376295195ce6cd5c70cae5e4 uri: huggingface://QuantFactory/Llama3.2-3B-Enigma-GGUF/Llama3.2-3B-Enigma.Q4_K_M.gguf +- !!merge <<: *llama32 + name: "llama3.2-3b-esper2" + icon: https://cdn-uploads.huggingface.co/production/uploads/64f267a8a4f79a118e0fcc89/4I6oK8DG0so4VD8GroFsd.jpeg + urls: + - https://huggingface.co/QuantFactory/Llama3.2-3B-Esper2-GGUF + description: | + Esper 2 is a DevOps and cloud architecture code specialist built on Llama 3.2 3b. It is an AI assistant focused on AWS, Azure, GCP, Terraform, Dockerfiles, pipelines, shell scripts and more, with real world problem solving and high quality code instruct performance within the Llama 3.2 Instruct chat format. Finetuned on synthetic DevOps-instruct and code-instruct data generated with Llama 3.1 405b and supplemented with generalist chat data. 
+ overrides: + parameters: + model: Llama3.2-3B-Esper2.Q4_K_M.gguf + files: + - filename: Llama3.2-3B-Esper2.Q4_K_M.gguf + sha256: 11d2bd674aa22a71a59ec49ad29b695000d14bc275b0195b8d7089bfc7582fc7 + uri: huggingface://QuantFactory/Llama3.2-3B-Esper2-GGUF/Llama3.2-3B-Esper2.Q4_K_M.gguf - &qwen25 ## Qwen2.5 name: "qwen2.5-14b-instruct" From 08a54c1812735b4ccf63853f0e6ce4a230dd617a Mon Sep 17 00:00:00 2001 From: Ettore Di Giacinto Date: Wed, 9 Oct 2024 17:16:17 +0200 Subject: [PATCH 0288/1530] models(gallery): add llama-3.1-swallow-70b-v0.1-i1 (#3774) Signed-off-by: Ettore Di Giacinto --- gallery/index.yaml | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/gallery/index.yaml b/gallery/index.yaml index c4166848..5ac80b68 100644 --- a/gallery/index.yaml +++ b/gallery/index.yaml @@ -958,6 +958,21 @@ - filename: NightyGurps-14b-v1.1-Q4_K_M.gguf sha256: d09d53259ad2c0298150fa8c2db98fe42f11731af89fdc80ad0e255a19adc4b0 uri: huggingface://bartowski/NightyGurps-14b-v1.1-GGUF/NightyGurps-14b-v1.1-Q4_K_M.gguf +- !!merge <<: *llama31 + name: "llama-3.1-swallow-70b-v0.1-i1" + icon: https://huggingface.co/tokyotech-llm/Llama-3.1-Swallow-70B-v0.1/resolve/main/logo.png + urls: + - https://huggingface.co/tokyotech-llm/Llama-3.1-Swallow-70B-v0.1 + - https://huggingface.co/mradermacher/Llama-3.1-Swallow-70B-v0.1-i1-GGUF + description: | + Llama 3.1 Swallow is a series of large language models (8B, 70B) that were built by continual pre-training on the Meta Llama 3.1 models. Llama 3.1 Swallow enhanced the Japanese language capabilities of the original Llama 3.1 while retaining the English language capabilities. We use approximately 200 billion tokens that were sampled from a large Japanese web corpus (Swallow Corpus Version 2), Japanese and English Wikipedia articles, and mathematical and coding contents, etc (see the Training Datasets section) for continual pre-training. 
The instruction-tuned models (Instruct) were built by supervised fine-tuning (SFT) on the synthetic data specially built for Japanese. See the Swallow Model Index section to find other model variants. + overrides: + parameters: + model: Llama-3.1-Swallow-70B-v0.1.i1-Q4_K_M.gguf + files: + - filename: Llama-3.1-Swallow-70B-v0.1.i1-Q4_K_M.gguf + sha256: 9eaa08a4872a26f56fe34b27a99f7bd0d22ee2b2d1c84cfcde2091b5f61af5fa + uri: huggingface://mradermacher/Llama-3.1-Swallow-70B-v0.1-i1-GGUF/Llama-3.1-Swallow-70B-v0.1.i1-Q4_K_M.gguf ## Uncensored models - !!merge <<: *llama31 name: "humanish-roleplay-llama-3.1-8b-i1" From ea8675d4734cc89eb23e0da11e3d13c746a40f48 Mon Sep 17 00:00:00 2001 From: "LocalAI [bot]" <139863280+localai-bot@users.noreply.github.com> Date: Wed, 9 Oct 2024 23:40:46 +0200 Subject: [PATCH 0289/1530] chore: :arrow_up: Update ggerganov/llama.cpp to `c81f3bbb051f8b736e117dfc78c99d7c4e0450f6` (#3775) :arrow_up: Update ggerganov/llama.cpp Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com> Co-authored-by: mudler <2420543+mudler@users.noreply.github.com> --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index c4215b84..dfe35be6 100644 --- a/Makefile +++ b/Makefile @@ -8,7 +8,7 @@ DETECT_LIBS?=true # llama.cpp versions GOLLAMA_REPO?=https://github.com/go-skynet/go-llama.cpp GOLLAMA_VERSION?=2b57a8ae43e4699d3dc5d1496a1ccd42922993be -CPPLLAMA_VERSION?=dca1d4b58a7f1acf1bd253be84e50d6367f492fd +CPPLLAMA_VERSION?=c81f3bbb051f8b736e117dfc78c99d7c4e0450f6 # go-rwkv version RWKV_REPO?=https://github.com/donomii/go-rwkv.cpp From 85a3cc8d8fdd80dad279a823c14a3d21e330b5b8 Mon Sep 17 00:00:00 2001 From: Josh Bennett <562773+joshbtn@users.noreply.github.com> Date: Thu, 10 Oct 2024 04:42:59 -0400 Subject: [PATCH 0290/1530] feat(transformers): Use downloaded model for Transformers backend if it already exists. 
(#3777) * signing commit Signed-off-by: Josh Bennett <562773+joshbtn@users.noreply.github.com> * Update transformers backend to check for existing model directory Signed-off-by: Josh Bennett <562773+joshbtn@users.noreply.github.com> --------- Signed-off-by: Josh Bennett <562773+joshbtn@users.noreply.github.com> --- backend/python/transformers/backend.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/backend/python/transformers/backend.py b/backend/python/transformers/backend.py index 6e809f28..2075012e 100644 --- a/backend/python/transformers/backend.py +++ b/backend/python/transformers/backend.py @@ -72,7 +72,12 @@ class BackendServicer(backend_pb2_grpc.BackendServicer): Returns: A Result object that contains the result of the LoadModel operation. """ + model_name = request.Model + + # Check to see if the Model exists in the filesystem already. + if os.path.exists(request.ModelFile): + model_name = request.ModelFile compute = torch.float16 if request.F16Memory == True: From f41965bfb52970de867b12804eb8cbabb626b161 Mon Sep 17 00:00:00 2001 From: Ettore Di Giacinto Date: Thu, 10 Oct 2024 10:47:41 +0200 Subject: [PATCH 0291/1530] models(gallery): add rombos-llm-v2.5.1-qwen-3b (#3778) Signed-off-by: Ettore Di Giacinto --- gallery/index.yaml | 22 ++++++++++++++++++++++ 1 file changed, 22 insertions(+) diff --git a/gallery/index.yaml b/gallery/index.yaml index 5ac80b68..a0279190 100644 --- a/gallery/index.yaml +++ b/gallery/index.yaml @@ -396,6 +396,28 @@ - filename: T.E-8.1-Q4_K_M-imat.gguf sha256: 1b7892b82c01ea4cbebe34cd00f9836cbbc369fc3247c1f44a92842201e7ec0b uri: huggingface://Lewdiculous/T.E-8.1-GGUF-IQ-Imatrix-Request/T.E-8.1-Q4_K_M-imat.gguf +- !!merge <<: *qwen25 + name: "rombos-llm-v2.5.1-qwen-3b" + icon: https://cdn-uploads.huggingface.co/production/uploads/642cc1c253e76b4c2286c58e/pNDtgE5FDkxxvbG4qiZ1A.jpeg + urls: + - https://huggingface.co/QuantFactory/Rombos-LLM-V2.5.1-Qwen-3b-GGUF + description: | + Rombos-LLM-V2.5.1-Qwen-3b is a little 
experiment that merges a high-quality LLM, arcee-ai/raspberry-3B, using the last step of the Continuous Finetuning method outlined in a Google document. The merge is done using the mergekit with the following parameters: + + - Models: Qwen2.5-3B-Instruct, raspberry-3B + - Merge method: ties + - Base model: Qwen2.5-3B + - Parameters: weight=1, density=1, normalize=true, int8_mask=true + - Dtype: bfloat16 + + The model has been evaluated on various tasks and datasets, and the results are available on the Open LLM Leaderboard. The model has shown promising performance across different benchmarks. + overrides: + parameters: + model: Rombos-LLM-V2.5.1-Qwen-3b.Q4_K_M.gguf + files: + - filename: Rombos-LLM-V2.5.1-Qwen-3b.Q4_K_M.gguf + sha256: 656c342a2921cac8912e0123fc295c3bb3d631a85c671c12a3843a957e46d30d + uri: huggingface://QuantFactory/Rombos-LLM-V2.5.1-Qwen-3b-GGUF/Rombos-LLM-V2.5.1-Qwen-3b.Q4_K_M.gguf - &archfunct license: apache-2.0 tags: From 614125f268e9c55f630872740367b5925fc9e978 Mon Sep 17 00:00:00 2001 From: Ettore Di Giacinto Date: Thu, 10 Oct 2024 15:05:09 +0200 Subject: [PATCH 0292/1530] models(gallery): add qwen2.5-7b-ins-v3 (#3779) Signed-off-by: Ettore Di Giacinto --- gallery/index.yaml | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/gallery/index.yaml b/gallery/index.yaml index a0279190..86a0a9a1 100644 --- a/gallery/index.yaml +++ b/gallery/index.yaml @@ -418,6 +418,21 @@ - filename: Rombos-LLM-V2.5.1-Qwen-3b.Q4_K_M.gguf sha256: 656c342a2921cac8912e0123fc295c3bb3d631a85c671c12a3843a957e46d30d uri: huggingface://QuantFactory/Rombos-LLM-V2.5.1-Qwen-3b-GGUF/Rombos-LLM-V2.5.1-Qwen-3b.Q4_K_M.gguf +- !!merge <<: *qwen25 + name: "qwen2.5-7b-ins-v3" + urls: + - https://huggingface.co/happzy2633/qwen2.5-7b-ins-v3 + - https://huggingface.co/bartowski/qwen2.5-7b-ins-v3-GGUF + description: | + Qwen 2.5 fine-tuned on CoT to match o1 performance. 
An attempt to build an Open o1 mathcing OpenAI o1 model + Demo: https://huggingface.co/spaces/happzy2633/open-o1 + overrides: + parameters: + model: qwen2.5-7b-ins-v3-Q4_K_M.gguf + files: + - filename: qwen2.5-7b-ins-v3-Q4_K_M.gguf + sha256: 9c23734072714a4886c0386ae0ff07a5e940d67ad52278e2ed689fec44e1e0c8 + uri: huggingface://bartowski/qwen2.5-7b-ins-v3-GGUF/qwen2.5-7b-ins-v3-Q4_K_M.gguf - &archfunct license: apache-2.0 tags: From 6e78d8cd9d30eed447911944467017b7a4d1b349 Mon Sep 17 00:00:00 2001 From: Ettore Di Giacinto Date: Thu, 10 Oct 2024 18:56:01 +0200 Subject: [PATCH 0293/1530] models(gallery): add dans-personalityengine-v1.0.0-8b (#3780) Signed-off-by: Ettore Di Giacinto --- gallery/index.yaml | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) diff --git a/gallery/index.yaml b/gallery/index.yaml index 86a0a9a1..efe961a1 100644 --- a/gallery/index.yaml +++ b/gallery/index.yaml @@ -1342,6 +1342,23 @@ - filename: Violet_Twilight-v0.2-Q4_K_M-imat.gguf sha256: 0793d196a00cd6fd4e67b8c585b27a94d397e33d427e4ad4aa9a16b7abc339cd uri: huggingface://Lewdiculous/Violet_Twilight-v0.2-GGUF-IQ-Imatrix/Violet_Twilight-v0.2-Q4_K_M-imat.gguf +- !!merge <<: *llama31 + url: "github:mudler/LocalAI/gallery/chatml.yaml@master" + name: "dans-personalityengine-v1.0.0-8b" + urls: + - https://huggingface.co/PocketDoc/Dans-PersonalityEngine-v1.0.0-8b + - https://huggingface.co/bartowski/Dans-PersonalityEngine-v1.0.0-8b-GGUF + description: | + This model is intended to be multifarious in its capabilities and should be quite capable at both co-writing and roleplay as well as find itself quite at home performing sentiment analysis or summarization as part of a pipeline. It has been trained on a wide array of one shot instructions, multi turn instructions, role playing scenarios, text adventure games, co-writing, and much more. The full dataset is publicly available and can be found in the datasets section of the model page. 
+ + There has not been any form of harmfulness alignment done on this model, please take the appropriate precautions when using it in a production environment. + overrides: + parameters: + model: Dans-PersonalityEngine-v1.0.0-8b-Q4_K_M.gguf + files: + - filename: Dans-PersonalityEngine-v1.0.0-8b-Q4_K_M.gguf + sha256: 193b66434c9962e278bb171a21e652f0d3f299f04e86c95f9f75ec5aa8ff006e + uri: huggingface://bartowski/Dans-PersonalityEngine-v1.0.0-8b-GGUF/Dans-PersonalityEngine-v1.0.0-8b-Q4_K_M.gguf - &deepseek ## Deepseek url: "github:mudler/LocalAI/gallery/deepseek.yaml@master" From ad5e7d376a070ac9773d3202cc18d1d716079596 Mon Sep 17 00:00:00 2001 From: Ettore Di Giacinto Date: Thu, 10 Oct 2024 19:06:58 +0200 Subject: [PATCH 0294/1530] models(gallery): add llama-3.2-3b-agent007 (#3781) Signed-off-by: Ettore Di Giacinto --- gallery/index.yaml | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/gallery/index.yaml b/gallery/index.yaml index efe961a1..f322ab38 100644 --- a/gallery/index.yaml +++ b/gallery/index.yaml @@ -143,6 +143,19 @@ - filename: Llama3.2-3B-Esper2.Q4_K_M.gguf sha256: 11d2bd674aa22a71a59ec49ad29b695000d14bc275b0195b8d7089bfc7582fc7 uri: huggingface://QuantFactory/Llama3.2-3B-Esper2-GGUF/Llama3.2-3B-Esper2.Q4_K_M.gguf +- !!merge <<: *llama32 + name: "llama-3.2-3b-agent007" + urls: + - https://huggingface.co/QuantFactory/Llama-3.2-3B-Agent007-GGUF + description: | + The model is a quantized version of EpistemeAI/Llama-3.2-3B-Agent007, developed by EpistemeAI and fine-tuned from unsloth/llama-3.2-3b-instruct-bnb-4bit. It was trained 2x faster with Unsloth and Huggingface's TRL library. Fine tuned with Agent datasets. 
+ overrides: + parameters: + model: Llama-3.2-3B-Agent007.Q4_K_M.gguf + files: + - filename: Llama-3.2-3B-Agent007.Q4_K_M.gguf + sha256: 7a2543a69b116f2a059e2e445e5d362bb7df4a51b97e83d8785c1803dc9d687f + uri: huggingface://QuantFactory/Llama-3.2-3B-Agent007-GGUF/Llama-3.2-3B-Agent007.Q4_K_M.gguf - &qwen25 ## Qwen2.5 name: "qwen2.5-14b-instruct" From bdd351b3729f6742cea09a9329bb6a49294cfef0 Mon Sep 17 00:00:00 2001 From: Ettore Di Giacinto Date: Thu, 10 Oct 2024 19:09:49 +0200 Subject: [PATCH 0295/1530] models(gallery): add nihappy-l3.1-8b-v0.09 (#3782) Signed-off-by: Ettore Di Giacinto --- gallery/index.yaml | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/gallery/index.yaml b/gallery/index.yaml index f322ab38..33541f07 100644 --- a/gallery/index.yaml +++ b/gallery/index.yaml @@ -1372,6 +1372,20 @@ - filename: Dans-PersonalityEngine-v1.0.0-8b-Q4_K_M.gguf sha256: 193b66434c9962e278bb171a21e652f0d3f299f04e86c95f9f75ec5aa8ff006e uri: huggingface://bartowski/Dans-PersonalityEngine-v1.0.0-8b-GGUF/Dans-PersonalityEngine-v1.0.0-8b-Q4_K_M.gguf +- !!merge <<: *llama31 + name: "nihappy-l3.1-8b-v0.09" + urls: + - https://huggingface.co/Arkana08/NIHAPPY-L3.1-8B-v0.09 + - https://huggingface.co/QuantFactory/NIHAPPY-L3.1-8B-v0.09-GGUF + description: | + The model is a quantized version of Arkana08/NIHAPPY-L3.1-8B-v0.09 created using llama.cpp. It is a role-playing model that integrates the finest qualities of various pre-trained language models, focusing on dynamic storytelling. 
+ overrides: + parameters: + model: NIHAPPY-L3.1-8B-v0.09.Q4_K_M.gguf + files: + - filename: NIHAPPY-L3.1-8B-v0.09.Q4_K_M.gguf + sha256: 9bd46a06093448b143bd2775f0fb1b1b172c851fafdce31289e13b7dfc23a0d7 + uri: huggingface://QuantFactory/NIHAPPY-L3.1-8B-v0.09-GGUF/NIHAPPY-L3.1-8B-v0.09.Q4_K_M.gguf - &deepseek ## Deepseek url: "github:mudler/LocalAI/gallery/deepseek.yaml@master" From ed2946feacb2dc95c04c07a263dce9daa2490198 Mon Sep 17 00:00:00 2001 From: Ettore Di Giacinto Date: Thu, 10 Oct 2024 19:11:50 +0200 Subject: [PATCH 0296/1530] models(gallery): add llama-3.2-3b-agent007-coder (#3783) Signed-off-by: Ettore Di Giacinto --- gallery/index.yaml | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/gallery/index.yaml b/gallery/index.yaml index 33541f07..d262162c 100644 --- a/gallery/index.yaml +++ b/gallery/index.yaml @@ -156,6 +156,19 @@ - filename: Llama-3.2-3B-Agent007.Q4_K_M.gguf sha256: 7a2543a69b116f2a059e2e445e5d362bb7df4a51b97e83d8785c1803dc9d687f uri: huggingface://QuantFactory/Llama-3.2-3B-Agent007-GGUF/Llama-3.2-3B-Agent007.Q4_K_M.gguf +- !!merge <<: *llama32 + name: "llama-3.2-3b-agent007-coder" + urls: + - https://huggingface.co/QuantFactory/Llama-3.2-3B-Agent007-Coder-GGUF + description: | + The Llama-3.2-3B-Agent007-Coder-GGUF is a quantized version of the EpistemeAI/Llama-3.2-3B-Agent007-Coder model, which is a fine-tuned version of the unsloth/llama-3.2-3b-instruct-bnb-4bit model. It is created using llama.cpp and trained with additional datasets such as the Agent dataset, Code Alpaca 20K, and magpie ultra 0.1. This model is optimized for multilingual dialogue use cases and agentic retrieval and summarization tasks. The model is available for commercial and research use in multiple languages and is best used with the transformers library. 
+ overrides: + parameters: + model: Llama-3.2-3B-Agent007-Coder.Q4_K_M.gguf + files: + - filename: Llama-3.2-3B-Agent007-Coder.Q4_K_M.gguf + sha256: 49a4861c094d94ef5faa33f69b02cd132bb0167f1c3ca59059404f85f61e1d12 + uri: huggingface://QuantFactory/Llama-3.2-3B-Agent007-Coder-GGUF/Llama-3.2-3B-Agent007-Coder.Q4_K_M.gguf - &qwen25 ## Qwen2.5 name: "qwen2.5-14b-instruct" From 164abb8c9fcc278425401b387e434576da607a5c Mon Sep 17 00:00:00 2001 From: Ettore Di Giacinto Date: Thu, 10 Oct 2024 19:13:47 +0200 Subject: [PATCH 0297/1530] models(gallery): add fireball-meta-llama-3.2-8b-instruct-agent-003-128k-code-dpo (#3784) Signed-off-by: Ettore Di Giacinto --- gallery/index.yaml | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/gallery/index.yaml b/gallery/index.yaml index d262162c..77d8e68b 100644 --- a/gallery/index.yaml +++ b/gallery/index.yaml @@ -169,6 +169,19 @@ - filename: Llama-3.2-3B-Agent007-Coder.Q4_K_M.gguf sha256: 49a4861c094d94ef5faa33f69b02cd132bb0167f1c3ca59059404f85f61e1d12 uri: huggingface://QuantFactory/Llama-3.2-3B-Agent007-Coder-GGUF/Llama-3.2-3B-Agent007-Coder.Q4_K_M.gguf +- !!merge <<: *llama32 + name: "fireball-meta-llama-3.2-8b-instruct-agent-003-128k-code-dpo" + urls: + - https://huggingface.co/QuantFactory/Fireball-Meta-Llama-3.2-8B-Instruct-agent-003-128k-code-DPO-GGUF + description: | + The LLM model is a quantized version of EpistemeAI/Fireball-Meta-Llama-3.2-8B-Instruct-agent-003-128k-code-DPO, which is an experimental and revolutionary fine-tune with DPO dataset to allow LLama 3.1 8B to be an agentic coder. It has some built-in agent features such as search, calculator, and ReAct. Other noticeable features include self-learning using unsloth, RAG applications, and memory. The context window of the model is 128K. It can be integrated into projects using popular libraries like Transformers and vLLM. The model is suitable for use with Langchain or LLamaIndex. 
The model is developed by EpistemeAI and licensed under the Apache 2.0 license. + overrides: + parameters: + model: Fireball-Meta-Llama-3.2-8B-Instruct-agent-003-128k-code-DPO.Q4_K_M.gguf + files: + - filename: Fireball-Meta-Llama-3.2-8B-Instruct-agent-003-128k-code-DPO.Q4_K_M.gguf + sha256: 7f45fa79bc6c9847ef9fbad08c3bb5a0f2dbb56d2e2200a5d37b260a57274e55 + uri: huggingface://QuantFactory/Fireball-Meta-Llama-3.2-8B-Instruct-agent-003-128k-code-DPO-GGUF/Fireball-Meta-Llama-3.2-8B-Instruct-agent-003-128k-code-DPO.Q4_K_M.gguf - &qwen25 ## Qwen2.5 name: "qwen2.5-14b-instruct" From 568a01bf5c5fd97b3ec9e493e96a5226ea953687 Mon Sep 17 00:00:00 2001 From: Ettore Di Giacinto Date: Thu, 10 Oct 2024 19:16:23 +0200 Subject: [PATCH 0298/1530] models(gallery): add gemma-2-ataraxy-v3i-9b (#3785) Signed-off-by: Ettore Di Giacinto --- gallery/index.yaml | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/gallery/index.yaml b/gallery/index.yaml index 77d8e68b..c25ccc02 100644 --- a/gallery/index.yaml +++ b/gallery/index.yaml @@ -2636,6 +2636,19 @@ - filename: gemma-2-9b-it-abliterated-Q4_K_M.gguf sha256: 88d84ac9796732c10f6c58e0feb4db8e04c05d74bdb7047a5e37906a589896e1 uri: huggingface://bartowski/gemma-2-9b-it-abliterated-GGUF/gemma-2-9b-it-abliterated-Q4_K_M.gguf +- !!merge <<: *gemma + name: "gemma-2-ataraxy-v3i-9b" + urls: + - https://huggingface.co/QuantFactory/Gemma-2-Ataraxy-v3i-9B-GGUF + description: | + Gemma-2-Ataraxy-v3i-9B is an experimental model that replaces the simpo model in the original recipe with a different simpo model and a writing model trained on Gutenberg, using a higher density. It is a merge of pre-trained language models created using mergekit, with della merge method using unsloth/gemma-2-9b-it as the base. The models included in the merge are nbeerbower/Gemma2-Gutenberg-Doppel-9B, ifable/gemma-2-Ifable-9B, and wzhouad/gemma-2-9b-it-WPO-HB. It has been quantized using llama.cpp. 
+ overrides: + parameters: + model: Gemma-2-Ataraxy-v3i-9B.Q4_K_M.gguf + files: + - filename: Gemma-2-Ataraxy-v3i-9B.Q4_K_M.gguf + sha256: f14c5b9373d4058f0f812c6c34184addeb4aeeecb02a7bbcf9844d9afc8d0066 + uri: huggingface://QuantFactory/Gemma-2-Ataraxy-v3i-9B-GGUF/Gemma-2-Ataraxy-v3i-9B.Q4_K_M.gguf - &llama3 url: "github:mudler/LocalAI/gallery/llama3-instruct.yaml@master" icon: https://cdn-uploads.huggingface.co/production/uploads/642cc1c253e76b4c2286c58e/aJJxKus1wP5N-euvHEUq7.png From 671cd42917d1935b033f9b8407f03b201c6bca64 Mon Sep 17 00:00:00 2001 From: Ettore Di Giacinto Date: Thu, 10 Oct 2024 19:54:07 +0200 Subject: [PATCH 0299/1530] chore(gallery): do not specify backend with moondream Signed-off-by: Ettore Di Giacinto --- gallery/moondream.yaml | 1 - 1 file changed, 1 deletion(-) diff --git a/gallery/moondream.yaml b/gallery/moondream.yaml index a7599671..d3511f20 100644 --- a/gallery/moondream.yaml +++ b/gallery/moondream.yaml @@ -3,7 +3,6 @@ name: "moondream2" config_file: | - backend: llama-cpp context_size: 2046 roles: user: "\nQuestion: " From 1996e6f4c9a87491b0ecd75b7de7b095291ea282 Mon Sep 17 00:00:00 2001 From: "LocalAI [bot]" <139863280+localai-bot@users.noreply.github.com> Date: Thu, 10 Oct 2024 23:46:50 +0200 Subject: [PATCH 0300/1530] chore: :arrow_up: Update ggerganov/llama.cpp to `0e9f760eb12546704ef8fa72577bc1a3ffe1bc04` (#3786) :arrow_up: Update ggerganov/llama.cpp Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com> Co-authored-by: mudler <2420543+mudler@users.noreply.github.com> --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index dfe35be6..0bb10f3c 100644 --- a/Makefile +++ b/Makefile @@ -8,7 +8,7 @@ DETECT_LIBS?=true # llama.cpp versions GOLLAMA_REPO?=https://github.com/go-skynet/go-llama.cpp GOLLAMA_VERSION?=2b57a8ae43e4699d3dc5d1496a1ccd42922993be -CPPLLAMA_VERSION?=c81f3bbb051f8b736e117dfc78c99d7c4e0450f6 
+CPPLLAMA_VERSION?=0e9f760eb12546704ef8fa72577bc1a3ffe1bc04 # go-rwkv version RWKV_REPO?=https://github.com/donomii/go-rwkv.cpp From be6c4e6061f45f67b6bbe0382e758ba587ff4978 Mon Sep 17 00:00:00 2001 From: Ettore Di Giacinto Date: Fri, 11 Oct 2024 16:55:57 +0200 Subject: [PATCH 0301/1530] fix(llama-cpp): consistently select fallback (#3789) * fix(llama-cpp): consistently select fallback We didn't took in consideration the case where the host has the CPU flagset, but the binaries were not actually present in the asset dir. This made possible for instance for models that specified the llama-cpp backend directly in the config to not eventually pick-up the fallback binary in case the optimized binaries were not present. Signed-off-by: Ettore Di Giacinto * chore: adjust and simplify selection Signed-off-by: Ettore Di Giacinto * fix: move failure recovery to BackendLoader() Signed-off-by: Ettore Di Giacinto * comments Signed-off-by: Ettore Di Giacinto * minor fixups Signed-off-by: Ettore Di Giacinto --------- Signed-off-by: Ettore Di Giacinto --- pkg/model/initializers.go | 135 ++++++++++---------- pkg/model/{options.go => loader_options.go} | 0 2 files changed, 65 insertions(+), 70 deletions(-) rename pkg/model/{options.go => loader_options.go} (100%) diff --git a/pkg/model/initializers.go b/pkg/model/initializers.go index 1171de4d..c3b37179 100644 --- a/pkg/model/initializers.go +++ b/pkg/model/initializers.go @@ -28,7 +28,7 @@ var Aliases map[string]string = map[string]string{ "langchain-huggingface": LCHuggingFaceBackend, } -var autoDetect = os.Getenv("DISABLE_AUTODETECT") != "true" +var AutoDetect = os.Getenv("DISABLE_AUTODETECT") != "true" const ( LlamaGGML = "llama-ggml" @@ -62,7 +62,7 @@ func backendPath(assetDir, backend string) string { // backendsInAssetDir returns the list of backends in the asset directory // that should be loaded -func backendsInAssetDir(assetDir string) ([]string, error) { +func backendsInAssetDir(assetDir string) (map[string][]string, 
error) { // Exclude backends from automatic loading excludeBackends := []string{LocalStoreBackend} entry, err := os.ReadDir(backendPath(assetDir, "")) @@ -86,7 +86,7 @@ ENTRY: // Skip the llama.cpp variants if we are autoDetecting // But we always load the fallback variant if it exists - if strings.Contains(e.Name(), LLamaCPP) && !strings.Contains(e.Name(), LLamaCPPFallback) && autoDetect { + if strings.Contains(e.Name(), LLamaCPP) && !strings.Contains(e.Name(), LLamaCPPFallback) && AutoDetect { continue } @@ -94,7 +94,7 @@ ENTRY: } // if we are autoDetecting, we want to show the llama.cpp variants as a single backend - if autoDetect { + if AutoDetect { // if we find the llama.cpp variants, show them of as a single backend (llama-cpp) as later we are going to pick that up // when starting the service foundLCPPAVX, foundLCPPAVX2, foundLCPPFallback, foundLCPPGRPC, foundLCPPCuda, foundLCPPHipblas, foundSycl16, foundSycl32 := false, false, false, false, false, false, false, false @@ -136,6 +136,10 @@ ENTRY: } } + return backends, nil +} + +func orderBackends(backends map[string][]string) ([]string, error) { // order backends from the asset directory. // as we scan for backends, we want to keep some order which backends are tried of. // for example, llama.cpp should be tried first, and we want to keep the huggingface backend at the last. 
@@ -181,8 +185,9 @@ ENTRY: return orderedBackends.Keys(), nil } -// selectGRPCProcess selects the GRPC process to start based on system capabilities -func selectGRPCProcess(backend, assetDir string, f16 bool) string { +// selectGRPCProcessByHostCapabilities selects the GRPC process to start based on system capabilities +// Note: this is now relevant only for llama.cpp +func selectGRPCProcessByHostCapabilities(backend, assetDir string, f16 bool) string { foundCUDA := false foundAMDGPU := false foundIntelGPU := false @@ -199,6 +204,7 @@ func selectGRPCProcess(backend, assetDir string, f16 bool) string { return backendPath(assetDir, LLamaCPPGRPC) } + // Check for GPU-binaries that are shipped with single binary releases gpus, err := xsysinfo.GPUs() if err == nil { for _, gpu := range gpus { @@ -243,32 +249,37 @@ func selectGRPCProcess(backend, assetDir string, f16 bool) string { return grpcProcess } + // No GPU found or no specific binaries found, try to load the CPU variant(s) + + // Select the Fallback by default + selectedProcess := backendPath(assetDir, LLamaCPPFallback) + + // IF we find any optimized binary, we use that if xsysinfo.HasCPUCaps(cpuid.AVX2) { p := backendPath(assetDir, LLamaCPPAVX2) if _, err := os.Stat(p); err == nil { log.Info().Msgf("[%s] attempting to load with AVX2 variant", backend) - grpcProcess = p + selectedProcess = p } } else if xsysinfo.HasCPUCaps(cpuid.AVX) { p := backendPath(assetDir, LLamaCPPAVX) if _, err := os.Stat(p); err == nil { log.Info().Msgf("[%s] attempting to load with AVX variant", backend) - grpcProcess = p - } - } else { - p := backendPath(assetDir, LLamaCPPFallback) - if _, err := os.Stat(p); err == nil { - log.Info().Msgf("[%s] attempting to load with fallback variant", backend) - grpcProcess = p + selectedProcess = p } } - return grpcProcess + // Check if the binary exists! 
+ if _, err := os.Stat(selectedProcess); err == nil { + return selectedProcess + } + + return "" } // starts the grpcModelProcess for the backend, and returns a grpc client // It also loads the model -func (ml *ModelLoader) grpcModel(backend string, o *Options) func(string, string, string) (*Model, error) { +func (ml *ModelLoader) grpcModel(backend string, autodetect bool, o *Options) func(string, string, string) (*Model, error) { return func(modelID, modelName, modelFile string) (*Model, error) { log.Debug().Msgf("Loading Model %s with gRPC (file: %s) (backend: %s): %+v", modelID, modelFile, backend, *o) @@ -324,9 +335,9 @@ func (ml *ModelLoader) grpcModel(backend string, o *Options) func(string, string return nil, fmt.Errorf("refering to a backend not in asset dir: %s", err.Error()) } - if autoDetect { + if autodetect { // autoDetect GRPC process to start based on system capabilities - if selectedProcess := selectGRPCProcess(backend, o.assetDir, o.gRPCOptions.F16Memory); selectedProcess != "" { + if selectedProcess := selectGRPCProcessByHostCapabilities(backend, o.assetDir, o.gRPCOptions.F16Memory); selectedProcess != "" { grpcProcess = selectedProcess } } @@ -407,7 +418,11 @@ func (ml *ModelLoader) grpcModel(backend string, o *Options) func(string, string } func (ml *ModelLoader) ListAvailableBackends(assetdir string) ([]string, error) { - return backendsInAssetDir(assetdir) + backends, err := backendsInAssetDir(assetdir) + if err != nil { + return nil, err + } + return orderBackends(backends) } func (ml *ModelLoader) BackendLoader(opts ...Option) (client grpc.Backend, err error) { @@ -421,13 +436,7 @@ func (ml *ModelLoader) BackendLoader(opts ...Option) (client grpc.Backend, err e log.Debug().Msgf("%s is an alias of %s", backend, realBackend) } - if o.singleActiveBackend { - log.Debug().Msgf("Stopping all backends except '%s'", o.modelID) - err := ml.StopGRPC(allExcept(o.modelID)) - if err != nil { - log.Error().Err(err).Str("keptModel", o.modelID).Msg("error 
while shutting down all backends except for the keptModel") - } - } + ml.stopActiveBackends(o.modelID, o.singleActiveBackend) var backendToConsume string @@ -439,14 +448,40 @@ func (ml *ModelLoader) BackendLoader(opts ...Option) (client grpc.Backend, err e backendToConsume = backend } - model, err := ml.LoadModel(o.modelID, o.model, ml.grpcModel(backendToConsume, o)) + model, err := ml.LoadModel(o.modelID, o.model, ml.grpcModel(backendToConsume, AutoDetect, o)) if err != nil { - return nil, err + // XXX: This is too backend specific(llama-cpp), remove this bit or generalize further + // We failed somehow starting the binary. For instance, could be that we are missing + // some libraries if running in binary-only mode. + // In this case, we attempt to load the model with the fallback variant. + + // If not llama-cpp backend, return error immediately + if backend != LLamaCPP { + return nil, err + } + + // Otherwise attempt with fallback + log.Error().Msgf("[%s] Failed loading model, trying with fallback '%s'", backend, LLamaCPPFallback) + model, err = ml.LoadModel(o.modelID, o.model, ml.grpcModel(LLamaCPPFallback, false, o)) + if err != nil { + return nil, err + } } return model.GRPC(o.parallelRequests, ml.wd), nil } +func (ml *ModelLoader) stopActiveBackends(modelID string, singleActiveBackend bool) { + // If we can have only one backend active, kill all the others (except external backends) + if singleActiveBackend { + log.Debug().Msgf("Stopping all backends except '%s'", modelID) + err := ml.StopGRPC(allExcept(modelID)) + if err != nil { + log.Error().Err(err).Str("keptModel", modelID).Msg("error while shutting down all backends except for the keptModel - greedyloader continuing") + } + } +} + func (ml *ModelLoader) GreedyLoader(opts ...Option) (grpc.Backend, error) { o := NewOptions(opts...) 
@@ -458,19 +493,12 @@ func (ml *ModelLoader) GreedyLoader(opts ...Option) (grpc.Backend, error) { return m.GRPC(o.parallelRequests, ml.wd), nil } - // If we can have only one backend active, kill all the others (except external backends) - if o.singleActiveBackend { - log.Debug().Msgf("Stopping all backends except '%s'", o.modelID) - err := ml.StopGRPC(allExcept(o.modelID)) - if err != nil { - log.Error().Err(err).Str("keptModel", o.modelID).Msg("error while shutting down all backends except for the keptModel - greedyloader continuing") - } - } + ml.stopActiveBackends(o.modelID, o.singleActiveBackend) var err error // get backends embedded in the binary - autoLoadBackends, err := backendsInAssetDir(o.assetDir) + autoLoadBackends, err := ml.ListAvailableBackends(o.assetDir) if err != nil { return nil, err } @@ -501,39 +529,6 @@ func (ml *ModelLoader) GreedyLoader(opts ...Option) (grpc.Backend, error) { err = errors.Join(err, fmt.Errorf("backend %s returned no usable model", key)) log.Info().Msgf("[%s] Fails: %s", key, "backend returned no usable model") } - - if autoDetect && key == LLamaCPP && err != nil { - // try as hard as possible to run the llama.cpp variants - backendToUse := "" - if xsysinfo.HasCPUCaps(cpuid.AVX2) { - if _, err := os.Stat(backendPath(o.assetDir, LLamaCPPAVX2)); err == nil { - backendToUse = LLamaCPPAVX2 - } - } else if xsysinfo.HasCPUCaps(cpuid.AVX) { - if _, err := os.Stat(backendPath(o.assetDir, LLamaCPPAVX2)); err == nil { - backendToUse = LLamaCPPAVX - } - } else { - if _, err := os.Stat(backendPath(o.assetDir, LLamaCPPFallback)); err == nil { - backendToUse = LLamaCPPFallback - } else { - // If we don't have a fallback, just skip fallback - continue - } - } - - // Autodetection failed, try the fallback - log.Info().Msgf("[%s] Autodetection failed, trying the fallback", key) - options = append(options, WithBackendString(backendToUse)) - model, modelerr = ml.BackendLoader(options...) 
- if modelerr == nil && model != nil { - log.Info().Msgf("[%s] Loads OK", key) - return model, nil - } else { - err = errors.Join(err, fmt.Errorf("[%s]: %w", key, modelerr)) - log.Info().Msgf("[%s] Fails: %s", key, modelerr.Error()) - } - } } return nil, fmt.Errorf("could not load model - all backends returned error: %s", err.Error()) diff --git a/pkg/model/options.go b/pkg/model/loader_options.go similarity index 100% rename from pkg/model/options.go rename to pkg/model/loader_options.go From a0f0505f0da21fa75d8c3e8cc539d513940626bb Mon Sep 17 00:00:00 2001 From: Ettore Di Giacinto Date: Fri, 11 Oct 2024 17:30:14 +0200 Subject: [PATCH 0302/1530] fix(welcome): do not list model twice if we have a config (#3790) Signed-off-by: Ettore Di Giacinto --- core/http/endpoints/localai/welcome.go | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/core/http/endpoints/localai/welcome.go b/core/http/endpoints/localai/welcome.go index 0518ceac..68f39e39 100644 --- a/core/http/endpoints/localai/welcome.go +++ b/core/http/endpoints/localai/welcome.go @@ -29,13 +29,20 @@ func WelcomeEndpoint(appConfig *config.ApplicationConfig, galleryConfigs[m.Name] = cfg } + modelsWithoutConfig := []string{} + for _, m := range models { + if _, ok := modelsWithBackendConfig[m]; !ok { + modelsWithoutConfig = append(modelsWithoutConfig, m) + } + } + // Get model statuses to display in the UI the operation in progress processingModels, taskTypes := modelStatus() summary := fiber.Map{ "Title": "LocalAI API - " + internal.PrintableVersion(), "Version": internal.PrintableVersion(), - "Models": models, + "Models": modelsWithoutConfig, "ModelsConfig": backendConfigs, "GalleryConfig": galleryConfigs, "IsP2PEnabled": p2p.IsP2PEnabled(), From 65ca754166a6c42f1b4f3104907afc8c04a4575d Mon Sep 17 00:00:00 2001 From: Dave Date: Fri, 11 Oct 2024 17:49:00 -0400 Subject: [PATCH 0303/1530] Fix: listmodelservice / welcome endpoint use LOOSE_ONLY (#3791) * fix list model service and welcome 
Signed-off-by: Dave Lee * comment Signed-off-by: Dave Lee --------- Signed-off-by: Dave Lee --- core/http/endpoints/localai/welcome.go | 12 +----------- core/services/list_models.go | 16 +++++++++------- 2 files changed, 10 insertions(+), 18 deletions(-) diff --git a/core/http/endpoints/localai/welcome.go b/core/http/endpoints/localai/welcome.go index 68f39e39..a1476886 100644 --- a/core/http/endpoints/localai/welcome.go +++ b/core/http/endpoints/localai/welcome.go @@ -13,15 +13,10 @@ import ( func WelcomeEndpoint(appConfig *config.ApplicationConfig, cl *config.BackendConfigLoader, ml *model.ModelLoader, modelStatus func() (map[string]string, map[string]string)) func(*fiber.Ctx) error { return func(c *fiber.Ctx) error { - models, _ := services.ListModels(cl, ml, config.NoFilterFn, services.SKIP_IF_CONFIGURED) backendConfigs := cl.GetAllBackendConfigs() - galleryConfigs := map[string]*gallery.Config{} - modelsWithBackendConfig := map[string]interface{}{} for _, m := range backendConfigs { - modelsWithBackendConfig[m.Name] = nil - cfg, err := gallery.GetLocalModelConfiguration(ml.ModelPath, m.Name) if err != nil { continue @@ -29,12 +24,7 @@ func WelcomeEndpoint(appConfig *config.ApplicationConfig, galleryConfigs[m.Name] = cfg } - modelsWithoutConfig := []string{} - for _, m := range models { - if _, ok := modelsWithBackendConfig[m]; !ok { - modelsWithoutConfig = append(modelsWithoutConfig, m) - } - } + modelsWithoutConfig, _ := services.ListModels(cl, ml, config.NoFilterFn, services.LOOSE_ONLY) // Get model statuses to display in the UI the operation in progress processingModels, taskTypes := modelStatus() diff --git a/core/services/list_models.go b/core/services/list_models.go index c310ac15..ef555d22 100644 --- a/core/services/list_models.go +++ b/core/services/list_models.go @@ -8,10 +8,10 @@ import ( type LooseFilePolicy int const ( - SKIP_IF_CONFIGURED LooseFilePolicy = iota + LOOSE_ONLY LooseFilePolicy = iota + SKIP_IF_CONFIGURED SKIP_ALWAYS ALWAYS_INCLUDE - 
LOOSE_ONLY ) func ListModels(bcl *config.BackendConfigLoader, ml *model.ModelLoader, filter config.BackendConfigFilterFn, looseFilePolicy LooseFilePolicy) ([]string, error) { @@ -21,11 +21,13 @@ func ListModels(bcl *config.BackendConfigLoader, ml *model.ModelLoader, filter c dataModels := []string{} // Start with known configurations - if looseFilePolicy != LOOSE_ONLY { - for _, c := range bcl.GetBackendConfigsByFilter(filter) { - if looseFilePolicy == SKIP_IF_CONFIGURED { - skipMap[c.Model] = nil - } + + for _, c := range bcl.GetBackendConfigsByFilter(filter) { + // Is this better than looseFilePolicy <= SKIP_IF_CONFIGURED ? less performant but more readable? + if (looseFilePolicy == SKIP_IF_CONFIGURED) || (looseFilePolicy == LOOSE_ONLY) { + skipMap[c.Model] = nil + } + if looseFilePolicy != LOOSE_ONLY { dataModels = append(dataModels, c.Name) } } From 6257e2f5101cb74ca309b2c15bea6fcc00c37add Mon Sep 17 00:00:00 2001 From: Ettore Di Giacinto Date: Sat, 12 Oct 2024 01:25:03 +0200 Subject: [PATCH 0304/1530] chore(deps): bump llama-cpp to 96776405a17034dcfd53d3ddf5d142d34bdbb657 (#3793) This adapts also to upstream changes Signed-off-by: Ettore Di Giacinto --- Makefile | 2 +- backend/cpp/llama/grpc-server.cpp | 76 +++++++++++++++---------------- 2 files changed, 39 insertions(+), 39 deletions(-) diff --git a/Makefile b/Makefile index 0bb10f3c..3cbc6e10 100644 --- a/Makefile +++ b/Makefile @@ -8,7 +8,7 @@ DETECT_LIBS?=true # llama.cpp versions GOLLAMA_REPO?=https://github.com/go-skynet/go-llama.cpp GOLLAMA_VERSION?=2b57a8ae43e4699d3dc5d1496a1ccd42922993be -CPPLLAMA_VERSION?=0e9f760eb12546704ef8fa72577bc1a3ffe1bc04 +CPPLLAMA_VERSION?=96776405a17034dcfd53d3ddf5d142d34bdbb657 # go-rwkv version RWKV_REPO?=https://github.com/donomii/go-rwkv.cpp diff --git a/backend/cpp/llama/grpc-server.cpp b/backend/cpp/llama/grpc-server.cpp index be99bf76..c61b9d4b 100644 --- a/backend/cpp/llama/grpc-server.cpp +++ b/backend/cpp/llama/grpc-server.cpp @@ -113,7 +113,7 @@ static 
std::string tokens_to_str(llama_context *ctx, Iter begin, Iter end) std::string ret; for (; begin != end; ++begin) { - ret += llama_token_to_piece(ctx, *begin); + ret += common_token_to_piece(ctx, *begin); } return ret; } @@ -121,7 +121,7 @@ static std::string tokens_to_str(llama_context *ctx, Iter begin, Iter end) // format incomplete utf-8 multibyte character for output static std::string tokens_to_output_formatted_string(const llama_context *ctx, const llama_token token) { - std::string out = token == -1 ? "" : llama_token_to_piece(ctx, token); + std::string out = token == -1 ? "" : common_token_to_piece(ctx, token); // if the size is 1 and first bit is 1, meaning it's a partial character // (size > 1 meaning it's already a known token) if (out.size() == 1 && (out[0] & 0x80) == 0x80) @@ -203,8 +203,8 @@ struct llama_client_slot std::string stopping_word; // sampling - struct gpt_sampler_params sparams; - gpt_sampler *ctx_sampling = nullptr; + struct common_sampler_params sparams; + common_sampler *ctx_sampling = nullptr; int32_t ga_i = 0; // group-attention state int32_t ga_n = 1; // group-attention factor @@ -257,7 +257,7 @@ struct llama_client_slot images.clear(); } - bool has_budget(gpt_params &global_params) { + bool has_budget(common_params &global_params) { if (params.n_predict == -1 && global_params.n_predict == -1) { return true; // limitless @@ -398,7 +398,7 @@ struct llama_server_context clip_ctx *clp_ctx = nullptr; - gpt_params params; + common_params params; llama_batch batch; @@ -441,7 +441,7 @@ struct llama_server_context } } - bool load_model(const gpt_params ¶ms_) + bool load_model(const common_params ¶ms_) { params = params_; if (!params.mmproj.empty()) { @@ -458,9 +458,9 @@ struct llama_server_context } } - llama_init_result llama_init = llama_init_from_gpt_params(params); - model = llama_init.model; - ctx = llama_init.context; + common_init_result common_init = common_init_from_params(params); + model = common_init.model; + ctx = 
common_init.context; if (model == nullptr) { LOG_ERR("unable to load model: %s", params.model.c_str()); @@ -578,12 +578,12 @@ struct llama_server_context std::vector p; if (first) { - p = ::llama_tokenize(ctx, s, add_bos, TMP_FORCE_SPECIAL); + p = common_tokenize(ctx, s, add_bos, TMP_FORCE_SPECIAL); first = false; } else { - p = ::llama_tokenize(ctx, s, false, TMP_FORCE_SPECIAL); + p = common_tokenize(ctx, s, false, TMP_FORCE_SPECIAL); } prompt_tokens.insert(prompt_tokens.end(), p.begin(), p.end()); } @@ -600,7 +600,7 @@ struct llama_server_context else { auto s = json_prompt.template get(); - prompt_tokens = ::llama_tokenize(ctx, s, add_bos, TMP_FORCE_SPECIAL); + prompt_tokens = common_tokenize(ctx, s, add_bos, TMP_FORCE_SPECIAL); } return prompt_tokens; @@ -629,7 +629,7 @@ struct llama_server_context bool launch_slot_with_data(llama_client_slot* &slot, json data) { slot_params default_params; - gpt_sampler_params default_sparams; + common_sampler_params default_sparams; slot->params.stream = json_value(data, "stream", false); slot->params.cache_prompt = json_value(data, "cache_prompt", false); @@ -769,7 +769,7 @@ struct llama_server_context } else if (el[0].is_string()) { - auto toks = llama_tokenize(model, el[0].get(), false); + auto toks = common_tokenize(model, el[0].get(), false); for (auto tok : toks) { slot->sparams.logit_bias.push_back({tok, bias}); @@ -801,7 +801,7 @@ struct llama_server_context sampler_names.emplace_back(name); } } - slot->sparams.samplers = gpt_sampler_types_from_names(sampler_names, false); + slot->sparams.samplers = common_sampler_types_from_names(sampler_names, false); } else { @@ -885,9 +885,9 @@ struct llama_server_context if (slot->ctx_sampling != nullptr) { - gpt_sampler_free(slot->ctx_sampling); + common_sampler_free(slot->ctx_sampling); } - slot->ctx_sampling = gpt_sampler_init(model, slot->sparams); + slot->ctx_sampling = common_sampler_init(model, slot->sparams); //llama_set_rng_seed(ctx, slot->params.seed); slot->command = 
LOAD_PROMPT; @@ -914,13 +914,13 @@ struct llama_server_context system_tokens.clear(); if (!system_prompt.empty()) { - system_tokens = ::llama_tokenize(ctx, system_prompt, add_bos_token); + system_tokens = common_tokenize(ctx, system_prompt, add_bos_token); - llama_batch_clear(batch); + common_batch_clear(batch); for (int i = 0; i < (int)system_tokens.size(); ++i) { - llama_batch_add(batch, system_tokens[i], i, { 0 }, false); + common_batch_add(batch, system_tokens[i], i, { 0 }, false); } for (int32_t i = 0; i < (int32_t) batch.n_tokens; i += params.n_batch) @@ -1009,7 +1009,7 @@ struct llama_server_context bool process_token(completion_token_output &result, llama_client_slot &slot) { // remember which tokens were sampled - used for repetition penalties during sampling - const std::string token_str = llama_token_to_piece(ctx, result.tok); + const std::string token_str = common_token_to_piece(ctx, result.tok); slot.sampled = result.tok; // search stop word and delete it @@ -1160,7 +1160,7 @@ struct llama_server_context samplers.reserve(slot.sparams.samplers.size()); for (const auto & sampler : slot.sparams.samplers) { - samplers.emplace_back(gpt_sampler_type_to_str(sampler)); + samplers.emplace_back(common_sampler_type_to_str(sampler)); } return json { @@ -1216,7 +1216,7 @@ struct llama_server_context if (slot.sparams.n_probs > 0) { std::vector probs_output = {}; - const std::vector to_send_toks = llama_tokenize(ctx, tkn.text_to_send, false); + const std::vector to_send_toks = common_tokenize(ctx, tkn.text_to_send, false); size_t probs_pos = std::min(slot.sent_token_probs_index, slot.generated_token_probs.size()); size_t probs_stop_pos = std::min(slot.sent_token_probs_index + to_send_toks.size(), slot.generated_token_probs.size()); if (probs_pos < probs_stop_pos) @@ -1268,7 +1268,7 @@ struct llama_server_context std::vector probs = {}; if (!slot.params.stream && slot.stopped_word) { - const std::vector stop_word_toks = llama_tokenize(ctx, slot.stopping_word, false); 
+ const std::vector stop_word_toks = common_tokenize(ctx, slot.stopping_word, false); probs = std::vector(slot.generated_token_probs.begin(), slot.generated_token_probs.end() - stop_word_toks.size()); } else @@ -1408,7 +1408,7 @@ struct llama_server_context } image_idx++; - llama_batch_clear(batch); + common_batch_clear(batch); // append prefix of next image const auto json_prompt = (image_idx >= (int) slot.images.size()) ? @@ -1418,7 +1418,7 @@ struct llama_server_context std::vector append_tokens = tokenize(json_prompt, false); // has next image for (int i = 0; i < (int) append_tokens.size(); ++i) { - llama_batch_add(batch, append_tokens[i], system_tokens.size() + slot.n_past, { slot.id }, true); + common_batch_add(batch, append_tokens[i], system_tokens.size() + slot.n_past, { slot.id }, true); slot.n_past += 1; } } @@ -1550,7 +1550,7 @@ struct llama_server_context update_system_prompt(); } - llama_batch_clear(batch); + common_batch_clear(batch); if (all_slots_are_idle) { @@ -1628,7 +1628,7 @@ struct llama_server_context // TODO: we always have to take into account the "system_tokens" // this is not great and needs to be improved somehow - llama_batch_add(batch, slot.sampled, system_tokens.size() + slot_npast, { slot.id }, true); + common_batch_add(batch, slot.sampled, system_tokens.size() + slot_npast, { slot.id }, true); slot.n_past += 1; } @@ -1722,7 +1722,7 @@ struct llama_server_context if (!slot.params.cache_prompt) { - gpt_sampler_reset(slot.ctx_sampling); + common_sampler_reset(slot.ctx_sampling); slot.n_past = 0; slot.n_past_se = 0; @@ -1734,7 +1734,7 @@ struct llama_server_context // push the prompt into the sampling context (do not apply grammar) for (auto &token : prompt_tokens) { - gpt_sampler_accept(slot.ctx_sampling, token, false); + common_sampler_accept(slot.ctx_sampling, token, false); } slot.n_past = common_part(slot.cache_tokens, prompt_tokens); @@ -1826,7 +1826,7 @@ struct llama_server_context ga_i += ga_w/ga_n; } } - llama_batch_add(batch, 
prefix_tokens[slot.n_past], system_tokens.size() + slot_npast, {slot.id }, false); + common_batch_add(batch, prefix_tokens[slot.n_past], system_tokens.size() + slot_npast, {slot.id }, false); slot_npast++; } @@ -1943,9 +1943,9 @@ struct llama_server_context } completion_token_output result; - const llama_token id = gpt_sampler_sample(slot.ctx_sampling, ctx, slot.i_batch - i); + const llama_token id = common_sampler_sample(slot.ctx_sampling, ctx, slot.i_batch - i); - gpt_sampler_accept(slot.ctx_sampling, id, true); + common_sampler_accept(slot.ctx_sampling, id, true); slot.n_decoded += 1; if (slot.n_decoded == 1) @@ -1956,7 +1956,7 @@ struct llama_server_context } result.tok = id; - const auto * cur_p = gpt_sampler_get_candidates(slot.ctx_sampling); + const auto * cur_p = common_sampler_get_candidates(slot.ctx_sampling); for (size_t i = 0; i < (size_t) slot.sparams.n_probs; ++i) { result.probs.push_back({ @@ -2009,7 +2009,7 @@ static json format_partial_response( struct token_translator { llama_context * ctx; - std::string operator()(llama_token tok) const { return llama_token_to_piece(ctx, tok); } + std::string operator()(llama_token tok) const { return common_token_to_piece(ctx, tok); } std::string operator()(const completion_token_output &cto) const { return (*this)(cto.tok); } }; @@ -2203,7 +2203,7 @@ json parse_options(bool streaming, const backend::PredictOptions* predict, llama // } static void params_parse(const backend::ModelOptions* request, - gpt_params & params) { + common_params & params) { // this is comparable to: https://github.com/ggerganov/llama.cpp/blob/d9b33fe95bd257b36c84ee5769cc048230067d6f/examples/server/server.cpp#L1809 @@ -2311,7 +2311,7 @@ public: grpc::Status LoadModel(ServerContext* context, const backend::ModelOptions* request, backend::Result* result) { // Implement LoadModel RPC - gpt_params params; + common_params params; params_parse(request, params); llama_backend_init(); From a1634b219a4e52813e70ff07e6376a01449c4515 Mon Sep 17 
00:00:00 2001 From: Dave Date: Sat, 12 Oct 2024 03:45:47 -0400 Subject: [PATCH 0305/1530] fix: roll out bluemonday Sanitize more widely (#3794) * initial pass: roll out bluemonday sanitization more widely Signed-off-by: Dave Lee * add one additional sanitize - the overall modelslist used by the docs site Signed-off-by: Dave Lee --------- Signed-off-by: Dave Lee --- .github/ci/modelslist.go | 7 +++++++ core/http/elements/gallery.go | 23 ++++++++++------------- core/http/endpoints/openai/assistant.go | 17 +++++++++-------- core/http/endpoints/openai/files.go | 13 +++++++------ core/http/middleware/auth.go | 3 ++- core/http/routes/ui.go | 3 ++- 6 files changed, 37 insertions(+), 29 deletions(-) diff --git a/.github/ci/modelslist.go b/.github/ci/modelslist.go index cdc31703..719cd094 100644 --- a/.github/ci/modelslist.go +++ b/.github/ci/modelslist.go @@ -6,6 +6,7 @@ import ( "io/ioutil" "os" + "github.com/microcosm-cc/bluemonday" "gopkg.in/yaml.v3" ) @@ -279,6 +280,12 @@ func main() { return } + // Ensure that all arbitrary text content is sanitized before display + for i, m := range models { + models[i].Name = bluemonday.StrictPolicy().Sanitize(m.Name) + models[i].Description = bluemonday.StrictPolicy().Sanitize(m.Description) + } + // render the template data := struct { Models []*GalleryModel diff --git a/core/http/elements/gallery.go b/core/http/elements/gallery.go index 91a12310..06076bd9 100644 --- a/core/http/elements/gallery.go +++ b/core/http/elements/gallery.go @@ -6,6 +6,7 @@ import ( "github.com/chasefleming/elem-go" "github.com/chasefleming/elem-go/attrs" + "github.com/microcosm-cc/bluemonday" "github.com/mudler/LocalAI/core/gallery" "github.com/mudler/LocalAI/core/p2p" "github.com/mudler/LocalAI/core/services" @@ -41,7 +42,7 @@ func DoneProgress(galleryID, text string, showDelete bool) string { "tabindex": "-1", "autofocus": "", }, - elem.Text(text), + elem.Text(bluemonday.StrictPolicy().Sanitize(text)), ), elem.If(showDelete, deleteButton(galleryID, 
modelName), reInstallButton(galleryID)), ).Render() @@ -57,7 +58,7 @@ func ErrorProgress(err, galleryName string) string { "tabindex": "-1", "autofocus": "", }, - elem.Text("Error "+err), + elem.Text("Error "+bluemonday.StrictPolicy().Sanitize(err)), ), installButton(galleryName), ).Render() @@ -170,7 +171,7 @@ func P2PNodeBoxes(nodes []p2p.NodeData) string { attrs.Props{ "class": "text-gray-200 font-semibold ml-2 mr-1", }, - elem.Text(n.ID), + elem.Text(bluemonday.StrictPolicy().Sanitize(n.ID)), ), elem.Text("Status: "), elem.If( @@ -227,7 +228,7 @@ func StartProgressBar(uid, progress, text string) string { "tabindex": "-1", "autofocus": "", }, - elem.Text(text), + elem.Text(bluemonday.StrictPolicy().Sanitize(text)), //Perhaps overly defensive elem.Div(attrs.Props{ "hx-get": "/browse/job/progress/" + uid, "hx-trigger": "every 600ms", @@ -249,9 +250,7 @@ func cardSpan(text, icon string) elem.Node { "class": icon + " pr-2", }), - elem.Text(text), - - //elem.Text(text), + elem.Text(bluemonday.StrictPolicy().Sanitize(text)), ) } @@ -285,11 +284,9 @@ func searchableElement(text, icon string) elem.Node { elem.I(attrs.Props{ "class": icon + " pr-2", }), - elem.Text(text), + elem.Text(bluemonday.StrictPolicy().Sanitize(text)), ), ), - - //elem.Text(text), ) } @@ -303,7 +300,7 @@ func link(text, url string) elem.Node { elem.I(attrs.Props{ "class": "fas fa-link pr-2", }), - elem.Text(text), + elem.Text(bluemonday.StrictPolicy().Sanitize(text)), ) } func installButton(galleryName string) elem.Node { @@ -387,13 +384,13 @@ func ListModels(models []*gallery.GalleryModel, processTracker ProcessTracker, g attrs.Props{ "class": "mb-2 text-xl font-bold leading-tight", }, - elem.Text(m.Name), + elem.Text(bluemonday.StrictPolicy().Sanitize(m.Name)), ), elem.P( attrs.Props{ "class": "mb-4 text-sm [&:not(:hover)]:truncate text-base", }, - elem.Text(m.Description), + elem.Text(bluemonday.StrictPolicy().Sanitize(m.Description)), ), ) } diff --git a/core/http/endpoints/openai/assistant.go 
b/core/http/endpoints/openai/assistant.go index 3240e8ee..1d83066a 100644 --- a/core/http/endpoints/openai/assistant.go +++ b/core/http/endpoints/openai/assistant.go @@ -10,6 +10,7 @@ import ( "time" "github.com/gofiber/fiber/v2" + "github.com/microcosm-cc/bluemonday" "github.com/mudler/LocalAI/core/config" "github.com/mudler/LocalAI/core/schema" "github.com/mudler/LocalAI/core/services" @@ -83,7 +84,7 @@ func CreateAssistantEndpoint(cl *config.BackendConfigLoader, ml *model.ModelLoad if !modelExists(cl, ml, request.Model) { log.Warn().Msgf("Model: %s was not found in list of models.", request.Model) - return c.Status(fiber.StatusBadRequest).SendString("Model " + request.Model + " not found") + return c.Status(fiber.StatusBadRequest).SendString(bluemonday.StrictPolicy().Sanitize(fmt.Sprintf("Model %q not found", request.Model))) } if request.Tools == nil { @@ -147,7 +148,7 @@ func ListAssistantsEndpoint(cl *config.BackendConfigLoader, ml *model.ModelLoade // Convert string limit to integer limit, err := strconv.Atoi(limitQuery) if err != nil { - return c.Status(http.StatusBadRequest).SendString(fmt.Sprintf("Invalid limit query value: %s", limitQuery)) + return c.Status(http.StatusBadRequest).SendString(bluemonday.StrictPolicy().Sanitize(fmt.Sprintf("Invalid limit query value: %s", limitQuery))) } // Sort assistants @@ -288,7 +289,7 @@ func GetAssistantEndpoint(cl *config.BackendConfigLoader, ml *model.ModelLoader, } } - return c.Status(fiber.StatusNotFound).SendString(fmt.Sprintf("Unable to find assistant with id: %s", assistantID)) + return c.Status(fiber.StatusNotFound).SendString(bluemonday.StrictPolicy().Sanitize(fmt.Sprintf("Unable to find assistant with id: %s", assistantID))) } } @@ -337,11 +338,11 @@ func CreateAssistantFileEndpoint(cl *config.BackendConfigLoader, ml *model.Model } } - return c.Status(fiber.StatusNotFound).SendString(fmt.Sprintf("Unable to find file_id: %s", request.FileID)) + return 
c.Status(fiber.StatusNotFound).SendString(bluemonday.StrictPolicy().Sanitize(fmt.Sprintf("Unable to find file_id: %s", request.FileID))) } } - return c.Status(fiber.StatusNotFound).SendString(fmt.Sprintf("Unable to find %q", assistantID)) + return c.Status(fiber.StatusNotFound).SendString(bluemonday.StrictPolicy().Sanitize(fmt.Sprintf("Unable to find %q", assistantID))) } } @@ -442,7 +443,7 @@ func ModifyAssistantEndpoint(cl *config.BackendConfigLoader, ml *model.ModelLoad return c.Status(fiber.StatusOK).JSON(newAssistant) } } - return c.Status(fiber.StatusNotFound).SendString(fmt.Sprintf("Unable to find assistant with id: %s", assistantID)) + return c.Status(fiber.StatusNotFound).SendString(bluemonday.StrictPolicy().Sanitize(fmt.Sprintf("Unable to find assistant with id: %s", assistantID))) } } @@ -513,9 +514,9 @@ func GetAssistantFileEndpoint(cl *config.BackendConfigLoader, ml *model.ModelLoa if assistantFile.ID == fileId { return c.Status(fiber.StatusOK).JSON(assistantFile) } - return c.Status(fiber.StatusNotFound).SendString(fmt.Sprintf("Unable to find assistant file with file_id: %s", fileId)) + return c.Status(fiber.StatusNotFound).SendString(bluemonday.StrictPolicy().Sanitize(fmt.Sprintf("Unable to find assistant file with file_id: %s", fileId))) } } - return c.Status(fiber.StatusNotFound).SendString(fmt.Sprintf("Unable to find assistant file with assistant_id: %s", assistantID)) + return c.Status(fiber.StatusNotFound).SendString(bluemonday.StrictPolicy().Sanitize(fmt.Sprintf("Unable to find assistant file with assistant_id: %s", assistantID))) } } diff --git a/core/http/endpoints/openai/files.go b/core/http/endpoints/openai/files.go index 903484b4..bc392e73 100644 --- a/core/http/endpoints/openai/files.go +++ b/core/http/endpoints/openai/files.go @@ -8,6 +8,7 @@ import ( "sync/atomic" "time" + "github.com/microcosm-cc/bluemonday" "github.com/mudler/LocalAI/core/config" "github.com/mudler/LocalAI/core/schema" @@ -49,7 +50,7 @@ func UploadFilesEndpoint(cm 
*config.BackendConfigLoader, appConfig *config.Appli err = c.SaveFile(file, savePath) if err != nil { - return c.Status(fiber.StatusInternalServerError).SendString("Failed to save file: " + err.Error()) + return c.Status(fiber.StatusInternalServerError).SendString("Failed to save file: " + bluemonday.StrictPolicy().Sanitize(err.Error())) } f := schema.File{ @@ -121,7 +122,7 @@ func GetFilesEndpoint(cm *config.BackendConfigLoader, appConfig *config.Applicat return func(c *fiber.Ctx) error { file, err := getFileFromRequest(c) if err != nil { - return c.Status(fiber.StatusInternalServerError).SendString(err.Error()) + return c.Status(fiber.StatusInternalServerError).SendString(bluemonday.StrictPolicy().Sanitize(err.Error())) } return c.JSON(file) @@ -143,14 +144,14 @@ func DeleteFilesEndpoint(cm *config.BackendConfigLoader, appConfig *config.Appli return func(c *fiber.Ctx) error { file, err := getFileFromRequest(c) if err != nil { - return c.Status(fiber.StatusInternalServerError).SendString(err.Error()) + return c.Status(fiber.StatusInternalServerError).SendString(bluemonday.StrictPolicy().Sanitize(err.Error())) } err = os.Remove(filepath.Join(appConfig.UploadDir, file.Filename)) if err != nil { // If the file doesn't exist then we should just continue to remove it if !errors.Is(err, os.ErrNotExist) { - return c.Status(fiber.StatusInternalServerError).SendString(fmt.Sprintf("Unable to delete file: %s, %v", file.Filename, err)) + return c.Status(fiber.StatusInternalServerError).SendString(bluemonday.StrictPolicy().Sanitize(fmt.Sprintf("Unable to delete file: %s, %v", file.Filename, err))) } } @@ -180,12 +181,12 @@ func GetFilesContentsEndpoint(cm *config.BackendConfigLoader, appConfig *config. 
return func(c *fiber.Ctx) error { file, err := getFileFromRequest(c) if err != nil { - return c.Status(fiber.StatusInternalServerError).SendString(err.Error()) + return c.Status(fiber.StatusInternalServerError).SendString(bluemonday.StrictPolicy().Sanitize(err.Error())) } fileContents, err := os.ReadFile(filepath.Join(appConfig.UploadDir, file.Filename)) if err != nil { - return c.Status(fiber.StatusInternalServerError).SendString(err.Error()) + return c.Status(fiber.StatusInternalServerError).SendString(bluemonday.StrictPolicy().Sanitize(err.Error())) } return c.Send(fileContents) diff --git a/core/http/middleware/auth.go b/core/http/middleware/auth.go index d2152e9b..8f5fe2fb 100644 --- a/core/http/middleware/auth.go +++ b/core/http/middleware/auth.go @@ -7,6 +7,7 @@ import ( "github.com/dave-gray101/v2keyauth" "github.com/gofiber/fiber/v2" "github.com/gofiber/fiber/v2/middleware/keyauth" + "github.com/microcosm-cc/bluemonday" "github.com/mudler/LocalAI/core/config" ) @@ -38,7 +39,7 @@ func getApiKeyErrorHandler(applicationConfig *config.ApplicationConfig) fiber.Er if applicationConfig.OpaqueErrors { return ctx.SendStatus(403) } - return ctx.Status(403).SendString(err.Error()) + return ctx.Status(403).SendString(bluemonday.StrictPolicy().Sanitize(err.Error())) } if applicationConfig.OpaqueErrors { return ctx.SendStatus(500) diff --git a/core/http/routes/ui.go b/core/http/routes/ui.go index cfe9368c..6ea38f35 100644 --- a/core/http/routes/ui.go +++ b/core/http/routes/ui.go @@ -6,6 +6,7 @@ import ( "sort" "strings" + "github.com/microcosm-cc/bluemonday" "github.com/mudler/LocalAI/core/config" "github.com/mudler/LocalAI/core/gallery" "github.com/mudler/LocalAI/core/http/elements" @@ -171,7 +172,7 @@ func RegisterUIRoutes(app *fiber.App, Search string `form:"search"` }{} if err := c.BodyParser(&form); err != nil { - return c.Status(fiber.StatusBadRequest).SendString(err.Error()) + return 
c.Status(fiber.StatusBadRequest).SendString(bluemonday.StrictPolicy().Sanitize(err.Error())) } models, _ := gallery.AvailableGalleryModels(appConfig.Galleries, appConfig.ModelPath) From b8b1e10f34ea37dfb5eac5b0e48925744b5ed3a1 Mon Sep 17 00:00:00 2001 From: "LocalAI [bot]" <139863280+localai-bot@users.noreply.github.com> Date: Sat, 12 Oct 2024 23:41:06 +0200 Subject: [PATCH 0306/1530] docs: :arrow_up: update docs version mudler/LocalAI (#3796) :arrow_up: Update docs version mudler/LocalAI Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com> Co-authored-by: mudler <2420543+mudler@users.noreply.github.com> --- docs/data/version.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/data/version.json b/docs/data/version.json index 470991b8..1814b362 100644 --- a/docs/data/version.json +++ b/docs/data/version.json @@ -1,3 +1,3 @@ { - "version": "v2.21.1" + "version": "v2.22.0" } From 465f1f14a7af87396ef6d3cc0068742d11e73f46 Mon Sep 17 00:00:00 2001 From: Dave Date: Sat, 12 Oct 2024 18:07:43 -0400 Subject: [PATCH 0307/1530] chore: dependabot ignore generated grpc go package (#3795) Signed-off-by: Dave Lee --- .github/dependabot.yml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/.github/dependabot.yml b/.github/dependabot.yml index 5016ebdb..fcd6c88c 100644 --- a/.github/dependabot.yml +++ b/.github/dependabot.yml @@ -9,6 +9,8 @@ updates: directory: "/" schedule: interval: "weekly" + ignore: + - dependency-name: "github.com/mudler/LocalAI/pkg/grpc/proto" - package-ecosystem: "github-actions" # Workflow files stored in the default location of `.github/workflows`. (You don't need to specify `/.github/workflows` for `directory`. You can use `directory: "/"`.) 
directory: "/" From b46f36195fce89e906ad69a3cf5aef8f73018323 Mon Sep 17 00:00:00 2001 From: "LocalAI [bot]" <139863280+localai-bot@users.noreply.github.com> Date: Sun, 13 Oct 2024 00:07:54 +0200 Subject: [PATCH 0308/1530] chore: :arrow_up: Update ggerganov/llama.cpp to `edc265661cd707327297b6ec4d83423c43cb50a5` (#3797) :arrow_up: Update ggerganov/llama.cpp Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com> Co-authored-by: mudler <2420543+mudler@users.noreply.github.com> --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index 3cbc6e10..6b216dd5 100644 --- a/Makefile +++ b/Makefile @@ -8,7 +8,7 @@ DETECT_LIBS?=true # llama.cpp versions GOLLAMA_REPO?=https://github.com/go-skynet/go-llama.cpp GOLLAMA_VERSION?=2b57a8ae43e4699d3dc5d1496a1ccd42922993be -CPPLLAMA_VERSION?=96776405a17034dcfd53d3ddf5d142d34bdbb657 +CPPLLAMA_VERSION?=edc265661cd707327297b6ec4d83423c43cb50a5 # go-rwkv version RWKV_REPO?=https://github.com/donomii/go-rwkv.cpp From a909f63fbe108220e999bc41ba87df456688658b Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Sun, 13 Oct 2024 00:50:00 +0000 Subject: [PATCH 0309/1530] chore(deps): Bump docs/themes/hugo-theme-relearn from `d5a0ee0` to `e1a1f01` (#3798) chore(deps): Bump docs/themes/hugo-theme-relearn Bumps [docs/themes/hugo-theme-relearn](https://github.com/McShelby/hugo-theme-relearn) from `d5a0ee0` to `e1a1f01`. - [Release notes](https://github.com/McShelby/hugo-theme-relearn/releases) - [Commits](https://github.com/McShelby/hugo-theme-relearn/compare/d5a0ee04ad986394d6d2f1e1a57f2334d24bf317...e1a1f01f4c34b1980e07f81dd0c690e0969c00ca) --- updated-dependencies: - dependency-name: docs/themes/hugo-theme-relearn dependency-type: direct:production ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- docs/themes/hugo-theme-relearn | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/themes/hugo-theme-relearn b/docs/themes/hugo-theme-relearn index d5a0ee04..e1a1f01f 160000 --- a/docs/themes/hugo-theme-relearn +++ b/docs/themes/hugo-theme-relearn @@ -1 +1 @@ -Subproject commit d5a0ee04ad986394d6d2f1e1a57f2334d24bf317 +Subproject commit e1a1f01f4c34b1980e07f81dd0c690e0969c00ca From 7eab6ba71b777587ffc26f3b86a6e1cc2c47c07d Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Sun, 13 Oct 2024 10:04:31 +0200 Subject: [PATCH 0310/1530] chore(deps): Bump mxschmitt/action-tmate from 3.18 to 3.19 (#3799) Bumps [mxschmitt/action-tmate](https://github.com/mxschmitt/action-tmate) from 3.18 to 3.19. - [Release notes](https://github.com/mxschmitt/action-tmate/releases) - [Changelog](https://github.com/mxschmitt/action-tmate/blob/master/RELEASE.md) - [Commits](https://github.com/mxschmitt/action-tmate/compare/v3.18...v3.19) --- updated-dependencies: - dependency-name: mxschmitt/action-tmate dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/notify-models.yaml | 4 ++-- .github/workflows/release.yaml | 8 ++++---- .github/workflows/test.yml | 6 +++--- 3 files changed, 9 insertions(+), 9 deletions(-) diff --git a/.github/workflows/notify-models.yaml b/.github/workflows/notify-models.yaml index d6a7b210..e17ee7fc 100644 --- a/.github/workflows/notify-models.yaml +++ b/.github/workflows/notify-models.yaml @@ -79,7 +79,7 @@ jobs: args: ${{ steps.summarize.outputs.message }} - name: Setup tmate session if fails if: ${{ failure() }} - uses: mxschmitt/action-tmate@v3.18 + uses: mxschmitt/action-tmate@v3.19 with: detached: true connect-timeout-seconds: 180 @@ -161,7 +161,7 @@ jobs: TWITTER_ACCESS_TOKEN_SECRET: ${{ secrets.TWITTER_ACCESS_TOKEN_SECRET }} - name: Setup tmate session if fails if: ${{ failure() }} - uses: mxschmitt/action-tmate@v3.18 + uses: mxschmitt/action-tmate@v3.19 with: detached: true connect-timeout-seconds: 180 diff --git a/.github/workflows/release.yaml b/.github/workflows/release.yaml index a1318b19..47a69b0f 100644 --- a/.github/workflows/release.yaml +++ b/.github/workflows/release.yaml @@ -123,7 +123,7 @@ jobs: release/* - name: Setup tmate session if tests fail if: ${{ failure() }} - uses: mxschmitt/action-tmate@v3.18 + uses: mxschmitt/action-tmate@v3.19 with: detached: true connect-timeout-seconds: 180 @@ -232,7 +232,7 @@ jobs: release/* - name: Setup tmate session if tests fail if: ${{ failure() }} - uses: mxschmitt/action-tmate@v3.18 + uses: mxschmitt/action-tmate@v3.19 with: detached: true connect-timeout-seconds: 180 @@ -308,7 +308,7 @@ jobs: release/* - name: Setup tmate session if tests fail if: ${{ failure() }} - uses: mxschmitt/action-tmate@v3.18 + uses: mxschmitt/action-tmate@v3.19 with: detached: true connect-timeout-seconds: 180 @@ -350,7 +350,7 @@ jobs: release/* - name: Setup tmate session if tests fail if: ${{ failure() }} - uses: 
mxschmitt/action-tmate@v3.18 + uses: mxschmitt/action-tmate@v3.19 with: detached: true connect-timeout-seconds: 180 diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index b62f86ef..f1078706 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -133,7 +133,7 @@ jobs: PATH="$PATH:/root/go/bin" GO_TAGS="stablediffusion tts" make --jobs 5 --output-sync=target test - name: Setup tmate session if tests fail if: ${{ failure() }} - uses: mxschmitt/action-tmate@v3.18 + uses: mxschmitt/action-tmate@v3.19 with: detached: true connect-timeout-seconds: 180 @@ -197,7 +197,7 @@ jobs: make run-e2e-aio - name: Setup tmate session if tests fail if: ${{ failure() }} - uses: mxschmitt/action-tmate@v3.18 + uses: mxschmitt/action-tmate@v3.19 with: detached: true connect-timeout-seconds: 180 @@ -235,7 +235,7 @@ jobs: BUILD_TYPE="GITHUB_CI_HAS_BROKEN_METAL" CMAKE_ARGS="-DGGML_F16C=OFF -DGGML_AVX512=OFF -DGGML_AVX2=OFF -DGGML_FMA=OFF" make --jobs 4 --output-sync=target test - name: Setup tmate session if tests fail if: ${{ failure() }} - uses: mxschmitt/action-tmate@v3.18 + uses: mxschmitt/action-tmate@v3.19 with: detached: true connect-timeout-seconds: 180 From 4858e72fd957f913f1d62bf76543d921b15e5690 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Sun, 13 Oct 2024 10:04:56 +0200 Subject: [PATCH 0311/1530] chore(deps): Bump sentence-transformers from 3.1.1 to 3.2.0 in /backend/python/sentencetransformers (#3801) chore(deps): Bump sentence-transformers Bumps [sentence-transformers](https://github.com/UKPLab/sentence-transformers) from 3.1.1 to 3.2.0. - [Release notes](https://github.com/UKPLab/sentence-transformers/releases) - [Commits](https://github.com/UKPLab/sentence-transformers/compare/v3.1.1...v3.2.0) --- updated-dependencies: - dependency-name: sentence-transformers dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- backend/python/sentencetransformers/requirements-cpu.txt | 2 +- backend/python/sentencetransformers/requirements-cublas11.txt | 2 +- backend/python/sentencetransformers/requirements-cublas12.txt | 2 +- backend/python/sentencetransformers/requirements-hipblas.txt | 2 +- backend/python/sentencetransformers/requirements-intel.txt | 2 +- 5 files changed, 5 insertions(+), 5 deletions(-) diff --git a/backend/python/sentencetransformers/requirements-cpu.txt b/backend/python/sentencetransformers/requirements-cpu.txt index 0fd8f35e..bc0e899e 100644 --- a/backend/python/sentencetransformers/requirements-cpu.txt +++ b/backend/python/sentencetransformers/requirements-cpu.txt @@ -2,5 +2,5 @@ torch accelerate transformers bitsandbytes -sentence-transformers==3.1.1 +sentence-transformers==3.2.0 transformers \ No newline at end of file diff --git a/backend/python/sentencetransformers/requirements-cublas11.txt b/backend/python/sentencetransformers/requirements-cublas11.txt index 92a10b16..7cd277f7 100644 --- a/backend/python/sentencetransformers/requirements-cublas11.txt +++ b/backend/python/sentencetransformers/requirements-cublas11.txt @@ -1,5 +1,5 @@ --extra-index-url https://download.pytorch.org/whl/cu118 torch accelerate -sentence-transformers==3.1.1 +sentence-transformers==3.2.0 transformers \ No newline at end of file diff --git a/backend/python/sentencetransformers/requirements-cublas12.txt b/backend/python/sentencetransformers/requirements-cublas12.txt index f68bb1b9..aa289073 100644 --- a/backend/python/sentencetransformers/requirements-cublas12.txt +++ b/backend/python/sentencetransformers/requirements-cublas12.txt @@ -1,4 +1,4 @@ torch accelerate -sentence-transformers==3.1.1 +sentence-transformers==3.2.0 transformers \ No newline at end of file diff --git a/backend/python/sentencetransformers/requirements-hipblas.txt 
b/backend/python/sentencetransformers/requirements-hipblas.txt index 920eb855..793bea16 100644 --- a/backend/python/sentencetransformers/requirements-hipblas.txt +++ b/backend/python/sentencetransformers/requirements-hipblas.txt @@ -1,5 +1,5 @@ --extra-index-url https://download.pytorch.org/whl/rocm6.0 torch accelerate -sentence-transformers==3.1.1 +sentence-transformers==3.2.0 transformers \ No newline at end of file diff --git a/backend/python/sentencetransformers/requirements-intel.txt b/backend/python/sentencetransformers/requirements-intel.txt index 6ae4bdd4..f234e978 100644 --- a/backend/python/sentencetransformers/requirements-intel.txt +++ b/backend/python/sentencetransformers/requirements-intel.txt @@ -4,5 +4,5 @@ torch optimum[openvino] setuptools==69.5.1 # https://github.com/mudler/LocalAI/issues/2406 accelerate -sentence-transformers==3.1.1 +sentence-transformers==3.2.0 transformers \ No newline at end of file From 5bca02bad482c30589eacfc89dc55e37fe099924 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Sun, 13 Oct 2024 10:05:26 +0200 Subject: [PATCH 0312/1530] chore(deps): Bump langchain from 0.3.2 to 0.3.3 in /examples/langchain/langchainpy-localai-example (#3803) chore(deps): Bump langchain Bumps [langchain](https://github.com/langchain-ai/langchain) from 0.3.2 to 0.3.3. - [Release notes](https://github.com/langchain-ai/langchain/releases) - [Commits](https://github.com/langchain-ai/langchain/compare/langchain==0.3.2...langchain==0.3.3) --- updated-dependencies: - dependency-name: langchain dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- examples/langchain/langchainpy-localai-example/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/langchain/langchainpy-localai-example/requirements.txt b/examples/langchain/langchainpy-localai-example/requirements.txt index a50fd991..5542d90e 100644 --- a/examples/langchain/langchainpy-localai-example/requirements.txt +++ b/examples/langchain/langchainpy-localai-example/requirements.txt @@ -10,7 +10,7 @@ debugpy==1.8.6 frozenlist==1.4.1 greenlet==3.1.1 idna==3.10 -langchain==0.3.2 +langchain==0.3.3 langchain-community==0.3.1 marshmallow==3.22.0 marshmallow-enum==1.5.1 From a282bd49690b93fbd3fd6c0a8b9cce030c773f94 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Sun, 13 Oct 2024 10:05:54 +0200 Subject: [PATCH 0313/1530] chore(deps): Bump llama-index from 0.11.16 to 0.11.17 in /examples/langchain-chroma (#3804) chore(deps): Bump llama-index in /examples/langchain-chroma Bumps [llama-index](https://github.com/run-llama/llama_index) from 0.11.16 to 0.11.17. - [Release notes](https://github.com/run-llama/llama_index/releases) - [Changelog](https://github.com/run-llama/llama_index/blob/main/CHANGELOG.md) - [Commits](https://github.com/run-llama/llama_index/compare/v0.11.16...v0.11.17) --- updated-dependencies: - dependency-name: llama-index dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- examples/langchain-chroma/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/langchain-chroma/requirements.txt b/examples/langchain-chroma/requirements.txt index 302eb041..e0efcb99 100644 --- a/examples/langchain-chroma/requirements.txt +++ b/examples/langchain-chroma/requirements.txt @@ -1,4 +1,4 @@ langchain==0.3.1 openai==1.51.1 chromadb==0.5.11 -llama-index==0.11.16 \ No newline at end of file +llama-index==0.11.17 \ No newline at end of file From 803e2db30b7f47bc5336ec27b31751ed7b1b342d Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Sun, 13 Oct 2024 10:06:21 +0200 Subject: [PATCH 0314/1530] chore(deps): Bump python from 3.12-bullseye to 3.13-bullseye in /examples/langchain (#3805) chore(deps): Bump python in /examples/langchain Bumps python from 3.12-bullseye to 3.13-bullseye. --- updated-dependencies: - dependency-name: python dependency-type: direct:production ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- examples/langchain/PY.Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/langchain/PY.Dockerfile b/examples/langchain/PY.Dockerfile index efc7a876..865aec60 100644 --- a/examples/langchain/PY.Dockerfile +++ b/examples/langchain/PY.Dockerfile @@ -1,4 +1,4 @@ -FROM python:3.12-bullseye +FROM python:3.13-bullseye COPY ./langchainpy-localai-example /app WORKDIR /app RUN pip install --no-cache-dir -r requirements.txt From 3f923bb2ce2e7f97393fa175d16173cbc0c9da5f Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Sun, 13 Oct 2024 10:06:48 +0200 Subject: [PATCH 0315/1530] chore(deps): Bump openai from 1.51.1 to 1.51.2 in /examples/functions (#3806) Bumps [openai](https://github.com/openai/openai-python) from 1.51.1 to 1.51.2. - [Release notes](https://github.com/openai/openai-python/releases) - [Changelog](https://github.com/openai/openai-python/blob/main/CHANGELOG.md) - [Commits](https://github.com/openai/openai-python/compare/v1.51.1...v1.51.2) --- updated-dependencies: - dependency-name: openai dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- examples/functions/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/functions/requirements.txt b/examples/functions/requirements.txt index 9e988435..4a8a8aad 100644 --- a/examples/functions/requirements.txt +++ b/examples/functions/requirements.txt @@ -1,2 +1,2 @@ langchain==0.3.2 -openai==1.51.1 +openai==1.51.2 From 086f9e1f071918e6f0c78d9511ad1fb9df6914fa Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Sun, 13 Oct 2024 10:07:29 +0200 Subject: [PATCH 0316/1530] chore(deps): Bump llama-index from 0.11.16 to 0.11.17 in /examples/chainlit (#3807) chore(deps): Bump llama-index in /examples/chainlit Bumps [llama-index](https://github.com/run-llama/llama_index) from 0.11.16 to 0.11.17. - [Release notes](https://github.com/run-llama/llama_index/releases) - [Changelog](https://github.com/run-llama/llama_index/blob/main/CHANGELOG.md) - [Commits](https://github.com/run-llama/llama_index/compare/v0.11.16...v0.11.17) --- updated-dependencies: - dependency-name: llama-index dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- examples/chainlit/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/chainlit/requirements.txt b/examples/chainlit/requirements.txt index c21991df..0d60c193 100644 --- a/examples/chainlit/requirements.txt +++ b/examples/chainlit/requirements.txt @@ -1,4 +1,4 @@ -llama_index==0.11.16 +llama_index==0.11.17 requests==2.32.3 weaviate_client==4.8.1 transformers From 814cc24b69c30e7ed27f2f6fb38db772cc7ce709 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Sun, 13 Oct 2024 10:08:14 +0200 Subject: [PATCH 0317/1530] chore(deps): Bump langchain from 0.3.1 to 0.3.3 in /examples/langchain-chroma (#3809) chore(deps): Bump langchain in /examples/langchain-chroma Bumps [langchain](https://github.com/langchain-ai/langchain) from 0.3.1 to 0.3.3. - [Release notes](https://github.com/langchain-ai/langchain/releases) - [Commits](https://github.com/langchain-ai/langchain/compare/langchain==0.3.1...langchain==0.3.3) --- updated-dependencies: - dependency-name: langchain dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- examples/langchain-chroma/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/langchain-chroma/requirements.txt b/examples/langchain-chroma/requirements.txt index e0efcb99..cfeb802e 100644 --- a/examples/langchain-chroma/requirements.txt +++ b/examples/langchain-chroma/requirements.txt @@ -1,4 +1,4 @@ -langchain==0.3.1 +langchain==0.3.3 openai==1.51.1 chromadb==0.5.11 llama-index==0.11.17 \ No newline at end of file From 975c579d44c3a0de5f5513d5930621b32c0a79eb Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Sun, 13 Oct 2024 10:09:05 +0200 Subject: [PATCH 0318/1530] chore(deps): Bump openai from 1.51.1 to 1.51.2 in /examples/langchain/langchainpy-localai-example (#3808) chore(deps): Bump openai Bumps [openai](https://github.com/openai/openai-python) from 1.51.1 to 1.51.2. - [Release notes](https://github.com/openai/openai-python/releases) - [Changelog](https://github.com/openai/openai-python/blob/main/CHANGELOG.md) - [Commits](https://github.com/openai/openai-python/compare/v1.51.1...v1.51.2) --- updated-dependencies: - dependency-name: openai dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- examples/langchain/langchainpy-localai-example/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/langchain/langchainpy-localai-example/requirements.txt b/examples/langchain/langchainpy-localai-example/requirements.txt index 5542d90e..8810f819 100644 --- a/examples/langchain/langchainpy-localai-example/requirements.txt +++ b/examples/langchain/langchainpy-localai-example/requirements.txt @@ -18,7 +18,7 @@ multidict==6.1.0 mypy-extensions==1.0.0 numexpr==2.10.1 numpy==2.1.1 -openai==1.51.1 +openai==1.51.2 openapi-schema-pydantic==1.2.4 packaging>=23.2 pydantic==2.9.2 From 9695969913fc632c67c9fdb95bead17cb0ec8d79 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Sun, 13 Oct 2024 10:09:48 +0200 Subject: [PATCH 0319/1530] chore(deps): Bump yarl from 1.13.1 to 1.15.1 in /examples/langchain/langchainpy-localai-example (#3816) chore(deps): Bump yarl Bumps [yarl](https://github.com/aio-libs/yarl) from 1.13.1 to 1.15.1. - [Release notes](https://github.com/aio-libs/yarl/releases) - [Changelog](https://github.com/aio-libs/yarl/blob/master/CHANGES.rst) - [Commits](https://github.com/aio-libs/yarl/compare/v1.13.1...v1.15.1) --- updated-dependencies: - dependency-name: yarl dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- examples/langchain/langchainpy-localai-example/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/langchain/langchainpy-localai-example/requirements.txt b/examples/langchain/langchainpy-localai-example/requirements.txt index 8810f819..3e43bb3a 100644 --- a/examples/langchain/langchainpy-localai-example/requirements.txt +++ b/examples/langchain/langchainpy-localai-example/requirements.txt @@ -30,4 +30,4 @@ tqdm==4.66.5 typing-inspect==0.9.0 typing_extensions==4.12.2 urllib3==2.2.3 -yarl==1.13.1 +yarl==1.15.1 From a1a86aa1f77cfb698f20b103dc6b63ca2f209aaa Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Sun, 13 Oct 2024 10:10:26 +0200 Subject: [PATCH 0320/1530] chore(deps): Bump chromadb from 0.5.11 to 0.5.13 in /examples/langchain-chroma (#3811) chore(deps): Bump chromadb in /examples/langchain-chroma Bumps [chromadb](https://github.com/chroma-core/chroma) from 0.5.11 to 0.5.13. - [Release notes](https://github.com/chroma-core/chroma/releases) - [Changelog](https://github.com/chroma-core/chroma/blob/main/RELEASE_PROCESS.md) - [Commits](https://github.com/chroma-core/chroma/compare/0.5.11...0.5.13) --- updated-dependencies: - dependency-name: chromadb dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- examples/langchain-chroma/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/langchain-chroma/requirements.txt b/examples/langchain-chroma/requirements.txt index cfeb802e..2c82aa52 100644 --- a/examples/langchain-chroma/requirements.txt +++ b/examples/langchain-chroma/requirements.txt @@ -1,4 +1,4 @@ langchain==0.3.3 openai==1.51.1 -chromadb==0.5.11 +chromadb==0.5.13 llama-index==0.11.17 \ No newline at end of file From d5d9e78983985d006dd00fa53df53bfb8cfb23ca Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Sun, 13 Oct 2024 10:10:51 +0200 Subject: [PATCH 0321/1530] chore(deps): Bump langchain from 0.3.2 to 0.3.3 in /examples/functions (#3802) Bumps [langchain](https://github.com/langchain-ai/langchain) from 0.3.2 to 0.3.3. - [Release notes](https://github.com/langchain-ai/langchain/releases) - [Commits](https://github.com/langchain-ai/langchain/compare/langchain==0.3.2...langchain==0.3.3) --- updated-dependencies: - dependency-name: langchain dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- examples/functions/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/functions/requirements.txt b/examples/functions/requirements.txt index 4a8a8aad..48e6a25a 100644 --- a/examples/functions/requirements.txt +++ b/examples/functions/requirements.txt @@ -1,2 +1,2 @@ -langchain==0.3.2 +langchain==0.3.3 openai==1.51.2 From 181fa93168650ac4eb1af418492fa0b136aace93 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Sun, 13 Oct 2024 10:12:38 +0200 Subject: [PATCH 0322/1530] chore(deps): Bump debugpy from 1.8.6 to 1.8.7 in /examples/langchain/langchainpy-localai-example (#3814) chore(deps): Bump debugpy Bumps [debugpy](https://github.com/microsoft/debugpy) from 1.8.6 to 1.8.7. - [Release notes](https://github.com/microsoft/debugpy/releases) - [Commits](https://github.com/microsoft/debugpy/compare/v1.8.6...v1.8.7) --- updated-dependencies: - dependency-name: debugpy dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- examples/langchain/langchainpy-localai-example/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/langchain/langchainpy-localai-example/requirements.txt b/examples/langchain/langchainpy-localai-example/requirements.txt index 3e43bb3a..a4ebe1e2 100644 --- a/examples/langchain/langchainpy-localai-example/requirements.txt +++ b/examples/langchain/langchainpy-localai-example/requirements.txt @@ -6,7 +6,7 @@ certifi==2024.8.30 charset-normalizer==3.3.2 colorama==0.4.6 dataclasses-json==0.6.7 -debugpy==1.8.6 +debugpy==1.8.7 frozenlist==1.4.1 greenlet==3.1.1 idna==3.10 From fd493a4451e4630b71f443657e5ef56c4d9af9c3 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Sun, 13 Oct 2024 10:13:06 +0200 Subject: [PATCH 0323/1530] chore(deps): Bump aiohttp from 3.10.9 to 3.10.10 in /examples/langchain/langchainpy-localai-example (#3812) chore(deps): Bump aiohttp Bumps [aiohttp](https://github.com/aio-libs/aiohttp) from 3.10.9 to 3.10.10. - [Release notes](https://github.com/aio-libs/aiohttp/releases) - [Changelog](https://github.com/aio-libs/aiohttp/blob/master/CHANGES.rst) - [Commits](https://github.com/aio-libs/aiohttp/compare/v3.10.9...v3.10.10) --- updated-dependencies: - dependency-name: aiohttp dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- examples/langchain/langchainpy-localai-example/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/langchain/langchainpy-localai-example/requirements.txt b/examples/langchain/langchainpy-localai-example/requirements.txt index a4ebe1e2..4fa90249 100644 --- a/examples/langchain/langchainpy-localai-example/requirements.txt +++ b/examples/langchain/langchainpy-localai-example/requirements.txt @@ -1,4 +1,4 @@ -aiohttp==3.10.9 +aiohttp==3.10.10 aiosignal==1.3.1 async-timeout==4.0.3 attrs==24.2.0 From f65930422732facc54f6ab0b8a48b926a7b4e956 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Sun, 13 Oct 2024 11:24:59 +0200 Subject: [PATCH 0324/1530] chore(deps): Bump openai from 1.51.1 to 1.51.2 in /examples/langchain-chroma (#3810) chore(deps): Bump openai in /examples/langchain-chroma Bumps [openai](https://github.com/openai/openai-python) from 1.51.1 to 1.51.2. - [Release notes](https://github.com/openai/openai-python/releases) - [Changelog](https://github.com/openai/openai-python/blob/main/CHANGELOG.md) - [Commits](https://github.com/openai/openai-python/compare/v1.51.1...v1.51.2) --- updated-dependencies: - dependency-name: openai dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- examples/langchain-chroma/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/langchain-chroma/requirements.txt b/examples/langchain-chroma/requirements.txt index 2c82aa52..367cee06 100644 --- a/examples/langchain-chroma/requirements.txt +++ b/examples/langchain-chroma/requirements.txt @@ -1,4 +1,4 @@ langchain==0.3.3 -openai==1.51.1 +openai==1.51.2 chromadb==0.5.13 llama-index==0.11.17 \ No newline at end of file From f4dab8291935787d83fa1a3adb4de2f2d8a1bb51 Mon Sep 17 00:00:00 2001 From: Ettore Di Giacinto Date: Sun, 13 Oct 2024 23:07:00 +0200 Subject: [PATCH 0325/1530] models(gallery): add llama-3_8b_unaligned_beta (#3818) Signed-off-by: Ettore Di Giacinto --- gallery/index.yaml | 20 ++++++++++++++++++++ 1 file changed, 20 insertions(+) diff --git a/gallery/index.yaml b/gallery/index.yaml index c25ccc02..88bdcb09 100644 --- a/gallery/index.yaml +++ b/gallery/index.yaml @@ -5175,6 +5175,26 @@ - filename: L3-8B-Niitama-v1.i1-Q4_K_M.gguf sha256: 8c62f831db2a6e34aa75459fe8a98815199ecc2dac1892a460b8b86363b6826e uri: huggingface://mradermacher/L3-8B-Niitama-v1-i1-GGUF/L3-8B-Niitama-v1.i1-Q4_K_M.gguf +- !!merge <<: *llama3 + icon: https://huggingface.co/SicariusSicariiStuff/LLAMA-3_8B_Unaligned_BETA/resolve/main/Images/LLAMA-3_8B_Unaligned_BETA.png + name: "llama-3_8b_unaligned_beta" + urls: + - https://huggingface.co/SicariusSicariiStuff/LLAMA-3_8B_Unaligned_BETA + - https://huggingface.co/bartowski/LLAMA-3_8B_Unaligned_BETA-GGUF + description: | + In the Wild West of the AI world, the real titans never hit their deadlines, no sir! + The projects that finish on time? They’re the soft ones—basic, surface-level shenanigans. But the serious projects? They’re always delayed. You set a date, then reality hits: not gonna happen, scope creep that mutates the roadmap, unexpected turn of events that derails everything. 
+ It's only been 4 months since the Alpha was released, and half a year since the project started, but it felt like nearly a decade. + Deadlines shift, but with each delay, you’re not failing—you’re refining, and becoming more ambitious. A project that keeps getting pushed isn’t late; it’s just gaining weight, becoming something worth building, and truly worth seeing all the way through. The longer it’s delayed, the more serious it gets. + LLAMA-3_8B_Unaligned is a serious project, and thank god, the Beta is finally here. + I love you all unconditionally, thanks for all the support and kind words! + overrides: + parameters: + model: LLAMA-3_8B_Unaligned_BETA-Q4_K_M.gguf + files: + - filename: LLAMA-3_8B_Unaligned_BETA-Q4_K_M.gguf + sha256: 5b88fb4537339996c04e4a1b6ef6a2d555c4103b6378e273ae9c6c5e77af67eb + uri: huggingface://bartowski/LLAMA-3_8B_Unaligned_BETA-GGUF/LLAMA-3_8B_Unaligned_BETA-Q4_K_M.gguf - &chatml ### ChatML url: "github:mudler/LocalAI/gallery/chatml.yaml@master" From 43146fa607c0059aa9903a653425d251340423a5 Mon Sep 17 00:00:00 2001 From: "LocalAI [bot]" <139863280+localai-bot@users.noreply.github.com> Date: Mon, 14 Oct 2024 08:29:14 +0200 Subject: [PATCH 0326/1530] chore: :arrow_up: Update ggerganov/llama.cpp to `d4c19c0f5cdb1e512573e8c86c79e8d0238c73c4` (#3817) :arrow_up: Update ggerganov/llama.cpp Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com> Co-authored-by: mudler <2420543+mudler@users.noreply.github.com> --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index 6b216dd5..739e0f9a 100644 --- a/Makefile +++ b/Makefile @@ -8,7 +8,7 @@ DETECT_LIBS?=true # llama.cpp versions GOLLAMA_REPO?=https://github.com/go-skynet/go-llama.cpp GOLLAMA_VERSION?=2b57a8ae43e4699d3dc5d1496a1ccd42922993be -CPPLLAMA_VERSION?=edc265661cd707327297b6ec4d83423c43cb50a5 +CPPLLAMA_VERSION?=d4c19c0f5cdb1e512573e8c86c79e8d0238c73c4 # go-rwkv version 
RWKV_REPO?=https://github.com/donomii/go-rwkv.cpp From d53e71021f83c32d0094fe333933b62678015114 Mon Sep 17 00:00:00 2001 From: Ettore Di Giacinto Date: Mon, 14 Oct 2024 08:50:02 +0200 Subject: [PATCH 0327/1530] models(gallery): add llama3.1-flammades-70b (#3819) Signed-off-by: Ettore Di Giacinto --- gallery/index.yaml | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/gallery/index.yaml b/gallery/index.yaml index 88bdcb09..9f7389c2 100644 --- a/gallery/index.yaml +++ b/gallery/index.yaml @@ -1412,6 +1412,21 @@ - filename: NIHAPPY-L3.1-8B-v0.09.Q4_K_M.gguf sha256: 9bd46a06093448b143bd2775f0fb1b1b172c851fafdce31289e13b7dfc23a0d7 uri: huggingface://QuantFactory/NIHAPPY-L3.1-8B-v0.09-GGUF/NIHAPPY-L3.1-8B-v0.09.Q4_K_M.gguf +- !!merge <<: *llama31 + name: "llama3.1-flammades-70b" + icon: https://huggingface.co/flammenai/Flammades-Mistral-7B/resolve/main/flammades.png?download=true + urls: + - https://huggingface.co/flammenai/Llama3.1-Flammades-70B + - https://huggingface.co/mradermacher/Llama3.1-Flammades-70B-GGUF + description: | + nbeerbower/Llama3.1-Gutenberg-Doppel-70B finetuned on flammenai/Date-DPO-NoAsterisks and jondurbin/truthy-dpo-v0.1. 
+ overrides: + parameters: + model: Llama3.1-Flammades-70B.Q4_K_M.gguf + files: + - filename: Llama3.1-Flammades-70B.Q4_K_M.gguf + sha256: f602ed006d0059ac87c6ce5904a7cc6f4b4f290886a1049f96b5b2c561ab5a89 + uri: huggingface://mradermacher/Llama3.1-Flammades-70B-GGUF/Llama3.1-Flammades-70B.Q4_K_M.gguf - &deepseek ## Deepseek url: "github:mudler/LocalAI/gallery/deepseek.yaml@master" From b6f681315a69e986e24a23aae306d1fdcd71761d Mon Sep 17 00:00:00 2001 From: Ettore Di Giacinto Date: Mon, 14 Oct 2024 08:54:31 +0200 Subject: [PATCH 0328/1530] models(gallery): add llama3.1-gutenberg-doppel-70b (#3820) Signed-off-by: Ettore Di Giacinto --- gallery/index.yaml | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) diff --git a/gallery/index.yaml b/gallery/index.yaml index 9f7389c2..877cd637 100644 --- a/gallery/index.yaml +++ b/gallery/index.yaml @@ -1427,6 +1427,23 @@ - filename: Llama3.1-Flammades-70B.Q4_K_M.gguf sha256: f602ed006d0059ac87c6ce5904a7cc6f4b4f290886a1049f96b5b2c561ab5a89 uri: huggingface://mradermacher/Llama3.1-Flammades-70B-GGUF/Llama3.1-Flammades-70B.Q4_K_M.gguf +- !!merge <<: *llama31 + name: "llama3.1-gutenberg-doppel-70b" + # chatml + url: "github:mudler/LocalAI/gallery/chatml.yaml@master" + icon: https://huggingface.co/nbeerbower/Mistral-Small-Gutenberg-Doppel-22B/resolve/main/doppel-header?download=true + urls: + - https://huggingface.co/nbeerbower/Llama3.1-Gutenberg-Doppel-70B + - https://huggingface.co/mradermacher/Llama3.1-Gutenberg-Doppel-70B-GGUF + description: | + mlabonne/Hermes-3-Llama-3.1-70B-lorablated finetuned on jondurbin/gutenberg-dpo-v0.1 and nbeerbower/gutenberg2-dpo. 
+ overrides: + parameters: + model: Llama3.1-Gutenberg-Doppel-70B.Q4_K_M.gguf + files: + - filename: Llama3.1-Gutenberg-Doppel-70B.Q4_K_M.gguf + sha256: af558f954fa26c5bb75352178cb815bbf268f01c0ca0b96f2149422d4c19511b + uri: huggingface://mradermacher/Llama3.1-Gutenberg-Doppel-70B-GGUF/Llama3.1-Gutenberg-Doppel-70B.Q4_K_M.gguf - &deepseek ## Deepseek url: "github:mudler/LocalAI/gallery/deepseek.yaml@master" From 8ec828a65443bf19f4735040ac01162dd0a36206 Mon Sep 17 00:00:00 2001 From: Ettore Di Giacinto Date: Mon, 14 Oct 2024 08:59:41 +0200 Subject: [PATCH 0329/1530] models(gallery): add llama-3.1-8b-arliai-formax-v1.0-iq-arm-imatrix (#3821) Signed-off-by: Ettore Di Giacinto --- gallery/index.yaml | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) diff --git a/gallery/index.yaml b/gallery/index.yaml index 877cd637..979bcdea 100644 --- a/gallery/index.yaml +++ b/gallery/index.yaml @@ -1444,6 +1444,25 @@ - filename: Llama3.1-Gutenberg-Doppel-70B.Q4_K_M.gguf sha256: af558f954fa26c5bb75352178cb815bbf268f01c0ca0b96f2149422d4c19511b uri: huggingface://mradermacher/Llama3.1-Gutenberg-Doppel-70B-GGUF/Llama3.1-Gutenberg-Doppel-70B.Q4_K_M.gguf +- !!merge <<: *llama31 + name: "llama-3.1-8b-arliai-formax-v1.0-iq-arm-imatrix" + url: "github:mudler/LocalAI/gallery/chatml.yaml@master" + icon: https://iili.io/2HmlLn2.md.png + urls: + - https://huggingface.co/Lewdiculous/Llama-3.1-8B-ArliAI-Formax-v1.0-GGUF-IQ-ARM-Imatrix + description: | + Quants for ArliAI/Llama-3.1-8B-ArliAI-Formax-v1.0. + + "Formax is a model that specializes in following response format instructions. Tell it the format of it's response and it will follow it perfectly. Great for data processing and dataset creation tasks." + + "It is also a highly uncensored model that will follow your instructions very well." 
+ overrides: + parameters: + model: Llama-3.1-8B-ArliAI-Formax-v1.0-Q4_K_M-imat.gguf + files: + - filename: Llama-3.1-8B-ArliAI-Formax-v1.0-Q4_K_M-imat.gguf + sha256: b548ad47caf7008a697afb3556190359529f5a05ec0e4e48ef992c7869e14255 + uri: huggingface://Lewdiculous/Llama-3.1-8B-ArliAI-Formax-v1.0-GGUF-IQ-ARM-Imatrix/Llama-3.1-8B-ArliAI-Formax-v1.0-Q4_K_M-imat.gguf - &deepseek ## Deepseek url: "github:mudler/LocalAI/gallery/deepseek.yaml@master" From 93ba5ea14f31e642eee554685250083564b8b1c7 Mon Sep 17 00:00:00 2001 From: Ettore Di Giacinto Date: Mon, 14 Oct 2024 09:00:37 +0200 Subject: [PATCH 0330/1530] models(gallery): add supernova-medius (#3822) Signed-off-by: Ettore Di Giacinto --- gallery/index.yaml | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/gallery/index.yaml b/gallery/index.yaml index 979bcdea..3bbdfec3 100644 --- a/gallery/index.yaml +++ b/gallery/index.yaml @@ -472,6 +472,21 @@ - filename: qwen2.5-7b-ins-v3-Q4_K_M.gguf sha256: 9c23734072714a4886c0386ae0ff07a5e940d67ad52278e2ed689fec44e1e0c8 uri: huggingface://bartowski/qwen2.5-7b-ins-v3-GGUF/qwen2.5-7b-ins-v3-Q4_K_M.gguf +- !!merge <<: *qwen25 + name: "supernova-medius" + urls: + - https://huggingface.co/arcee-ai/SuperNova-Medius-GGUF + description: | + Arcee-SuperNova-Medius is a 14B parameter language model developed by Arcee.ai, built on the Qwen2.5-14B-Instruct architecture. This unique model is the result of a cross-architecture distillation pipeline, combining knowledge from both the Qwen2.5-72B-Instruct model and the Llama-3.1-405B-Instruct model. By leveraging the strengths of these two distinct architectures, SuperNova-Medius achieves high-quality instruction-following and complex reasoning capabilities in a mid-sized, resource-efficient form. + + SuperNova-Medius is designed to excel in a variety of business use cases, including customer support, content creation, and technical assistance, while maintaining compatibility with smaller hardware configurations. 
It’s an ideal solution for organizations looking for advanced capabilities without the high resource requirements of larger models like our SuperNova-70B. + overrides: + parameters: + model: SuperNova-Medius-Q4_K_M.gguf + files: + - filename: SuperNova-Medius-Q4_K_M.gguf + sha256: aaa4bf3451bc900f186fd4b6b3a6a26bfd40c85908f605db76b92e58aadcc864 + uri: huggingface://arcee-ai/SuperNova-Medius-GGUF/SuperNova-Medius-Q4_K_M.gguf - &archfunct license: apache-2.0 tags: From 304484c59ba3c8437dccab31d59c7394951ca5c0 Mon Sep 17 00:00:00 2001 From: Ettore Di Giacinto Date: Mon, 14 Oct 2024 10:17:23 +0200 Subject: [PATCH 0331/1530] models(gallery): add hermes-3-llama-3.1-70b-lorablated (#3823) Signed-off-by: Ettore Di Giacinto --- gallery/index.yaml | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git a/gallery/index.yaml b/gallery/index.yaml index 3bbdfec3..56c53b5d 100644 --- a/gallery/index.yaml +++ b/gallery/index.yaml @@ -1478,6 +1478,24 @@ - filename: Llama-3.1-8B-ArliAI-Formax-v1.0-Q4_K_M-imat.gguf sha256: b548ad47caf7008a697afb3556190359529f5a05ec0e4e48ef992c7869e14255 uri: huggingface://Lewdiculous/Llama-3.1-8B-ArliAI-Formax-v1.0-GGUF-IQ-ARM-Imatrix/Llama-3.1-8B-ArliAI-Formax-v1.0-Q4_K_M-imat.gguf +- !!merge <<: *llama31 + name: "hermes-3-llama-3.1-70b-lorablated" + icon: https://cdn-uploads.huggingface.co/production/uploads/61b8e2ba285851687028d395/4Hbw5n68jKUSBQeTqQIeT.png + urls: + - https://huggingface.co/mlabonne/Hermes-3-Llama-3.1-70B-lorablated + - https://huggingface.co/mradermacher/Hermes-3-Llama-3.1-70B-lorablated-GGUF + description: | + This is an uncensored version of NousResearch/Hermes-3-Llama-3.1-70B using lorablation. + The recipe is based on @grimjim's grimjim/Llama-3.1-8B-Instruct-abliterated_via_adapter (special thanks): + Extraction: We extract a LoRA adapter by comparing two models: a censored Llama 3 (meta-llama/Meta-Llama-3-70B-Instruct) and an abliterated Llama 3.1 (failspy/Meta-Llama-3.1-70B-Instruct-abliterated). 
+ Merge: We merge this new LoRA adapter using task arithmetic to the censored NousResearch/Hermes-3-Llama-3.1-70B to abliterate it. + overrides: + parameters: + model: Hermes-3-Llama-3.1-70B-lorablated.Q4_K_M.gguf + files: + - filename: Hermes-3-Llama-3.1-70B-lorablated.Q4_K_M.gguf + sha256: 9294875ae3b8822855072b0f710ce800536d144cf303a91bcb087c4a307b578d + uri: huggingface://mradermacher/Hermes-3-Llama-3.1-70B-lorablated-GGUF/Hermes-3-Llama-3.1-70B-lorablated.Q4_K_M.gguf - &deepseek ## Deepseek url: "github:mudler/LocalAI/gallery/deepseek.yaml@master" From 61388317c1399c65ed58dec61cae58a149c5d741 Mon Sep 17 00:00:00 2001 From: Ettore Di Giacinto Date: Mon, 14 Oct 2024 10:28:56 +0200 Subject: [PATCH 0332/1530] models(gallery): add hermes-3-llama-3.1-8b-lorablated (#3824) Signed-off-by: Ettore Di Giacinto --- gallery/index.yaml | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/gallery/index.yaml b/gallery/index.yaml index 56c53b5d..f69fc54c 100644 --- a/gallery/index.yaml +++ b/gallery/index.yaml @@ -1496,6 +1496,22 @@ - filename: Hermes-3-Llama-3.1-70B-lorablated.Q4_K_M.gguf sha256: 9294875ae3b8822855072b0f710ce800536d144cf303a91bcb087c4a307b578d uri: huggingface://mradermacher/Hermes-3-Llama-3.1-70B-lorablated-GGUF/Hermes-3-Llama-3.1-70B-lorablated.Q4_K_M.gguf +- !!merge <<: *llama31 + name: "hermes-3-llama-3.1-8b-lorablated" + urls: + - https://huggingface.co/mlabonne/Hermes-3-Llama-3.1-8B-lorablated-GGUF + description: | + This is an uncensored version of NousResearch/Hermes-3-Llama-3.1-8B using lorablation. + The recipe is simple: + Extraction: We extract a LoRA adapter by comparing two models: a censored Llama 3.1 (meta-llama/Meta-Llama-3-8B-Instruct) and an abliterated Llama 3.1 (mlabonne/Meta-Llama-3.1-8B-Instruct-abliterated). + Merge: We merge this new LoRA adapter using task arithmetic to the censored NousResearch/Hermes-3-Llama-3.1-8B to abliterate it. 
+ overrides: + parameters: + model: hermes-3-llama-3.1-8b-lorablated.Q4_K_M.gguf + files: + - filename: hermes-3-llama-3.1-8b-lorablated.Q4_K_M.gguf + sha256: 8cff9d399a0583616fe1f290da6daa091ab5c5493d0e173a8fffb45202d79417 + uri: huggingface://mlabonne/Hermes-3-Llama-3.1-8B-lorablated-GGUF/hermes-3-llama-3.1-8b-lorablated.Q4_K_M.gguf - &deepseek ## Deepseek url: "github:mudler/LocalAI/gallery/deepseek.yaml@master" From 0a198e32de62d13b97a6e16921e56361076526e9 Mon Sep 17 00:00:00 2001 From: Ettore Di Giacinto Date: Mon, 14 Oct 2024 10:53:28 +0200 Subject: [PATCH 0333/1530] models(gallery): add eva-qwen2.5-14b-v0.1-i1 (#3825) Signed-off-by: Ettore Di Giacinto --- gallery/index.yaml | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/gallery/index.yaml b/gallery/index.yaml index f69fc54c..53646601 100644 --- a/gallery/index.yaml +++ b/gallery/index.yaml @@ -487,6 +487,21 @@ - filename: SuperNova-Medius-Q4_K_M.gguf sha256: aaa4bf3451bc900f186fd4b6b3a6a26bfd40c85908f605db76b92e58aadcc864 uri: huggingface://arcee-ai/SuperNova-Medius-GGUF/SuperNova-Medius-Q4_K_M.gguf +- !!merge <<: *qwen25 + name: "eva-qwen2.5-14b-v0.1-i1" + urls: + - https://huggingface.co/EVA-UNIT-01/EVA-Qwen2.5-14B-v0.1 + - https://huggingface.co/mradermacher/EVA-Qwen2.5-14B-v0.1-i1-GGUF + description: | + A RP/storywriting specialist model, full-parameter finetune of Qwen2.5-14B on mixture of synthetic and natural data. + It uses Celeste 70B 0.1 data mixture, greatly expanding it to improve versatility, creativity and "flavor" of the resulting model. 
+ overrides: + parameters: + model: EVA-Qwen2.5-14B-v0.1.i1-Q4_K_M.gguf + files: + - filename: EVA-Qwen2.5-14B-v0.1.i1-Q4_K_M.gguf + sha256: 4e9665d4f83cd97efb42c8427f9c09be93b72e23a0364c91ad0b5de8056f2795 + uri: huggingface://mradermacher/EVA-Qwen2.5-14B-v0.1-i1-GGUF/EVA-Qwen2.5-14B-v0.1.i1-Q4_K_M.gguf - &archfunct license: apache-2.0 tags: From 88115e4ddb91fb76c0e1ffaa8b760a45ef85217a Mon Sep 17 00:00:00 2001 From: Ettore Di Giacinto Date: Mon, 14 Oct 2024 11:06:11 +0200 Subject: [PATCH 0334/1530] models(gallery): add cursorcore-qw2.5-7b-i1 (#3826) Signed-off-by: Ettore Di Giacinto --- gallery/index.yaml | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/gallery/index.yaml b/gallery/index.yaml index 53646601..d9781f67 100644 --- a/gallery/index.yaml +++ b/gallery/index.yaml @@ -502,6 +502,20 @@ - filename: EVA-Qwen2.5-14B-v0.1.i1-Q4_K_M.gguf sha256: 4e9665d4f83cd97efb42c8427f9c09be93b72e23a0364c91ad0b5de8056f2795 uri: huggingface://mradermacher/EVA-Qwen2.5-14B-v0.1-i1-GGUF/EVA-Qwen2.5-14B-v0.1.i1-Q4_K_M.gguf +- !!merge <<: *qwen25 + name: "cursorcore-qw2.5-7b-i1" + urls: + - https://huggingface.co/TechxGenus/CursorCore-QW2.5-7B + - https://huggingface.co/mradermacher/CursorCore-QW2.5-7B-i1-GGUF + description: | + CursorCore is a series of open-source models designed for AI-assisted programming. It aims to support features such as automated editing and inline chat, replicating the core abilities of closed-source AI-assisted programming tools like Cursor. This is achieved by aligning data generated through Programming-Instruct. Please read our paper to learn more. 
+ overrides: + parameters: + model: CursorCore-QW2.5-7B.i1-Q4_K_M.gguf + files: + - filename: CursorCore-QW2.5-7B.i1-Q4_K_M.gguf + sha256: 81868f4edb4ec1a61debde1dbdebc02b407930ee19a6d946ff801afba840a102 + uri: huggingface://mradermacher/CursorCore-QW2.5-7B-i1-GGUF/CursorCore-QW2.5-7B.i1-Q4_K_M.gguf - &archfunct license: apache-2.0 tags: From d2a5a58e111f86b412cfff95fe6f5a75e6eacca0 Mon Sep 17 00:00:00 2001 From: Ettore Di Giacinto Date: Mon, 14 Oct 2024 11:10:34 +0200 Subject: [PATCH 0335/1530] models(gallery): add cursorcore-qw2.5-1.5b-lc-i1 (#3827) Signed-off-by: Ettore Di Giacinto --- gallery/index.yaml | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/gallery/index.yaml b/gallery/index.yaml index d9781f67..a05d4475 100644 --- a/gallery/index.yaml +++ b/gallery/index.yaml @@ -516,6 +516,20 @@ - filename: CursorCore-QW2.5-7B.i1-Q4_K_M.gguf sha256: 81868f4edb4ec1a61debde1dbdebc02b407930ee19a6d946ff801afba840a102 uri: huggingface://mradermacher/CursorCore-QW2.5-7B-i1-GGUF/CursorCore-QW2.5-7B.i1-Q4_K_M.gguf +- !!merge <<: *qwen25 + name: "cursorcore-qw2.5-1.5b-lc-i1" + urls: + - https://huggingface.co/TechxGenus/CursorCore-QW2.5-1.5B-LC + - https://huggingface.co/mradermacher/CursorCore-QW2.5-1.5B-LC-i1-GGUF + description: | + CursorCore is a series of open-source models designed for AI-assisted programming. It aims to support features such as automated editing and inline chat, replicating the core abilities of closed-source AI-assisted programming tools like Cursor. This is achieved by aligning data generated through Programming-Instruct. Please read our paper to learn more. 
+ overrides: + parameters: + model: CursorCore-QW2.5-1.5B-LC.i1-Q4_K_M.gguf + files: + - filename: CursorCore-QW2.5-1.5B-LC.i1-Q4_K_M.gguf + sha256: 185d720c810f7345ef861ad8eef1199bb15afa8e4f3c03bd5ffd476cfa465127 + uri: huggingface://mradermacher/CursorCore-QW2.5-1.5B-LC-i1-GGUF/CursorCore-QW2.5-1.5B-LC.i1-Q4_K_M.gguf - &archfunct license: apache-2.0 tags: From cb47a03880828b30f4081d4589217b3c026ce72e Mon Sep 17 00:00:00 2001 From: Ettore Di Giacinto Date: Mon, 14 Oct 2024 11:14:14 +0200 Subject: [PATCH 0336/1530] models(gallery): add cursorcore-ds-6.7b-i1 (#3828) Signed-off-by: Ettore Di Giacinto --- gallery/index.yaml | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/gallery/index.yaml b/gallery/index.yaml index a05d4475..d37c585d 100644 --- a/gallery/index.yaml +++ b/gallery/index.yaml @@ -1580,6 +1580,20 @@ - filename: DeepSeek-Coder-V2-Lite-Instruct-Q4_K_M.gguf sha256: 50ec78036433265965ed1afd0667c00c71c12aa70bcf383be462cb8e159db6c0 uri: huggingface://LoneStriker/DeepSeek-Coder-V2-Lite-Instruct-GGUF/DeepSeek-Coder-V2-Lite-Instruct-Q4_K_M.gguf +- !!merge <<: *deepseek + name: "cursorcore-ds-6.7b-i1" + urls: + - https://huggingface.co/TechxGenus/CursorCore-DS-6.7B + - https://huggingface.co/mradermacher/CursorCore-DS-6.7B-i1-GGUF + description: | + CursorCore is a series of open-source models designed for AI-assisted programming. It aims to support features such as automated editing and inline chat, replicating the core abilities of closed-source AI-assisted programming tools like Cursor. This is achieved by aligning data generated through Programming-Instruct. Please read our paper to learn more. 
+ overrides: + parameters: + model: CursorCore-DS-6.7B.i1-Q4_K_M.gguf + files: + - filename: CursorCore-DS-6.7B.i1-Q4_K_M.gguf + sha256: 71b94496be79e5bc45c23d6aa6c242f5f1d3625b4f00fe91d781d381ef35c538 + uri: huggingface://mradermacher/CursorCore-DS-6.7B-i1-GGUF/CursorCore-DS-6.7B.i1-Q4_K_M.gguf - name: "archangel_sft_pythia2-8b" url: "github:mudler/LocalAI/gallery/tuluv2.yaml@master" icon: https://gist.github.com/assets/29318529/fe2d8391-dbd1-4b7e-9dc4-7cb97e55bc06 From dcf28e6a281be857d4027f686fb73fb33817bd8a Mon Sep 17 00:00:00 2001 From: Ettore Di Giacinto Date: Mon, 14 Oct 2024 11:20:09 +0200 Subject: [PATCH 0337/1530] models(gallery): add cursorcore-yi-9b (#3829) Signed-off-by: Ettore Di Giacinto --- gallery/index.yaml | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/gallery/index.yaml b/gallery/index.yaml index d37c585d..2d1be87f 100644 --- a/gallery/index.yaml +++ b/gallery/index.yaml @@ -4405,6 +4405,19 @@ - filename: Yi-Coder-9B.Q4_K_M.gguf sha256: cff3db8a69c43654e3c2d2984e86ad2791d1d446ec56b24a636ba1ce78363308 uri: huggingface://QuantFactory/Yi-Coder-9B-GGUF/Yi-Coder-9B.Q4_K_M.gguf +- !!merge <<: *yi-chat + name: "cursorcore-yi-9b" + urls: + - https://huggingface.co/mradermacher/CursorCore-Yi-9B-GGUF + description: | + CursorCore is a series of open-source models designed for AI-assisted programming. It aims to support features such as automated editing and inline chat, replicating the core abilities of closed-source AI-assisted programming tools like Cursor. This is achieved by aligning data generated through Programming-Instruct. Please read our paper to learn more. 
+ overrides: + parameters: + model: CursorCore-Yi-9B.Q4_K_M.gguf + files: + - filename: CursorCore-Yi-9B.Q4_K_M.gguf + sha256: 943bf59b34bee34afae8390c1791ccbc7c742e11a4d04d538a699754eb92215e + uri: huggingface://mradermacher/CursorCore-Yi-9B-GGUF/CursorCore-Yi-9B.Q4_K_M.gguf - &vicuna-chat ## LLama2 and derivatives ### Start Fimbulvetr From 1e3cef67745f6ac6ae521b73ed9b6e988b6f6996 Mon Sep 17 00:00:00 2001 From: Ettore Di Giacinto Date: Mon, 14 Oct 2024 11:22:29 +0200 Subject: [PATCH 0338/1530] models(gallery): add edgerunner-command-nested-i1 (#3830) Signed-off-by: Ettore Di Giacinto --- gallery/index.yaml | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/gallery/index.yaml b/gallery/index.yaml index 2d1be87f..99d941a9 100644 --- a/gallery/index.yaml +++ b/gallery/index.yaml @@ -530,6 +530,20 @@ - filename: CursorCore-QW2.5-1.5B-LC.i1-Q4_K_M.gguf sha256: 185d720c810f7345ef861ad8eef1199bb15afa8e4f3c03bd5ffd476cfa465127 uri: huggingface://mradermacher/CursorCore-QW2.5-1.5B-LC-i1-GGUF/CursorCore-QW2.5-1.5B-LC.i1-Q4_K_M.gguf +- !!merge <<: *qwen25 + name: "edgerunner-command-nested-i1" + urls: + - https://huggingface.co/edgerunner-ai/EdgeRunner-Command-Nested + - https://huggingface.co/mradermacher/EdgeRunner-Command-Nested-i1-GGUF + description: | + EdgeRunner-Command-Nested is an advanced large language model designed specifically for handling complex nested function calls. Initialized from Qwen2.5-7B-Instruct, further enhanced by the integration of the Hermes function call template and additional training on a specialized dataset (based on TinyAgent). This extra dataset focuses on personal domain applications, providing the model with a robust understanding of nested function scenarios that are typical in complex user interactions. 
+ overrides: + parameters: + model: EdgeRunner-Command-Nested.i1-Q4_K_M.gguf + files: + - filename: EdgeRunner-Command-Nested.i1-Q4_K_M.gguf + sha256: a1cc4d2b601dc20e58cbb549bd3e9bc460995840c0aaf1cd3c1cb5414c900ac7 + uri: huggingface://mradermacher/EdgeRunner-Command-Nested-i1-GGUF/EdgeRunner-Command-Nested.i1-Q4_K_M.gguf - &archfunct license: apache-2.0 tags: From f9903d850f9118b82c0396844d530c72685b6480 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 14 Oct 2024 20:05:36 +0000 Subject: [PATCH 0339/1530] chore(deps): Bump charset-normalizer from 3.3.2 to 3.4.0 in /examples/langchain/langchainpy-localai-example (#3834) chore(deps): Bump charset-normalizer Bumps [charset-normalizer](https://github.com/Ousret/charset_normalizer) from 3.3.2 to 3.4.0. - [Release notes](https://github.com/Ousret/charset_normalizer/releases) - [Changelog](https://github.com/jawah/charset_normalizer/blob/master/CHANGELOG.md) - [Commits](https://github.com/Ousret/charset_normalizer/compare/3.3.2...3.4.0) --- updated-dependencies: - dependency-name: charset-normalizer dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- examples/langchain/langchainpy-localai-example/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/langchain/langchainpy-localai-example/requirements.txt b/examples/langchain/langchainpy-localai-example/requirements.txt index 4fa90249..ff0bcce6 100644 --- a/examples/langchain/langchainpy-localai-example/requirements.txt +++ b/examples/langchain/langchainpy-localai-example/requirements.txt @@ -3,7 +3,7 @@ aiosignal==1.3.1 async-timeout==4.0.3 attrs==24.2.0 certifi==2024.8.30 -charset-normalizer==3.3.2 +charset-normalizer==3.4.0 colorama==0.4.6 dataclasses-json==0.6.7 debugpy==1.8.7 From ddd289d1af5a40852d3f22ea8e26341453b33a20 Mon Sep 17 00:00:00 2001 From: "LocalAI [bot]" <139863280+localai-bot@users.noreply.github.com> Date: Tue, 15 Oct 2024 00:03:40 +0200 Subject: [PATCH 0340/1530] chore: :arrow_up: Update ggerganov/llama.cpp to `a89f75e1b7b90cb2d4d4c52ca53ef9e9b466aa45` (#3837) :arrow_up: Update ggerganov/llama.cpp Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com> Co-authored-by: mudler <2420543+mudler@users.noreply.github.com> --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index 739e0f9a..47c5de4f 100644 --- a/Makefile +++ b/Makefile @@ -8,7 +8,7 @@ DETECT_LIBS?=true # llama.cpp versions GOLLAMA_REPO?=https://github.com/go-skynet/go-llama.cpp GOLLAMA_VERSION?=2b57a8ae43e4699d3dc5d1496a1ccd42922993be -CPPLLAMA_VERSION?=d4c19c0f5cdb1e512573e8c86c79e8d0238c73c4 +CPPLLAMA_VERSION?=a89f75e1b7b90cb2d4d4c52ca53ef9e9b466aa45 # go-rwkv version RWKV_REPO?=https://github.com/donomii/go-rwkv.cpp From 13e7432b890c630ea7b888ff2aed7d5df4501b13 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 15 Oct 2024 09:39:55 +0200 Subject: [PATCH 0341/1530] chore(deps): Bump 
langchain-community from 0.3.1 to 0.3.2 in /examples/langchain/langchainpy-localai-example (#3831) chore(deps): Bump langchain-community Bumps [langchain-community](https://github.com/langchain-ai/langchain) from 0.3.1 to 0.3.2. - [Release notes](https://github.com/langchain-ai/langchain/releases) - [Commits](https://github.com/langchain-ai/langchain/compare/langchain-community==0.3.1...langchain-community==0.3.2) --- updated-dependencies: - dependency-name: langchain-community dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- examples/langchain/langchainpy-localai-example/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/langchain/langchainpy-localai-example/requirements.txt b/examples/langchain/langchainpy-localai-example/requirements.txt index ff0bcce6..647a95ca 100644 --- a/examples/langchain/langchainpy-localai-example/requirements.txt +++ b/examples/langchain/langchainpy-localai-example/requirements.txt @@ -11,7 +11,7 @@ frozenlist==1.4.1 greenlet==3.1.1 idna==3.10 langchain==0.3.3 -langchain-community==0.3.1 +langchain-community==0.3.2 marshmallow==3.22.0 marshmallow-enum==1.5.1 multidict==6.1.0 From 53d1db1da0aec8eb070146198d24282dba4f20cd Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 15 Oct 2024 09:40:06 +0200 Subject: [PATCH 0342/1530] chore(deps): Bump yarl from 1.15.1 to 1.15.2 in /examples/langchain/langchainpy-localai-example (#3832) chore(deps): Bump yarl Bumps [yarl](https://github.com/aio-libs/yarl) from 1.15.1 to 1.15.2. 
- [Release notes](https://github.com/aio-libs/yarl/releases) - [Changelog](https://github.com/aio-libs/yarl/blob/master/CHANGES.rst) - [Commits](https://github.com/aio-libs/yarl/compare/v1.15.1...v1.15.2) --- updated-dependencies: - dependency-name: yarl dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- examples/langchain/langchainpy-localai-example/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/langchain/langchainpy-localai-example/requirements.txt b/examples/langchain/langchainpy-localai-example/requirements.txt index 647a95ca..6916f4ce 100644 --- a/examples/langchain/langchainpy-localai-example/requirements.txt +++ b/examples/langchain/langchainpy-localai-example/requirements.txt @@ -30,4 +30,4 @@ tqdm==4.66.5 typing-inspect==0.9.0 typing_extensions==4.12.2 urllib3==2.2.3 -yarl==1.15.1 +yarl==1.15.2 From 18c35ee86f2990e0fec914cfa2de2cc62153015c Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 15 Oct 2024 09:40:30 +0200 Subject: [PATCH 0343/1530] chore(deps): Bump numpy from 2.1.1 to 2.1.2 in /examples/langchain/langchainpy-localai-example (#3833) chore(deps): Bump numpy Bumps [numpy](https://github.com/numpy/numpy) from 2.1.1 to 2.1.2. - [Release notes](https://github.com/numpy/numpy/releases) - [Changelog](https://github.com/numpy/numpy/blob/main/doc/RELEASE_WALKTHROUGH.rst) - [Commits](https://github.com/numpy/numpy/compare/v2.1.1...v2.1.2) --- updated-dependencies: - dependency-name: numpy dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- examples/langchain/langchainpy-localai-example/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/langchain/langchainpy-localai-example/requirements.txt b/examples/langchain/langchainpy-localai-example/requirements.txt index 6916f4ce..d4dfe947 100644 --- a/examples/langchain/langchainpy-localai-example/requirements.txt +++ b/examples/langchain/langchainpy-localai-example/requirements.txt @@ -17,7 +17,7 @@ marshmallow-enum==1.5.1 multidict==6.1.0 mypy-extensions==1.0.0 numexpr==2.10.1 -numpy==2.1.1 +numpy==2.1.2 openai==1.51.2 openapi-schema-pydantic==1.2.4 packaging>=23.2 From 18f9e11f1a35d29c688ce82e4a574f2d47f6d21d Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 15 Oct 2024 09:40:53 +0200 Subject: [PATCH 0344/1530] chore(deps): Bump docs/themes/hugo-theme-relearn from `e1a1f01` to `007cc20` (#3835) chore(deps): Bump docs/themes/hugo-theme-relearn Bumps [docs/themes/hugo-theme-relearn](https://github.com/McShelby/hugo-theme-relearn) from `e1a1f01` to `007cc20`. - [Release notes](https://github.com/McShelby/hugo-theme-relearn/releases) - [Commits](https://github.com/McShelby/hugo-theme-relearn/compare/e1a1f01f4c34b1980e07f81dd0c690e0969c00ca...007cc20686f04ca1f911975f20f097175dd72a7f) --- updated-dependencies: - dependency-name: docs/themes/hugo-theme-relearn dependency-type: direct:production ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- docs/themes/hugo-theme-relearn | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/themes/hugo-theme-relearn b/docs/themes/hugo-theme-relearn index e1a1f01f..007cc206 160000 --- a/docs/themes/hugo-theme-relearn +++ b/docs/themes/hugo-theme-relearn @@ -1 +1 @@ -Subproject commit e1a1f01f4c34b1980e07f81dd0c690e0969c00ca +Subproject commit 007cc20686f04ca1f911975f20f097175dd72a7f From 094f808549ff812b2273636d3404edb4715eed49 Mon Sep 17 00:00:00 2001 From: "LocalAI [bot]" <139863280+localai-bot@users.noreply.github.com> Date: Tue, 15 Oct 2024 09:41:11 +0200 Subject: [PATCH 0345/1530] chore: :arrow_up: Update ggerganov/whisper.cpp to `06a1da9daff94c1bf1b1d38950628264fe443f76` (#3836) :arrow_up: Update ggerganov/whisper.cpp Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com> Co-authored-by: mudler <2420543+mudler@users.noreply.github.com> --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index 47c5de4f..d917e689 100644 --- a/Makefile +++ b/Makefile @@ -16,7 +16,7 @@ RWKV_VERSION?=661e7ae26d442f5cfebd2a0881b44e8c55949ec6 # whisper.cpp version WHISPER_REPO?=https://github.com/ggerganov/whisper.cpp -WHISPER_CPP_VERSION?=fdbfb460ed546452a5d53611bba66d10d842e719 +WHISPER_CPP_VERSION?=06a1da9daff94c1bf1b1d38950628264fe443f76 # bert.cpp version BERT_REPO?=https://github.com/go-skynet/go-bert.cpp From 97cf028175037eb35f9519e2a26851572ec0eba7 Mon Sep 17 00:00:00 2001 From: Franco Lombardo Date: Tue, 15 Oct 2024 09:41:39 +0200 Subject: [PATCH 0346/1530] chore: update integrations.md with LLPhant (#3838) Signed-off-by: Franco Lombardo --- docs/content/docs/integrations.md | 1 + 1 file changed, 1 insertion(+) diff --git a/docs/content/docs/integrations.md b/docs/content/docs/integrations.md index 50f683c3..0939ecdb 100644 --- a/docs/content/docs/integrations.md 
+++ b/docs/content/docs/integrations.md @@ -28,5 +28,6 @@ The list below is a list of software that integrates with LocalAI. - https://github.com/cedriking/spark - [Big AGI](https://github.com/enricoros/big-agi) is a powerful web interface entirely running in the browser, supporting LocalAI - [Midori AI Subsystem Manager](https://io.midori-ai.xyz/subsystem/manager/) is a powerful docker subsystem for running all types of AI programs +- [LLPhant](https://github.com/theodo-group/LLPhant) is a PHP library for interacting with LLMs and Vector Databases Feel free to open up a Pull request (by clicking at the "Edit page" below) to get a page for your project made or if you see a error on one of the pages! From b82577d6423b2c18a8d14b93d05b4f484a80a197 Mon Sep 17 00:00:00 2001 From: Ettore Di Giacinto Date: Tue, 15 Oct 2024 09:41:53 +0200 Subject: [PATCH 0347/1530] fix(llama.cpp): consider also native builds (#3839) This is in order to identify also builds which are not using alternatives based on capabilities. For instance, there are cases when we build the backend only natively in the host. 
Signed-off-by: Ettore Di Giacinto --- pkg/model/initializers.go | 49 ++++++++++++++++++++++++++------------- 1 file changed, 33 insertions(+), 16 deletions(-) diff --git a/pkg/model/initializers.go b/pkg/model/initializers.go index c3b37179..bd668ec2 100644 --- a/pkg/model/initializers.go +++ b/pkg/model/initializers.go @@ -251,8 +251,22 @@ func selectGRPCProcessByHostCapabilities(backend, assetDir string, f16 bool) str // No GPU found or no specific binaries found, try to load the CPU variant(s) - // Select the Fallback by default - selectedProcess := backendPath(assetDir, LLamaCPPFallback) + // Select a binary based on availability/capability + selectedProcess := "" + + // Check if we have a native build (llama-cpp) and use that + if _, err := os.Stat(backendPath(assetDir, LLamaCPPFallback)); err == nil { + log.Debug().Msgf("[%s] %s variant available", LLamaCPPFallback, backend) + selectedProcess = backendPath(assetDir, LLamaCPPFallback) + } + + // Check if we have a native build (llama-cpp) and use that instead + // As a reminder, we do ultimately attempt again with the fallback variant + // If things fail with what we select here + if _, err := os.Stat(backendPath(assetDir, LLamaCPP)); err == nil { + log.Debug().Msgf("[%s] attempting to load with native variant", backend) + selectedProcess = backendPath(assetDir, LLamaCPP) + } // IF we find any optimized binary, we use that if xsysinfo.HasCPUCaps(cpuid.AVX2) { @@ -269,7 +283,7 @@ func selectGRPCProcessByHostCapabilities(backend, assetDir string, f16 bool) str } } - // Check if the binary exists! 
+ // Safety measure: check if the binary exists otherwise return empty string if _, err := os.Stat(selectedProcess); err == nil { return selectedProcess } @@ -277,6 +291,21 @@ func selectGRPCProcessByHostCapabilities(backend, assetDir string, f16 bool) str return "" } +func attemptLoadingOnFailure(backend string, ml *ModelLoader, o *Options, err error) (*Model, error) { + // XXX: This is too backend specific(llama-cpp), remove this bit or generalize further + // We failed somehow starting the binary. For instance, could be that we are missing + // some libraries if running in binary-only mode. + // In this case, we attempt to load the model with the fallback variant. + + // If not llama-cpp backend, return the error immediately + if backend != LLamaCPP { + return nil, err + } + + log.Error().Msgf("[%s] Failed loading model, trying with fallback '%s', error: %s", backend, LLamaCPPFallback, err.Error()) + return ml.LoadModel(o.modelID, o.model, ml.grpcModel(LLamaCPPFallback, false, o)) +} + // starts the grpcModelProcess for the backend, and returns a grpc client // It also loads the model func (ml *ModelLoader) grpcModel(backend string, autodetect bool, o *Options) func(string, string, string) (*Model, error) { @@ -450,19 +479,7 @@ func (ml *ModelLoader) BackendLoader(opts ...Option) (client grpc.Backend, err e model, err := ml.LoadModel(o.modelID, o.model, ml.grpcModel(backendToConsume, AutoDetect, o)) if err != nil { - // XXX: This is too backend specific(llama-cpp), remove this bit or generalize further - // We failed somehow starting the binary. For instance, could be that we are missing - // some libraries if running in binary-only mode. - // In this case, we attempt to load the model with the fallback variant. 
- - // If not llama-cpp backend, return error immediately - if backend != LLamaCPP { - return nil, err - } - - // Otherwise attempt with fallback - log.Error().Msgf("[%s] Failed loading model, trying with fallback '%s'", backend, LLamaCPPFallback) - model, err = ml.LoadModel(o.modelID, o.model, ml.grpcModel(LLamaCPPFallback, false, o)) + model, err = attemptLoadingOnFailure(backend, ml, o, err) if err != nil { return nil, err } From 5f130febb84ed812b145da68eca0bee2de32b875 Mon Sep 17 00:00:00 2001 From: "LocalAI [bot]" <139863280+localai-bot@users.noreply.github.com> Date: Tue, 15 Oct 2024 23:41:29 +0200 Subject: [PATCH 0348/1530] chore: :arrow_up: Update ggerganov/whisper.cpp to `b6049060dd2341b7816d2bce7dc7451c1665828e` (#3842) :arrow_up: Update ggerganov/whisper.cpp Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com> Co-authored-by: mudler <2420543+mudler@users.noreply.github.com> --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index d917e689..154ae558 100644 --- a/Makefile +++ b/Makefile @@ -16,7 +16,7 @@ RWKV_VERSION?=661e7ae26d442f5cfebd2a0881b44e8c55949ec6 # whisper.cpp version WHISPER_REPO?=https://github.com/ggerganov/whisper.cpp -WHISPER_CPP_VERSION?=06a1da9daff94c1bf1b1d38950628264fe443f76 +WHISPER_CPP_VERSION?=b6049060dd2341b7816d2bce7dc7451c1665828e # bert.cpp version BERT_REPO?=https://github.com/go-skynet/go-bert.cpp From 7ddf486b37d7fb13a4dd88070abbb14ad522a7ed Mon Sep 17 00:00:00 2001 From: "LocalAI [bot]" <139863280+localai-bot@users.noreply.github.com> Date: Wed, 16 Oct 2024 09:01:29 +0200 Subject: [PATCH 0349/1530] chore: :arrow_up: Update ggerganov/llama.cpp to `755a9b2bf00fbae988e03a47e852b66eaddd113a` (#3841) :arrow_up: Update ggerganov/llama.cpp Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com> Co-authored-by: mudler <2420543+mudler@users.noreply.github.com> --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 
deletion(-) diff --git a/Makefile b/Makefile index 154ae558..34da8b57 100644 --- a/Makefile +++ b/Makefile @@ -8,7 +8,7 @@ DETECT_LIBS?=true # llama.cpp versions GOLLAMA_REPO?=https://github.com/go-skynet/go-llama.cpp GOLLAMA_VERSION?=2b57a8ae43e4699d3dc5d1496a1ccd42922993be -CPPLLAMA_VERSION?=a89f75e1b7b90cb2d4d4c52ca53ef9e9b466aa45 +CPPLLAMA_VERSION?=755a9b2bf00fbae988e03a47e852b66eaddd113a # go-rwkv version RWKV_REPO?=https://github.com/donomii/go-rwkv.cpp From f166541ac336612dc710bda488f6ca0f56f56567 Mon Sep 17 00:00:00 2001 From: Ettore Di Giacinto Date: Wed, 16 Oct 2024 09:12:58 +0200 Subject: [PATCH 0350/1530] models(gallery): add llama-3.2-chibi-3b (#3843) Signed-off-by: Ettore Di Giacinto --- gallery/index.yaml | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/gallery/index.yaml b/gallery/index.yaml index 99d941a9..f13f9e5b 100644 --- a/gallery/index.yaml +++ b/gallery/index.yaml @@ -182,6 +182,21 @@ - filename: Fireball-Meta-Llama-3.2-8B-Instruct-agent-003-128k-code-DPO.Q4_K_M.gguf sha256: 7f45fa79bc6c9847ef9fbad08c3bb5a0f2dbb56d2e2200a5d37b260a57274e55 uri: huggingface://QuantFactory/Fireball-Meta-Llama-3.2-8B-Instruct-agent-003-128k-code-DPO-GGUF/Fireball-Meta-Llama-3.2-8B-Instruct-agent-003-128k-code-DPO.Q4_K_M.gguf +- !!merge <<: *llama32 + name: "llama-3.2-chibi-3b" + icon: https://huggingface.co/AELLM/Llama-3.2-Chibi-3B/resolve/main/chibi.jpg + urls: + - https://huggingface.co/AELLM/Llama-3.2-Chibi-3B + - https://huggingface.co/mradermacher/Llama-3.2-Chibi-3B-GGUF + description: | + Small parameter LLMs are ideal for navigating the complexities of the Japanese language, which involves multiple character systems like kanji, hiragana, and katakana, along with subtle social cues. Despite their smaller size, these models are capable of delivering highly accurate and context-aware results, making them perfect for use in environments where resources are constrained. 
Whether deployed on mobile devices with limited processing power or in edge computing scenarios where fast, real-time responses are needed, these models strike the perfect balance between performance and efficiency, without sacrificing quality or speed. + overrides: + parameters: + model: Llama-3.2-Chibi-3B.Q4_K_M.gguf + files: + - filename: Llama-3.2-Chibi-3B.Q4_K_M.gguf + sha256: 4b594cd5f66181202713f1cf97ce2f86d0acfa1b862a64930d5f512c45640a2f + uri: huggingface://mradermacher/Llama-3.2-Chibi-3B-GGUF/Llama-3.2-Chibi-3B.Q4_K_M.gguf - &qwen25 ## Qwen2.5 name: "qwen2.5-14b-instruct" From 6afe9c8fdaceb53aaf3bee95443694fe58a7cc5d Mon Sep 17 00:00:00 2001 From: Ettore Di Giacinto Date: Wed, 16 Oct 2024 09:15:10 +0200 Subject: [PATCH 0351/1530] models(gallery): add llama-3.2-3b-reasoning-time (#3844) Signed-off-by: Ettore Di Giacinto --- gallery/index.yaml | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/gallery/index.yaml b/gallery/index.yaml index f13f9e5b..cca66040 100644 --- a/gallery/index.yaml +++ b/gallery/index.yaml @@ -197,6 +197,19 @@ - filename: Llama-3.2-Chibi-3B.Q4_K_M.gguf sha256: 4b594cd5f66181202713f1cf97ce2f86d0acfa1b862a64930d5f512c45640a2f uri: huggingface://mradermacher/Llama-3.2-Chibi-3B-GGUF/Llama-3.2-Chibi-3B.Q4_K_M.gguf +- !!merge <<: *llama32 + name: "llama-3.2-3b-reasoning-time" + urls: + - https://huggingface.co/mradermacher/Llama-3.2-3B-Reasoning-Time-GGUF + description: | + Lyte/Llama-3.2-3B-Reasoning-Time is a large language model with 3.2 billion parameters, designed for reasoning and time-based tasks in English. It is based on the Llama architecture and has been quantized using the GGUF format by mradermacher. 
+ overrides: + parameters: + model: Llama-3.2-3B-Reasoning-Time.Q4_K_M.gguf + files: + - filename: Llama-3.2-3B-Reasoning-Time.Q4_K_M.gguf + sha256: 80b10e1a5c6e27f6d8cf08c3472af2b15a9f63ebf8385eedfe8615f85116c73f + uri: huggingface://mradermacher/Llama-3.2-3B-Reasoning-Time-GGUF/Llama-3.2-3B-Reasoning-Time.Q4_K_M.gguf - &qwen25 ## Qwen2.5 name: "qwen2.5-14b-instruct" From c9f28e2b5665603ad070c66ef42c2498baa2114a Mon Sep 17 00:00:00 2001 From: Ettore Di Giacinto Date: Wed, 16 Oct 2024 09:34:12 +0200 Subject: [PATCH 0352/1530] models(gallery): add ml-ms-etheris-123b (#3845) Signed-off-by: Ettore Di Giacinto --- gallery/index.yaml | 21 +++++++++++++++++++++ 1 file changed, 21 insertions(+) diff --git a/gallery/index.yaml b/gallery/index.yaml index cca66040..eaa7f993 100644 --- a/gallery/index.yaml +++ b/gallery/index.yaml @@ -2221,6 +2221,27 @@ - filename: MN-BackyardAI-Party-12B-v1-Q4_K_M-imat.gguf sha256: cea68768dff58b553974b755bb40ef790ab8b86866d9b5c46bc2e6c3311b876a uri: huggingface://Lewdiculous/MN-BackyardAI-Party-12B-v1-GGUF-IQ-ARM-Imatrix/MN-BackyardAI-Party-12B-v1-Q4_K_M-imat.gguf +- !!merge <<: *mistral03 + name: "ml-ms-etheris-123b" + url: "github:mudler/LocalAI/gallery/chatml.yaml@master" + icon: https://cdn-uploads.huggingface.co/production/uploads/64545af5ec40bbbd01242ca6/ieEjL3TxpDM3WAZQcya6E.png + urls: + - https://huggingface.co/Steelskull/ML-MS-Etheris-123B + - https://huggingface.co/mradermacher/ML-MS-Etheris-123B-GGUF + description: | + This model merges the robust storytelling of mutiple models while attempting to maintain intelligence. The final model was merged after Model Soup with DELLA to add some specal sause. 
+ - model: NeverSleep/Lumimaid-v0.2-123B + - model: TheDrummer/Behemoth-123B-v1 + - model: migtissera/Tess-3-Mistral-Large-2-123B + - model: anthracite-org/magnum-v2-123b + Use Mistral, ChatML, or Meth Format + overrides: + parameters: + model: ML-MS-Etheris-123B.Q2_K.gguf + files: + - filename: ML-MS-Etheris-123B.Q2_K.gguf + sha256: a17c5615413b5c9c8d01cf55386573d0acd00e01f6e2bcdf492624c73c593fc3 + uri: huggingface://mradermacher/ML-MS-Etheris-123B-GGUF/ML-MS-Etheris-123B.Q2_K.gguf - &mudler ### START mudler's LocalAI specific-models url: "github:mudler/LocalAI/gallery/mudler.yaml@master" From 41db6668f0a9c5511fbf0bbf58627f18363597eb Mon Sep 17 00:00:00 2001 From: Ettore Di Giacinto Date: Wed, 16 Oct 2024 09:34:57 +0200 Subject: [PATCH 0353/1530] models(gallery): add doctoraifinetune-3.1-8b-i1 (#3846) Signed-off-by: Ettore Di Giacinto --- gallery/index.yaml | 23 +++++++++++++++++++++++ 1 file changed, 23 insertions(+) diff --git a/gallery/index.yaml b/gallery/index.yaml index eaa7f993..198d8b2b 100644 --- a/gallery/index.yaml +++ b/gallery/index.yaml @@ -1597,6 +1597,29 @@ - filename: hermes-3-llama-3.1-8b-lorablated.Q4_K_M.gguf sha256: 8cff9d399a0583616fe1f290da6daa091ab5c5493d0e173a8fffb45202d79417 uri: huggingface://mlabonne/Hermes-3-Llama-3.1-8B-lorablated-GGUF/hermes-3-llama-3.1-8b-lorablated.Q4_K_M.gguf +- !!merge <<: *llama31 + name: "doctoraifinetune-3.1-8b-i1" + urls: + - https://huggingface.co/huzaifa525/Doctoraifinetune-3.1-8B + - https://huggingface.co/mradermacher/Doctoraifinetune-3.1-8B-i1-GGUF + description: | + This is a fine-tuned version of the Meta-Llama-3.1-8B-bnb-4bit model, specifically adapted for the medical field. It has been trained using a dataset that provides extensive information on diseases, symptoms, and treatments, making it ideal for AI-powered healthcare tools such as medical chatbots, virtual assistants, and diagnostic support systems. 
+ Key Features + + Disease Diagnosis: Accurately identifies diseases based on symptoms provided by the user. + Symptom Analysis: Breaks down and interprets symptoms to provide a comprehensive medical overview. + Treatment Recommendations: Suggests treatments and remedies according to medical conditions. + + Dataset + + The model is fine-tuned on 2000 rows from a dataset consisting of 272k rows. This dataset includes rich information about diseases, symptoms, and their corresponding treatments. The model is continuously being updated and will be further trained on the remaining data in future releases to improve accuracy and capabilities. + overrides: + parameters: + model: Doctoraifinetune-3.1-8B.i1-Q4_K_M.gguf + files: + - filename: Doctoraifinetune-3.1-8B.i1-Q4_K_M.gguf + sha256: 282456efcb6c7e54d34ac25ae7fc022a94152ed77281ae4625b9628091e0a3d6 + uri: huggingface://mradermacher/Doctoraifinetune-3.1-8B-i1-GGUF/Doctoraifinetune-3.1-8B.i1-Q4_K_M.gguf - &deepseek ## Deepseek url: "github:mudler/LocalAI/gallery/deepseek.yaml@master" From 585e0745dae17670d0067038739ce7a5347976de Mon Sep 17 00:00:00 2001 From: Ettore Di Giacinto Date: Wed, 16 Oct 2024 18:28:51 +0200 Subject: [PATCH 0354/1530] models(gallery): add astral-fusion-neural-happy-l3.1-8b (#3848) Signed-off-by: Ettore Di Giacinto --- gallery/index.yaml | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/gallery/index.yaml b/gallery/index.yaml index 198d8b2b..80fec85b 100644 --- a/gallery/index.yaml +++ b/gallery/index.yaml @@ -1620,6 +1620,22 @@ - filename: Doctoraifinetune-3.1-8B.i1-Q4_K_M.gguf sha256: 282456efcb6c7e54d34ac25ae7fc022a94152ed77281ae4625b9628091e0a3d6 uri: huggingface://mradermacher/Doctoraifinetune-3.1-8B-i1-GGUF/Doctoraifinetune-3.1-8B.i1-Q4_K_M.gguf +- !!merge <<: *llama31 + name: "astral-fusion-neural-happy-l3.1-8b" + urls: + - https://huggingface.co/ZeroXClem/Astral-Fusion-Neural-Happy-L3.1-8B + - https://huggingface.co/mradermacher/Astral-Fusion-Neural-Happy-L3.1-8B-GGUF + 
description: | + Astral-Fusion-Neural-Happy-L3.1-8B is a celestial blend of magic, creativity, and dynamic storytelling. Designed to excel in instruction-following, immersive roleplaying, and magical narrative generation, this model is a fusion of the finest qualities from Astral-Fusion, NIHAPPY, and NeuralMahou. ✨🚀 + + This model is perfect for anyone seeking a cosmic narrative experience, with the ability to generate both precise instructional content and fantastical stories in one cohesive framework. Whether you're crafting immersive stories, creating AI roleplaying characters, or working on interactive storytelling, this model brings out the magic. 🌟 + overrides: + parameters: + model: Astral-Fusion-Neural-Happy-L3.1-8B.Q4_K_M.gguf + files: + - filename: Astral-Fusion-Neural-Happy-L3.1-8B.Q4_K_M.gguf + sha256: 14a3b07c1723ef1ca24f99382254b1227d95974541e23792a4e7ff621896055d + uri: huggingface://mradermacher/Astral-Fusion-Neural-Happy-L3.1-8B-GGUF/Astral-Fusion-Neural-Happy-L3.1-8B.Q4_K_M.gguf - &deepseek ## Deepseek url: "github:mudler/LocalAI/gallery/deepseek.yaml@master" From 773cec77a2aa63094c8c6b0b5e287ee4cd635a99 Mon Sep 17 00:00:00 2001 From: Ettore Di Giacinto Date: Wed, 16 Oct 2024 18:31:50 +0200 Subject: [PATCH 0355/1530] models(gallery): add tsunami-0.5x-7b-instruct-i1 (#3849) Signed-off-by: Ettore Di Giacinto --- gallery/index.yaml | 20 ++++++++++++++++++++ 1 file changed, 20 insertions(+) diff --git a/gallery/index.yaml b/gallery/index.yaml index 80fec85b..c2e105c4 100644 --- a/gallery/index.yaml +++ b/gallery/index.yaml @@ -572,6 +572,26 @@ - filename: EdgeRunner-Command-Nested.i1-Q4_K_M.gguf sha256: a1cc4d2b601dc20e58cbb549bd3e9bc460995840c0aaf1cd3c1cb5414c900ac7 uri: huggingface://mradermacher/EdgeRunner-Command-Nested-i1-GGUF/EdgeRunner-Command-Nested.i1-Q4_K_M.gguf +- !!merge <<: *qwen25 + name: "tsunami-0.5x-7b-instruct-i1" + icon: https://huggingface.co/Tsunami-th/Tsunami-0.5x-7B-Instruct/resolve/main/Tsunami.webp + urls: + - 
https://huggingface.co/Tsunami-th/Tsunami-0.5x-7B-Instruct + - https://huggingface.co/mradermacher/Tsunami-0.5x-7B-Instruct-i1-GGUF + description: | + TSUNAMI: Transformative Semantic Understanding and Natural Augmentation Model for Intelligence. + + TSUNAMI full name was created by ChatGPT. + infomation + + Tsunami-0.5x-7B-Instruct is Thai Large Language Model that fine-tuned from Qwen2.5-7B around 100,000 rows in Thai dataset. + overrides: + parameters: + model: Tsunami-0.5x-7B-Instruct.i1-Q4_K_M.gguf + files: + - filename: Tsunami-0.5x-7B-Instruct.i1-Q4_K_M.gguf + sha256: 22e2003ecec7f1e91f2e9aaec334613c0f37fb3000d0e628b5a9980e53322fa7 + uri: huggingface://mradermacher/Tsunami-0.5x-7B-Instruct-i1-GGUF/Tsunami-0.5x-7B-Instruct.i1-Q4_K_M.gguf - &archfunct license: apache-2.0 tags: From fdf1452c6b5076e351838c79478b9ba8f409b39c Mon Sep 17 00:00:00 2001 From: Ettore Di Giacinto Date: Wed, 16 Oct 2024 18:37:01 +0200 Subject: [PATCH 0356/1530] models(gallery): add mahou-1.5-llama3.1-70b-i1 (#3850) Signed-off-by: Ettore Di Giacinto --- gallery/index.yaml | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/gallery/index.yaml b/gallery/index.yaml index c2e105c4..b1dbf20b 100644 --- a/gallery/index.yaml +++ b/gallery/index.yaml @@ -1656,6 +1656,22 @@ - filename: Astral-Fusion-Neural-Happy-L3.1-8B.Q4_K_M.gguf sha256: 14a3b07c1723ef1ca24f99382254b1227d95974541e23792a4e7ff621896055d uri: huggingface://mradermacher/Astral-Fusion-Neural-Happy-L3.1-8B-GGUF/Astral-Fusion-Neural-Happy-L3.1-8B.Q4_K_M.gguf +- !!merge <<: *llama31 + name: "mahou-1.5-llama3.1-70b-i1" + url: "github:mudler/LocalAI/gallery/chatml.yaml@master" + icon: https://huggingface.co/flammenai/Mahou-1.0-mistral-7B/resolve/main/mahou1.png + urls: + - https://huggingface.co/flammenai/Mahou-1.5-llama3.1-70B + - https://huggingface.co/mradermacher/Mahou-1.5-llama3.1-70B-i1-GGUF + description: | + Mahou is designed to provide short messages in a conversational context. 
It is capable of casual conversation and character roleplay. + overrides: + parameters: + model: Mahou-1.5-llama3.1-70B.i1-Q4_K_M.gguf + files: + - filename: Mahou-1.5-llama3.1-70B.i1-Q4_K_M.gguf + sha256: c2711c4c9c8d011edbeaa391b4418d433e273a318d1de3dbdda9b85baf4996f2 + uri: huggingface://mradermacher/Mahou-1.5-llama3.1-70B-i1-GGUF/Mahou-1.5-llama3.1-70B.i1-Q4_K_M.gguf - &deepseek ## Deepseek url: "github:mudler/LocalAI/gallery/deepseek.yaml@master" From 1b44a5a3b7a94f102ec0bfe11d2cfdfc5afac788 Mon Sep 17 00:00:00 2001 From: Ettore Di Giacinto Date: Wed, 16 Oct 2024 18:39:28 +0200 Subject: [PATCH 0357/1530] chore(deps): bump grpcio to 1.67.0 (#3851) Signed-off-by: Ettore Di Giacinto --- backend/python/autogptq/requirements.txt | 2 +- backend/python/bark/requirements.txt | 2 +- backend/python/common/template/requirements.txt | 2 +- backend/python/coqui/requirements.txt | 2 +- backend/python/diffusers/requirements.txt | 2 +- backend/python/exllama2/requirements.txt | 2 +- backend/python/mamba/requirements.txt | 2 +- backend/python/openvoice/requirements-intel.txt | 2 +- backend/python/openvoice/requirements.txt | 2 +- backend/python/parler-tts/requirements.txt | 2 +- backend/python/rerankers/requirements.txt | 2 +- backend/python/sentencetransformers/requirements.txt | 2 +- backend/python/transformers-musicgen/requirements.txt | 2 +- backend/python/transformers/requirements.txt | 2 +- backend/python/vall-e-x/requirements.txt | 2 +- backend/python/vllm/install.sh | 2 +- backend/python/vllm/requirements.txt | 2 +- 17 files changed, 17 insertions(+), 17 deletions(-) diff --git a/backend/python/autogptq/requirements.txt b/backend/python/autogptq/requirements.txt index 9cb6ce94..7e66f084 100644 --- a/backend/python/autogptq/requirements.txt +++ b/backend/python/autogptq/requirements.txt @@ -1,6 +1,6 @@ accelerate auto-gptq==0.7.1 -grpcio==1.66.2 +grpcio==1.67.0 protobuf certifi transformers \ No newline at end of file diff --git a/backend/python/bark/requirements.txt 
b/backend/python/bark/requirements.txt index 6e46924a..d1a90719 100644 --- a/backend/python/bark/requirements.txt +++ b/backend/python/bark/requirements.txt @@ -1,4 +1,4 @@ bark==0.1.5 -grpcio==1.66.2 +grpcio==1.67.0 protobuf certifi \ No newline at end of file diff --git a/backend/python/common/template/requirements.txt b/backend/python/common/template/requirements.txt index 540c0eb5..16716764 100644 --- a/backend/python/common/template/requirements.txt +++ b/backend/python/common/template/requirements.txt @@ -1,2 +1,2 @@ -grpcio==1.66.2 +grpcio==1.67.0 protobuf \ No newline at end of file diff --git a/backend/python/coqui/requirements.txt b/backend/python/coqui/requirements.txt index 29484f7d..0ad62f70 100644 --- a/backend/python/coqui/requirements.txt +++ b/backend/python/coqui/requirements.txt @@ -1,4 +1,4 @@ coqui-tts -grpcio==1.66.2 +grpcio==1.67.0 protobuf certifi \ No newline at end of file diff --git a/backend/python/diffusers/requirements.txt b/backend/python/diffusers/requirements.txt index 730e316f..624b048e 100644 --- a/backend/python/diffusers/requirements.txt +++ b/backend/python/diffusers/requirements.txt @@ -1,5 +1,5 @@ setuptools -grpcio==1.66.2 +grpcio==1.67.0 pillow protobuf certifi diff --git a/backend/python/exllama2/requirements.txt b/backend/python/exllama2/requirements.txt index e3db2b2f..8a0d9a17 100644 --- a/backend/python/exllama2/requirements.txt +++ b/backend/python/exllama2/requirements.txt @@ -1,4 +1,4 @@ -grpcio==1.66.2 +grpcio==1.67.0 protobuf certifi wheel diff --git a/backend/python/mamba/requirements.txt b/backend/python/mamba/requirements.txt index 83ae4279..6be5d8ac 100644 --- a/backend/python/mamba/requirements.txt +++ b/backend/python/mamba/requirements.txt @@ -1,3 +1,3 @@ -grpcio==1.66.2 +grpcio==1.67.0 protobuf certifi \ No newline at end of file diff --git a/backend/python/openvoice/requirements-intel.txt b/backend/python/openvoice/requirements-intel.txt index 687efe78..b446386f 100644 --- 
a/backend/python/openvoice/requirements-intel.txt +++ b/backend/python/openvoice/requirements-intel.txt @@ -2,7 +2,7 @@ intel-extension-for-pytorch torch optimum[openvino] -grpcio==1.66.2 +grpcio==1.67.0 protobuf librosa==0.9.1 faster-whisper==1.0.3 diff --git a/backend/python/openvoice/requirements.txt b/backend/python/openvoice/requirements.txt index 6ee29ce4..fd1268d0 100644 --- a/backend/python/openvoice/requirements.txt +++ b/backend/python/openvoice/requirements.txt @@ -1,4 +1,4 @@ -grpcio==1.66.2 +grpcio==1.67.0 protobuf librosa faster-whisper diff --git a/backend/python/parler-tts/requirements.txt b/backend/python/parler-tts/requirements.txt index d7f36feb..ff9adca9 100644 --- a/backend/python/parler-tts/requirements.txt +++ b/backend/python/parler-tts/requirements.txt @@ -1,4 +1,4 @@ -grpcio==1.66.2 +grpcio==1.67.0 protobuf certifi llvmlite==0.43.0 \ No newline at end of file diff --git a/backend/python/rerankers/requirements.txt b/backend/python/rerankers/requirements.txt index 83ae4279..6be5d8ac 100644 --- a/backend/python/rerankers/requirements.txt +++ b/backend/python/rerankers/requirements.txt @@ -1,3 +1,3 @@ -grpcio==1.66.2 +grpcio==1.67.0 protobuf certifi \ No newline at end of file diff --git a/backend/python/sentencetransformers/requirements.txt b/backend/python/sentencetransformers/requirements.txt index 40a387f1..36ce8a6f 100644 --- a/backend/python/sentencetransformers/requirements.txt +++ b/backend/python/sentencetransformers/requirements.txt @@ -1,4 +1,4 @@ -grpcio==1.66.2 +grpcio==1.67.0 protobuf certifi datasets diff --git a/backend/python/transformers-musicgen/requirements.txt b/backend/python/transformers-musicgen/requirements.txt index a3f66651..ea0e3fa9 100644 --- a/backend/python/transformers-musicgen/requirements.txt +++ b/backend/python/transformers-musicgen/requirements.txt @@ -1,4 +1,4 @@ -grpcio==1.66.2 +grpcio==1.67.0 protobuf scipy==1.14.0 certifi \ No newline at end of file diff --git 
a/backend/python/transformers/requirements.txt b/backend/python/transformers/requirements.txt index 084cc034..d006cf0e 100644 --- a/backend/python/transformers/requirements.txt +++ b/backend/python/transformers/requirements.txt @@ -1,4 +1,4 @@ -grpcio==1.66.2 +grpcio==1.67.0 protobuf certifi setuptools==69.5.1 # https://github.com/mudler/LocalAI/issues/2406 \ No newline at end of file diff --git a/backend/python/vall-e-x/requirements.txt b/backend/python/vall-e-x/requirements.txt index 83ae4279..6be5d8ac 100644 --- a/backend/python/vall-e-x/requirements.txt +++ b/backend/python/vall-e-x/requirements.txt @@ -1,3 +1,3 @@ -grpcio==1.66.2 +grpcio==1.67.0 protobuf certifi \ No newline at end of file diff --git a/backend/python/vllm/install.sh b/backend/python/vllm/install.sh index 9078b81b..69e74a06 100755 --- a/backend/python/vllm/install.sh +++ b/backend/python/vllm/install.sh @@ -22,7 +22,7 @@ if [ "x${BUILD_TYPE}" == "x" ] && [ "x${FROM_SOURCE}" == "xtrue" ]; then git clone https://github.com/vllm-project/vllm fi pushd vllm - uv pip install wheel packaging ninja "setuptools>=49.4.0" numpy typing-extensions pillow setuptools-scm grpcio==1.66.2 protobuf bitsandbytes + uv pip install wheel packaging ninja "setuptools>=49.4.0" numpy typing-extensions pillow setuptools-scm grpcio==1.67.0 protobuf bitsandbytes uv pip install -v -r requirements-cpu.txt --extra-index-url https://download.pytorch.org/whl/cpu VLLM_TARGET_DEVICE=cpu python setup.py install popd diff --git a/backend/python/vllm/requirements.txt b/backend/python/vllm/requirements.txt index 8fb8a418..95447f74 100644 --- a/backend/python/vllm/requirements.txt +++ b/backend/python/vllm/requirements.txt @@ -1,4 +1,4 @@ -grpcio==1.66.2 +grpcio==1.67.0 protobuf certifi setuptools \ No newline at end of file From a60b9b7a381d5ed7cd657f4d0ba042b931215e50 Mon Sep 17 00:00:00 2001 From: "LocalAI [bot]" <139863280+localai-bot@users.noreply.github.com> Date: Wed, 16 Oct 2024 23:41:30 +0200 Subject: [PATCH 0358/1530] chore: 
:arrow_up: Update ggerganov/llama.cpp to `9e041024481f6b249ab8918e18b9477f873b5a5e` (#3853) :arrow_up: Update ggerganov/llama.cpp Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com> Co-authored-by: mudler <2420543+mudler@users.noreply.github.com> --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index 34da8b57..aea5e157 100644 --- a/Makefile +++ b/Makefile @@ -8,7 +8,7 @@ DETECT_LIBS?=true # llama.cpp versions GOLLAMA_REPO?=https://github.com/go-skynet/go-llama.cpp GOLLAMA_VERSION?=2b57a8ae43e4699d3dc5d1496a1ccd42922993be -CPPLLAMA_VERSION?=755a9b2bf00fbae988e03a47e852b66eaddd113a +CPPLLAMA_VERSION?=9e041024481f6b249ab8918e18b9477f873b5a5e # go-rwkv version RWKV_REPO?=https://github.com/donomii/go-rwkv.cpp From 1a9299a7c026767d3ceb4f7a3dfe53f11e9d4c15 Mon Sep 17 00:00:00 2001 From: "LocalAI [bot]" <139863280+localai-bot@users.noreply.github.com> Date: Thu, 17 Oct 2024 09:21:54 +0200 Subject: [PATCH 0359/1530] chore: :arrow_up: Update ggerganov/whisper.cpp to `d3f7137cc9befa6d74dc4085de2b664b97b7c8bb` (#3852) :arrow_up: Update ggerganov/whisper.cpp Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com> Co-authored-by: mudler <2420543+mudler@users.noreply.github.com> --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index aea5e157..374b0e00 100644 --- a/Makefile +++ b/Makefile @@ -16,7 +16,7 @@ RWKV_VERSION?=661e7ae26d442f5cfebd2a0881b44e8c55949ec6 # whisper.cpp version WHISPER_REPO?=https://github.com/ggerganov/whisper.cpp -WHISPER_CPP_VERSION?=b6049060dd2341b7816d2bce7dc7451c1665828e +WHISPER_CPP_VERSION?=d3f7137cc9befa6d74dc4085de2b664b97b7c8bb # bert.cpp version BERT_REPO?=https://github.com/go-skynet/go-bert.cpp From cdcfb2617ccaaf306e29a4c21be0b79270145e29 Mon Sep 17 00:00:00 2001 From: Ettore Di Giacinto Date: Thu, 17 Oct 2024 09:46:26 +0200 Subject: [PATCH 0360/1530] Update README.md 
Signed-off-by: Ettore Di Giacinto --- README.md | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/README.md b/README.md index 44beeb71..7647105b 100644 --- a/README.md +++ b/README.md @@ -66,6 +66,19 @@ docker run -ti --name local-ai -p 8080:8080 localai/localai:latest-aio-cpu # docker run -ti --name local-ai -p 8080:8080 --gpus all localai/localai:latest-gpu-nvidia-cuda-12 ``` +To load models: + +```bash +# Start LocalAI with the phi-2 model +local-ai run huggingface://TheBloke/phi-2-GGUF/phi-2.Q8_0.gguf +# Install and run a model from the Ollama OCI registry +local-ai run ollama://gemma:2b +# Run a model from a configuration file +local-ai run https://gist.githubusercontent.com/.../phi-2.yaml +# Install and run a model from a standard OCI registry (e.g., Docker Hub) +local-ai run oci://localai/phi-2:latest +``` + [💻 Getting started](https://localai.io/basics/getting_started/index.html) ## 📰 Latest project news From 92cd5388298a629f8d90d70e6b23b6c6cae3eda4 Mon Sep 17 00:00:00 2001 From: Ettore Di Giacinto Date: Thu, 17 Oct 2024 09:56:07 +0200 Subject: [PATCH 0361/1530] models(gallery): add llama-3.1-nemotron-70b-instruct-hf (#3854) Signed-off-by: Ettore Di Giacinto --- gallery/index.yaml | 22 ++++++++++++++++++++++ 1 file changed, 22 insertions(+) diff --git a/gallery/index.yaml b/gallery/index.yaml index b1dbf20b..f0565013 100644 --- a/gallery/index.yaml +++ b/gallery/index.yaml @@ -1672,6 +1672,28 @@ - filename: Mahou-1.5-llama3.1-70B.i1-Q4_K_M.gguf sha256: c2711c4c9c8d011edbeaa391b4418d433e273a318d1de3dbdda9b85baf4996f2 uri: huggingface://mradermacher/Mahou-1.5-llama3.1-70B-i1-GGUF/Mahou-1.5-llama3.1-70B.i1-Q4_K_M.gguf +- !!merge <<: *llama31 + name: "llama-3.1-nemotron-70b-instruct-hf" + urls: + - https://huggingface.co/nvidia/Llama-3.1-Nemotron-70B-Instruct-HF + - https://huggingface.co/mradermacher/Llama-3.1-Nemotron-70B-Instruct-HF-GGUF + description: | + Llama-3.1-Nemotron-70B-Instruct is a large language model customized by NVIDIA to 
improve the helpfulness of LLM generated responses to user queries. + + This model reaches Arena Hard of 85.0, AlpacaEval 2 LC of 57.6 and GPT-4-Turbo MT-Bench of 8.98, which are known to be predictive of LMSys Chatbot Arena Elo + + As of 1 Oct 2024, this model is #1 on all three automatic alignment benchmarks (verified tab for AlpacaEval 2 LC), edging out strong frontier models such as GPT-4o and Claude 3.5 Sonnet. + + This model was trained using RLHF (specifically, REINFORCE), Llama-3.1-Nemotron-70B-Reward and HelpSteer2-Preference prompts on a Llama-3.1-70B-Instruct model as the initial policy. + + Llama-3.1-Nemotron-70B-Instruct-HF has been converted from Llama-3.1-Nemotron-70B-Instruct to support it in the HuggingFace Transformers codebase. Please note that evaluation results might be slightly different from the Llama-3.1-Nemotron-70B-Instruct as evaluated in NeMo-Aligner, which the evaluation results below are based on. + overrides: + parameters: + model: Llama-3.1-Nemotron-70B-Instruct-HF.Q4_K_M.gguf + files: + - filename: Llama-3.1-Nemotron-70B-Instruct-HF.Q4_K_M.gguf + sha256: b6b80001b849e3c59c39b09508c018b35b491a5c7bbafafa23f2fc04243f3e30 + uri: huggingface://mradermacher/Llama-3.1-Nemotron-70B-Instruct-HF-GGUF/Llama-3.1-Nemotron-70B-Instruct-HF.Q4_K_M.gguf - &deepseek ## Deepseek url: "github:mudler/LocalAI/gallery/deepseek.yaml@master" From 98dfa363db0e8f6f7f09f1739e726fb07cc1c3ce Mon Sep 17 00:00:00 2001 From: Ettore Di Giacinto Date: Thu, 17 Oct 2024 09:59:42 +0200 Subject: [PATCH 0362/1530] models(gallery): add qevacot-7b-v2 (#3855) Signed-off-by: Ettore Di Giacinto --- gallery/index.yaml | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) diff --git a/gallery/index.yaml b/gallery/index.yaml index f0565013..ae83f599 100644 --- a/gallery/index.yaml +++ b/gallery/index.yaml @@ -592,6 +592,25 @@ - filename: Tsunami-0.5x-7B-Instruct.i1-Q4_K_M.gguf sha256: 22e2003ecec7f1e91f2e9aaec334613c0f37fb3000d0e628b5a9980e53322fa7 uri: 
huggingface://mradermacher/Tsunami-0.5x-7B-Instruct-i1-GGUF/Tsunami-0.5x-7B-Instruct.i1-Q4_K_M.gguf +- !!merge <<: *qwen25 + name: "qevacot-7b-v2" + urls: + - https://huggingface.co/bunnycore/Qevacot-7B-v2 + - https://huggingface.co/mradermacher/Qevacot-7B-v2-GGUF + description: | + This model was merged using the TIES merge method using Qwen/Qwen2.5-7B as a base. + The following models were included in the merge: + c10x/CoT-2.5 + EVA-UNIT-01/EVA-Qwen2.5-7B-v0.1 + huihui-ai/Qwen2.5-7B-Instruct-abliterated-v2 + Cran-May/T.E-8.1 + overrides: + parameters: + model: Qevacot-7B-v2.Q4_K_M.gguf + files: + - filename: Qevacot-7B-v2.Q4_K_M.gguf + sha256: a45b3d3b74bc68a5c7ac07d251cdeff671e64085d1816cd86fca6cfb7eab204e + uri: huggingface://mradermacher/Qevacot-7B-v2-GGUF/Qevacot-7B-v2.Q4_K_M.gguf - &archfunct license: apache-2.0 tags: From 5927f9e43e23f12ba1a5aab649ad47467aadc22f Mon Sep 17 00:00:00 2001 From: Ettore Di Giacinto Date: Thu, 17 Oct 2024 10:03:08 +0200 Subject: [PATCH 0363/1530] models(gallery): add l3.1-etherealrainbow-v1.0-rc1-8b (#3856) Signed-off-by: Ettore Di Giacinto --- gallery/index.yaml | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) diff --git a/gallery/index.yaml b/gallery/index.yaml index ae83f599..77376e91 100644 --- a/gallery/index.yaml +++ b/gallery/index.yaml @@ -1713,6 +1713,25 @@ - filename: Llama-3.1-Nemotron-70B-Instruct-HF.Q4_K_M.gguf sha256: b6b80001b849e3c59c39b09508c018b35b491a5c7bbafafa23f2fc04243f3e30 uri: huggingface://mradermacher/Llama-3.1-Nemotron-70B-Instruct-HF-GGUF/Llama-3.1-Nemotron-70B-Instruct-HF.Q4_K_M.gguf +- !!merge <<: *llama31 + name: "l3.1-etherealrainbow-v1.0-rc1-8b" + icon: https://huggingface.co/invisietch/L3.1-EtherealRainbow-v1.0-rc1-8B/resolve/main/header.png + urls: + - https://huggingface.co/invisietch/L3.1-EtherealRainbow-v1.0-rc1-8B + - https://huggingface.co/mradermacher/L3.1-EtherealRainbow-v1.0-rc1-8B-GGUF + description: | + Ethereal Rainbow v1.0 is the sequel to the popular Llama 3 8B merge, 
EtherealRainbow v0.3. Instead of a straight merge of other peoples' models, v1.0 is a finetune on the Instruct model, using 245 million tokens of training data (approx 177 million of these tokens are my own novel datasets). + + This model is designed to be suitable for creative writing and roleplay, and to push the boundaries of what's possible with an 8B model. This RC is not a finished product, but your feedback will drive the creation of better models. + + This is a release candidate model. It has some known issues and probably some unknown ones too, because the purpose of these early releases is to seek feedback. + overrides: + parameters: + model: L3.1-EtherealRainbow-v1.0-rc1-8B.Q4_K_M.gguf + files: + - filename: L3.1-EtherealRainbow-v1.0-rc1-8B.Q4_K_M.gguf + sha256: c5556b2563112e512acca171415783f0988545b02c1834696c1cc35952def72c + uri: huggingface://mradermacher/L3.1-EtherealRainbow-v1.0-rc1-8B-GGUF/L3.1-EtherealRainbow-v1.0-rc1-8B.Q4_K_M.gguf - &deepseek ## Deepseek url: "github:mudler/LocalAI/gallery/deepseek.yaml@master" From 056d4b4fc9bf548aa0771e04162a4b823fdae563 Mon Sep 17 00:00:00 2001 From: Ettore Di Giacinto Date: Thu, 17 Oct 2024 10:06:38 +0200 Subject: [PATCH 0364/1530] models(gallery): add phi-3.5-mini-titanfusion-0.2 (#3857) Signed-off-by: Ettore Di Giacinto --- gallery/index.yaml | 21 +++++++++++++++++++++ 1 file changed, 21 insertions(+) diff --git a/gallery/index.yaml b/gallery/index.yaml index 77376e91..92ad3c28 100644 --- a/gallery/index.yaml +++ b/gallery/index.yaml @@ -6095,6 +6095,27 @@ - filename: calme-2.1-phi3.5-4b.i1-Q4_K_M.gguf sha256: 989eccacd52b6d9ebf2c06c35c363da19aadb125659a10df299b7130bc293e77 uri: huggingface://mradermacher/calme-2.1-phi3.5-4b-i1-GGUF/calme-2.1-phi3.5-4b.i1-Q4_K_M.gguf +- !!merge <<: *phi-3 + name: "phi-3.5-mini-titanfusion-0.2" + urls: + - https://huggingface.co/bunnycore/Phi-3.5-mini-TitanFusion-0.2 + - https://huggingface.co/mradermacher/Phi-3.5-mini-TitanFusion-0.2-GGUF + description: | + This model was 
merged using the TIES merge method using microsoft/Phi-3.5-mini-instruct as a base. + The following models were included in the merge: + nbeerbower/phi3.5-gutenberg-4B + ArliAI/Phi-3.5-mini-3.8B-ArliAI-RPMax-v1.1 + bunnycore/Phi-3.5-Mini-Hyper + bunnycore/Phi-3.5-Mini-Hyper + bunnycore/Phi-3.1-EvolKit-lora + bunnycore/Phi-3.5-Mini-Sonet-RP + bunnycore/Phi-3.5-mini-TitanFusion-0.1 + overrides: + parameters: + model: Phi-3.5-mini-TitanFusion-0.2.Q4_K_M.gguf + files: + - filename: Phi-3.5-mini-TitanFusion-0.2.Q4_K_M.gguf + sha256: 9579305712f2bca246914639c4873acdc1e7bc64ac2c7db0230df4f0ca0ef234 + uri: huggingface://mradermacher/Phi-3.5-mini-TitanFusion-0.2-GGUF/Phi-3.5-mini-TitanFusion-0.2.Q4_K_M.gguf - &hermes-2-pro-mistral ### START Hermes url: "github:mudler/LocalAI/gallery/hermes-2-pro-mistral.yaml@master" From bc7d4586ed8c85d0f8a2f4fcc30b4a4c2e10da55 Mon Sep 17 00:00:00 2001 From: Ettore Di Giacinto Date: Thu, 17 Oct 2024 10:08:57 +0200 Subject: [PATCH 0365/1530] models(gallery): add mn-lulanum-12b-fix-i1 (#3859) Signed-off-by: Ettore Di Giacinto --- gallery/index.yaml | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) diff --git a/gallery/index.yaml b/gallery/index.yaml index 92ad3c28..d06897cb 100644 --- a/gallery/index.yaml +++ b/gallery/index.yaml @@ -2377,6 +2377,25 @@ - filename: ML-MS-Etheris-123B.Q2_K.gguf sha256: a17c5615413b5c9c8d01cf55386573d0acd00e01f6e2bcdf492624c73c593fc3 uri: huggingface://mradermacher/ML-MS-Etheris-123B-GGUF/ML-MS-Etheris-123B.Q2_K.gguf +- !!merge <<: *mistral03 + name: "mn-lulanum-12b-fix-i1" + urls: + - https://huggingface.co/djuna/MN-Lulanum-12B-FIX + - https://huggingface.co/mradermacher/MN-Lulanum-12B-FIX-i1-GGUF + description: | + This model was merged using the della_linear merge method using unsloth/Mistral-Nemo-Base-2407 as a base. 
+ The following models were included in the merge: + VAGOsolutions/SauerkrautLM-Nemo-12b-Instruct + anthracite-org/magnum-v2.5-12b-kto + Undi95/LocalC-12B-e2.0 + NeverSleep/Lumimaid-v0.2-12B + overrides: + parameters: + model: MN-Lulanum-12B-FIX.i1-Q4_K_M.gguf + files: + - filename: MN-Lulanum-12B-FIX.i1-Q4_K_M.gguf + sha256: 7e24d57249059d45bb508565ec3055e585a4e658c1815c67ea92397acc6aa775 + uri: huggingface://mradermacher/MN-Lulanum-12B-FIX-i1-GGUF/MN-Lulanum-12B-FIX.i1-Q4_K_M.gguf - &mudler ### START mudler's LocalAI specific-models url: "github:mudler/LocalAI/gallery/mudler.yaml@master" From e65e3253a3210aafffa62be72c696ac89bcb92f4 Mon Sep 17 00:00:00 2001 From: Ettore Di Giacinto Date: Thu, 17 Oct 2024 10:16:52 +0200 Subject: [PATCH 0366/1530] models(gallery): add apollo2-9b (#3860) Signed-off-by: Ettore Di Giacinto --- gallery/index.yaml | 14 ++++++++++++++ gallery/vicuna-chat.yaml | 4 ++++ 2 files changed, 18 insertions(+) diff --git a/gallery/index.yaml b/gallery/index.yaml index d06897cb..f87d1419 100644 --- a/gallery/index.yaml +++ b/gallery/index.yaml @@ -3023,6 +3023,20 @@ - filename: Gemma-2-Ataraxy-v3i-9B.Q4_K_M.gguf sha256: f14c5b9373d4058f0f812c6c34184addeb4aeeecb02a7bbcf9844d9afc8d0066 uri: huggingface://QuantFactory/Gemma-2-Ataraxy-v3i-9B-GGUF/Gemma-2-Ataraxy-v3i-9B.Q4_K_M.gguf +- !!merge <<: *gemma + name: "apollo2-9b" + url: "github:mudler/LocalAI/gallery/vicuna-chat.yaml@master" + urls: + - https://huggingface.co/mradermacher/Apollo2-9B-GGUF + description: | + Covering 12 Major Languages including English, Chinese, French, Hindi, Spanish, Arabic, Russian, Japanese, Korean, German, Italian, Portuguese and 38 Minor Languages So far. 
+ overrides: + parameters: + model: Apollo2-9B.Q4_K_M.gguf + files: + - filename: Apollo2-9B.Q4_K_M.gguf + sha256: 9fdb63f78e574558a4f33782eca88716eea28e90ea3ae36c381769cde6b81e0f + uri: huggingface://mradermacher/Apollo2-9B-GGUF/Apollo2-9B.Q4_K_M.gguf - &llama3 url: "github:mudler/LocalAI/gallery/llama3-instruct.yaml@master" icon: https://cdn-uploads.huggingface.co/production/uploads/642cc1c253e76b4c2286c58e/aJJxKus1wP5N-euvHEUq7.png diff --git a/gallery/vicuna-chat.yaml b/gallery/vicuna-chat.yaml index 05600e66..68310549 100644 --- a/gallery/vicuna-chat.yaml +++ b/gallery/vicuna-chat.yaml @@ -14,6 +14,10 @@ config_file: | system: "System: " assistant: "Assistant: " f16: true + stopwords: + - <|end|> + - <|endoftext|> + - template: completion: | Complete the following sentence: {{.Input}} From e416843f2268179cf86ed2b5af675b53d84de9fc Mon Sep 17 00:00:00 2001 From: Ettore Di Giacinto Date: Thu, 17 Oct 2024 11:06:24 +0200 Subject: [PATCH 0367/1530] models(gallery): add theia-llama-3.1-8b-v1 (#3861) Signed-off-by: Ettore Di Giacinto --- gallery/index.yaml | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/gallery/index.yaml b/gallery/index.yaml index f87d1419..f1a2c782 100644 --- a/gallery/index.yaml +++ b/gallery/index.yaml @@ -1732,6 +1732,20 @@ - filename: L3.1-EtherealRainbow-v1.0-rc1-8B.Q4_K_M.gguf sha256: c5556b2563112e512acca171415783f0988545b02c1834696c1cc35952def72c uri: huggingface://mradermacher/L3.1-EtherealRainbow-v1.0-rc1-8B-GGUF/L3.1-EtherealRainbow-v1.0-rc1-8B.Q4_K_M.gguf +- !!merge <<: *llama31 + name: "theia-llama-3.1-8b-v1" + urls: + - https://huggingface.co/Chainbase-Labs/Theia-Llama-3.1-8B-v1 + - https://huggingface.co/QuantFactory/Theia-Llama-3.1-8B-v1-GGUF + description: | + Theia-Llama-3.1-8B-v1 is an open-source large language model (LLM) trained specifically in the cryptocurrency domain. 
It was fine-tuned from the Llama-3.1-8B base model using a dataset curated from top 2000 cryptocurrency projects and comprehensive research reports to specialize in crypto-related tasks. Theia-Llama-3.1-8B-v1 has been quantized to optimize it for efficient deployment and reduced memory footprint. It's benchmarked highly for crypto knowledge comprehension and generation, knowledge coverage, and reasoning capabilities. The system prompt used for its training is "You are a helpful assistant who will answer crypto related questions." The recommended parameters for performance include sequence length of 256, temperature of 0, top-k-sampling of -1, top-p of 1, and context window of 39680. + overrides: + parameters: + model: Theia-Llama-3.1-8B-v1.Q4_K_M.gguf + files: + - filename: Theia-Llama-3.1-8B-v1.Q4_K_M.gguf + sha256: db876d033f86f118b49a1f1006e5d078d494c93b73c7e595bd10ca789a0c8fdb + uri: huggingface://QuantFactory/Theia-Llama-3.1-8B-v1-GGUF/Theia-Llama-3.1-8B-v1.Q4_K_M.gguf - &deepseek ## Deepseek url: "github:mudler/LocalAI/gallery/deepseek.yaml@master" From 0da16c73ba49ce11aa5512c8cf8d1f3910f3e6f3 Mon Sep 17 00:00:00 2001 From: Ettore Di Giacinto Date: Thu, 17 Oct 2024 11:10:36 +0200 Subject: [PATCH 0368/1530] models(gallery): add tor-8b (#3862) Signed-off-by: Ettore Di Giacinto --- gallery/index.yaml | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/gallery/index.yaml b/gallery/index.yaml index f1a2c782..f6963596 100644 --- a/gallery/index.yaml +++ b/gallery/index.yaml @@ -2410,6 +2410,21 @@ - filename: MN-Lulanum-12B-FIX.i1-Q4_K_M.gguf sha256: 7e24d57249059d45bb508565ec3055e585a4e658c1815c67ea92397acc6aa775 uri: huggingface://mradermacher/MN-Lulanum-12B-FIX-i1-GGUF/MN-Lulanum-12B-FIX.i1-Q4_K_M.gguf +- !!merge <<: *mistral03 + name: "tor-8b" + icon: https://huggingface.co/Delta-Vector/Tor-8B/resolve/main/FinalTor8B.jpg + url: "github:mudler/LocalAI/gallery/chatml.yaml@master" + urls: + - https://huggingface.co/QuantFactory/Tor-8B-GGUF + 
description: | + An earlier checkpoint of Darkens-8B using the same configuration that i felt was different enough from it's 4 epoch cousin to release, Finetuned ontop of the Prune/Distill NeMo 8B done by Nvidia, This model aims to have generally good prose and writing while not falling into claude-isms. + overrides: + parameters: + model: Tor-8B.Q4_K_M.gguf + files: + - filename: Tor-8B.Q4_K_M.gguf + sha256: 9dd64bd886aa7682b6179340449b38feda405b44722ef7ac752cedb807af370e + uri: huggingface://QuantFactory/Tor-8B-GGUF/Tor-8B.Q4_K_M.gguf - &mudler ### START mudler's LocalAI specific-models url: "github:mudler/LocalAI/gallery/mudler.yaml@master" From 52bc463a3ff6c26fc66df4fd6b6690c2c48dc533 Mon Sep 17 00:00:00 2001 From: Ettore Di Giacinto Date: Thu, 17 Oct 2024 11:16:41 +0200 Subject: [PATCH 0369/1530] models(gallery): add darkens-8b (#3863) Signed-off-by: Ettore Di Giacinto --- gallery/index.yaml | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/gallery/index.yaml b/gallery/index.yaml index f6963596..9309c51e 100644 --- a/gallery/index.yaml +++ b/gallery/index.yaml @@ -2425,6 +2425,21 @@ - filename: Tor-8B.Q4_K_M.gguf sha256: 9dd64bd886aa7682b6179340449b38feda405b44722ef7ac752cedb807af370e uri: huggingface://QuantFactory/Tor-8B-GGUF/Tor-8B.Q4_K_M.gguf +- !!merge <<: *mistral03 + name: "darkens-8b" + url: "github:mudler/LocalAI/gallery/chatml.yaml@master" + urls: + - https://huggingface.co/Delta-Vector/Darkens-8B + - https://huggingface.co/QuantFactory/Darkens-8B-GGUF + description: | + This is the fully cooked, 4 epoch version of Tor-8B, this is an experimental version, despite being trained for 4 epochs, the model feels fresh and new and is not overfit, This model aims to have generally good prose and writing while not falling into claude-isms, it follows the actions "dialogue" format heavily. 
+ overrides: + parameters: + model: Darkens-8B.Q4_K_M.gguf + files: + - filename: Darkens-8B.Q4_K_M.gguf + sha256: f56a483e10fd00957460adfc16ee462cecac892a4fb44dc59e466e68a360fd42 + uri: huggingface://QuantFactory/Darkens-8B-GGUF/Darkens-8B.Q4_K_M.gguf - &mudler ### START mudler's LocalAI specific-models url: "github:mudler/LocalAI/gallery/mudler.yaml@master" From e45e8a58fc251209bc6ee44934e9bb59eab7360e Mon Sep 17 00:00:00 2001 From: Ettore Di Giacinto Date: Thu, 17 Oct 2024 11:20:56 +0200 Subject: [PATCH 0370/1530] models(gallery): add baldur-8b (#3864) Signed-off-by: Ettore Di Giacinto --- gallery/index.yaml | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/gallery/index.yaml b/gallery/index.yaml index 9309c51e..3b93cfa8 100644 --- a/gallery/index.yaml +++ b/gallery/index.yaml @@ -1746,6 +1746,21 @@ - filename: Theia-Llama-3.1-8B-v1.Q4_K_M.gguf sha256: db876d033f86f118b49a1f1006e5d078d494c93b73c7e595bd10ca789a0c8fdb uri: huggingface://QuantFactory/Theia-Llama-3.1-8B-v1-GGUF/Theia-Llama-3.1-8B-v1.Q4_K_M.gguf +- !!merge <<: *llama31 + icon: https://huggingface.co/Delta-Vector/Baldur-8B/resolve/main/Baldur.jpg + name: "baldur-8b" + urls: + - https://huggingface.co/QuantFactory/Baldur-8B-GGUF + - https://huggingface.co/QuantFactory/Baldur-8B-GGUF + description: | + An finetune of the L3.1 instruct distill done by Arcee, The intent of this model is to have differing prose then my other releases, in my testing it has achieved this and avoiding using common -isms frequently and has a differing flavor then my other models. 
+ overrides: + parameters: + model: Baldur-8B.Q4_K_M.gguf + files: + - filename: Baldur-8B.Q4_K_M.gguf + sha256: 645b393fbac5cd17ccfd66840a3a05c3930e01b903dd1535f0347a74cc443fc7 + uri: huggingface://QuantFactory/Baldur-8B-GGUF/Baldur-8B.Q4_K_M.gguf - &deepseek ## Deepseek url: "github:mudler/LocalAI/gallery/deepseek.yaml@master" From 54c0f153e2a2e3e53ae845400ee187c6ae297459 Mon Sep 17 00:00:00 2001 From: Ettore Di Giacinto Date: Thu, 17 Oct 2024 11:25:32 +0200 Subject: [PATCH 0371/1530] models(gallery): add meissa-qwen2.5-7b-instruct (#3865) Signed-off-by: Ettore Di Giacinto --- gallery/index.yaml | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) diff --git a/gallery/index.yaml b/gallery/index.yaml index 3b93cfa8..46cdc954 100644 --- a/gallery/index.yaml +++ b/gallery/index.yaml @@ -611,6 +611,23 @@ - filename: Qevacot-7B-v2.Q4_K_M.gguf sha256: a45b3d3b74bc68a5c7ac07d251cdeff671e64085d1816cd86fca6cfb7eab204e uri: huggingface://mradermacher/Qevacot-7B-v2-GGUF/Qevacot-7B-v2.Q4_K_M.gguf +- !!merge <<: *qwen25 + name: "meissa-qwen2.5-7b-instruct" + icon: https://huggingface.co/Orion-zhen/Meissa-Qwen2.5-7B-Instruct/resolve/main/meissa.jpg + urls: + - https://huggingface.co/Orion-zhen/Meissa-Qwen2.5-7B-Instruct + - https://huggingface.co/QuantFactory/Meissa-Qwen2.5-7B-Instruct-GGUF + description: | + Meissa is designated Lambda Orionis, forms Orion's head, and is a multiple star with a combined apparent magnitude of 3.33. Its name means the "shining one". + This model is fine tuned over writing and role playing datasets (maybe the first on qwen2.5-7b), aiming to enhance model's performance in novel writing and roleplaying. 
+ The model is fine-tuned over Orion-zhen/Qwen2.5-7B-Instruct-Uncensored + overrides: + parameters: + model: Meissa-Qwen2.5-7B-Instruct.Q4_K_M.gguf + files: + - filename: Meissa-Qwen2.5-7B-Instruct.Q4_K_M.gguf + sha256: 632b10d5c0e98bc8d53295886da2d57772a54bb6f6fa01d458e9e8c7fa9c905a + uri: huggingface://QuantFactory/Meissa-Qwen2.5-7B-Instruct-GGUF/Meissa-Qwen2.5-7B-Instruct.Q4_K_M.gguf - &archfunct license: apache-2.0 tags: From 9db068388b7e0d9394161b57a84845273fbaf647 Mon Sep 17 00:00:00 2001 From: Ettore Di Giacinto Date: Thu, 17 Oct 2024 17:32:57 +0200 Subject: [PATCH 0372/1530] fix(vllm): images and videos are base64 by default (#3867) Signed-off-by: Ettore Di Giacinto --- backend/python/vllm/backend.py | 20 +++++++++++++++----- 1 file changed, 15 insertions(+), 5 deletions(-) diff --git a/backend/python/vllm/backend.py b/backend/python/vllm/backend.py index dfbb1503..98f292ab 100644 --- a/backend/python/vllm/backend.py +++ b/backend/python/vllm/backend.py @@ -19,6 +19,8 @@ from vllm.utils import random_uuid from vllm.transformers_utils.tokenizer import get_tokenizer from vllm.multimodal.utils import fetch_image from vllm.assets.video import VideoAsset +import base64 +import io _ONE_DAY_IN_SECONDS = 60 * 60 * 24 @@ -262,16 +264,19 @@ class BackendServicer(backend_pb2_grpc.BackendServicer): def load_image(self, image_path: str): """ - Load an image from the given file path. + Load an image from the given file path or base64 encoded data. Args: - image_path (str): The path to the image file. + image_path (str): The path to the image file or base64 encoded data. Returns: Image: The loaded image. """ try: - return Image.open(image_path) + + image_data = base64.b64decode(image_path) + image = Image.open(io.BytesIO(image_data)) + return image except Exception as e: print(f"Error loading image {image_path}: {e}", file=sys.stderr) return self.load_video(image_path) @@ -287,10 +292,15 @@ class BackendServicer(backend_pb2_grpc.BackendServicer): Video: The loaded video. 
""" try: - video = VideoAsset(name=video_path).np_ndarrays + timestamp = str(int(time.time() * 1000)) # Generate timestamp + p = f"/tmp/vl-{timestamp}.data" # Use timestamp in filename + with open(p, "wb") as f: + f.write(base64.b64decode(video_path)) + video = VideoAsset(name=p).np_ndarrays + os.remove(p) return video except Exception as e: - print(f"Error loading video {image_path}: {e}", file=sys.stderr) + print(f"Error loading video {video_path}: {e}", file=sys.stderr) return None async def serve(address): From d5da8c3509d1e23d1ebcf82a4c9d9964eb1b549a Mon Sep 17 00:00:00 2001 From: Ettore Di Giacinto Date: Thu, 17 Oct 2024 17:33:50 +0200 Subject: [PATCH 0373/1530] feat(templates): extract text from multimodal requests (#3866) When offloading template construction to the backend, we want to keep text around in case of multimodal requests. Signed-off-by: Ettore Di Giacinto --- core/backend/llm.go | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/core/backend/llm.go b/core/backend/llm.go index d946d3f8..199a6233 100644 --- a/core/backend/llm.go +++ b/core/backend/llm.go @@ -2,6 +2,7 @@ package backend import ( "context" + "encoding/json" "fmt" "os" "regexp" @@ -77,6 +78,16 @@ func ModelInference(ctx context.Context, s string, messages []schema.Message, im switch ct := message.Content.(type) { case string: protoMessages[i].Content = ct + case []interface{}: + // If using the tokenizer template, in case of multimodal we want to keep the multimodal content as and return only strings here + data, _ := json.Marshal(ct) + resultData := []struct { + Text string `json:"text"` + }{} + json.Unmarshal(data, &resultData) + for _, r := range resultData { + protoMessages[i].Content += r.Text + } default: return nil, fmt.Errorf("unsupported type for schema.Message.Content for inference: %T", ct) } From e1db6dce8219c6994a637c1bb9c1d1bb19ca1576 Mon Sep 17 00:00:00 2001 From: Ettore Di Giacinto Date: Thu, 17 Oct 2024 17:34:20 +0200 Subject: [PATCH 0374/1530] 
feat(templates): add sprig to multimodal templates (#3868) Signed-off-by: Ettore Di Giacinto --- pkg/templates/multimodal.go | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/pkg/templates/multimodal.go b/pkg/templates/multimodal.go index cc56c492..a2056640 100644 --- a/pkg/templates/multimodal.go +++ b/pkg/templates/multimodal.go @@ -3,11 +3,13 @@ package templates import ( "bytes" "text/template" + + "github.com/Masterminds/sprig/v3" ) func TemplateMultiModal(templateString string, templateID int, text string) (string, error) { // compile the template - tmpl, err := template.New("template").Parse(templateString) + tmpl, err := template.New("template").Funcs(sprig.FuncMap()).Parse(templateString) if err != nil { return "", err } From fd4043266bf1369765ddffc6ca413feeae6c5d17 Mon Sep 17 00:00:00 2001 From: Ettore Di Giacinto Date: Thu, 17 Oct 2024 17:49:03 +0200 Subject: [PATCH 0375/1530] Update README.md Signed-off-by: Ettore Di Giacinto --- README.md | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/README.md b/README.md index 7647105b..0b2eee92 100644 --- a/README.md +++ b/README.md @@ -69,7 +69,9 @@ docker run -ti --name local-ai -p 8080:8080 localai/localai:latest-aio-cpu To load models: ```bash -# Start LocalAI with the phi-2 model +# From the model gallery (see available models with `local-ai models list`, in the WebUI from the model tab, or visiting https://models.localai.io) +local-ai run llama-3.2-1b-instruct:q4_k_m +# Start LocalAI with the phi-2 model directly from huggingface local-ai run huggingface://TheBloke/phi-2-GGUF/phi-2.Q8_0.gguf # Install and run a model from the Ollama OCI registry local-ai run ollama://gemma:2b From dcabda42d1db925c6274da5a2f50410a16f7e3ca Mon Sep 17 00:00:00 2001 From: Ettore Di Giacinto Date: Thu, 17 Oct 2024 23:04:11 +0200 Subject: [PATCH 0376/1530] fix(mamba): pin torch version (#3871) causal-conv1d supports only torch 2.4.x, not torch 2.5.x Signed-off-by: Ettore Di Giacinto --- 
backend/python/mamba/requirements-cpu.txt | 2 +- backend/python/mamba/requirements-cublas11.txt | 2 +- backend/python/mamba/requirements-cublas12.txt | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/backend/python/mamba/requirements-cpu.txt b/backend/python/mamba/requirements-cpu.txt index 39dab0fd..b4f1261f 100644 --- a/backend/python/mamba/requirements-cpu.txt +++ b/backend/python/mamba/requirements-cpu.txt @@ -1,2 +1,2 @@ -torch +torch==2.4.1 transformers \ No newline at end of file diff --git a/backend/python/mamba/requirements-cublas11.txt b/backend/python/mamba/requirements-cublas11.txt index 7048a14f..ed0d4df5 100644 --- a/backend/python/mamba/requirements-cublas11.txt +++ b/backend/python/mamba/requirements-cublas11.txt @@ -1,3 +1,3 @@ --extra-index-url https://download.pytorch.org/whl/cu118 -torch +torch==2.4.1+cu118 transformers \ No newline at end of file diff --git a/backend/python/mamba/requirements-cublas12.txt b/backend/python/mamba/requirements-cublas12.txt index 39dab0fd..b4f1261f 100644 --- a/backend/python/mamba/requirements-cublas12.txt +++ b/backend/python/mamba/requirements-cublas12.txt @@ -1,2 +1,2 @@ -torch +torch==2.4.1 transformers \ No newline at end of file From 08e1e2251e3df568fd7ba2d5ec0e8c0f5b1bb681 Mon Sep 17 00:00:00 2001 From: "LocalAI [bot]" <139863280+localai-bot@users.noreply.github.com> Date: Thu, 17 Oct 2024 23:05:04 +0200 Subject: [PATCH 0377/1530] chore: :arrow_up: Update ggerganov/llama.cpp to `99bd4ac28c32cd17c0e337ff5601393b033dc5fc` (#3869) :arrow_up: Update ggerganov/llama.cpp Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com> Co-authored-by: mudler <2420543+mudler@users.noreply.github.com> --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index 374b0e00..9c2a2054 100644 --- a/Makefile +++ b/Makefile @@ -8,7 +8,7 @@ DETECT_LIBS?=true # llama.cpp versions GOLLAMA_REPO?=https://github.com/go-skynet/go-llama.cpp 
GOLLAMA_VERSION?=2b57a8ae43e4699d3dc5d1496a1ccd42922993be -CPPLLAMA_VERSION?=9e041024481f6b249ab8918e18b9477f873b5a5e +CPPLLAMA_VERSION?=99bd4ac28c32cd17c0e337ff5601393b033dc5fc # go-rwkv version RWKV_REPO?=https://github.com/donomii/go-rwkv.cpp From a26fb548b15e3999e10118def52c5a499dc22257 Mon Sep 17 00:00:00 2001 From: "LocalAI [bot]" <139863280+localai-bot@users.noreply.github.com> Date: Thu, 17 Oct 2024 23:05:26 +0200 Subject: [PATCH 0378/1530] chore: :arrow_up: Update ggerganov/whisper.cpp to `a5abfe6a90495f7bf19fe70d016ecc255e97359c` (#3870) :arrow_up: Update ggerganov/whisper.cpp Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com> Co-authored-by: mudler <2420543+mudler@users.noreply.github.com> --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index 9c2a2054..48c8d188 100644 --- a/Makefile +++ b/Makefile @@ -16,7 +16,7 @@ RWKV_VERSION?=661e7ae26d442f5cfebd2a0881b44e8c55949ec6 # whisper.cpp version WHISPER_REPO?=https://github.com/ggerganov/whisper.cpp -WHISPER_CPP_VERSION?=d3f7137cc9befa6d74dc4085de2b664b97b7c8bb +WHISPER_CPP_VERSION?=a5abfe6a90495f7bf19fe70d016ecc255e97359c # bert.cpp version BERT_REPO?=https://github.com/go-skynet/go-bert.cpp From 3e77a17b26fa303503e19ade0b5f5ed2be62aa1f Mon Sep 17 00:00:00 2001 From: Ettore Di Giacinto Date: Fri, 18 Oct 2024 09:11:59 +0200 Subject: [PATCH 0379/1530] fix(dependencies): pin pytorch version (#3872) Signed-off-by: Ettore Di Giacinto --- backend/python/autogptq/requirements-cublas11.txt | 2 +- backend/python/autogptq/requirements-cublas12.txt | 2 +- backend/python/autogptq/requirements-hipblas.txt | 2 +- backend/python/bark/requirements-cpu.txt | 4 ++-- backend/python/bark/requirements-cublas11.txt | 4 ++-- backend/python/bark/requirements-cublas12.txt | 4 ++-- backend/python/bark/requirements-hipblas.txt | 4 ++-- backend/python/coqui/requirements-cpu.txt | 2 +- backend/python/coqui/requirements-cublas11.txt | 4 ++-- 
backend/python/coqui/requirements-cublas12.txt | 4 ++-- backend/python/coqui/requirements-hipblas.txt | 4 ++-- backend/python/diffusers/requirements-cpu.txt | 2 +- backend/python/diffusers/requirements-cublas11.txt | 2 +- backend/python/diffusers/requirements-cublas12.txt | 2 +- backend/python/exllama2/requirements-cpu.txt | 2 +- backend/python/exllama2/requirements-cublas11.txt | 2 +- backend/python/exllama2/requirements-cublas12.txt | 2 +- backend/python/openvoice/requirements-cpu.txt | 2 +- backend/python/openvoice/requirements-cublas11.txt | 2 +- backend/python/openvoice/requirements-cublas12.txt | 2 +- backend/python/parler-tts/requirements-cpu.txt | 2 +- backend/python/parler-tts/requirements-cublas11.txt | 4 ++-- backend/python/parler-tts/requirements-cublas12.txt | 4 ++-- backend/python/rerankers/requirements-cpu.txt | 2 +- backend/python/rerankers/requirements-cublas11.txt | 2 +- backend/python/rerankers/requirements-cublas12.txt | 2 +- backend/python/rerankers/requirements-hipblas.txt | 2 +- backend/python/sentencetransformers/requirements-cpu.txt | 2 +- backend/python/sentencetransformers/requirements-cublas11.txt | 2 +- backend/python/sentencetransformers/requirements-cublas12.txt | 2 +- backend/python/sentencetransformers/requirements-hipblas.txt | 2 +- backend/python/transformers-musicgen/requirements-cpu.txt | 2 +- .../python/transformers-musicgen/requirements-cublas11.txt | 2 +- .../python/transformers-musicgen/requirements-cublas12.txt | 2 +- backend/python/transformers-musicgen/requirements-hipblas.txt | 2 +- backend/python/transformers/requirements-cpu.txt | 2 +- backend/python/transformers/requirements-cublas11.txt | 2 +- backend/python/transformers/requirements-cublas12.txt | 2 +- backend/python/transformers/requirements-hipblas.txt | 2 +- backend/python/vall-e-x/requirements-cpu.txt | 4 ++-- backend/python/vall-e-x/requirements-cublas11.txt | 4 ++-- backend/python/vall-e-x/requirements-cublas12.txt | 4 ++-- 
backend/python/vllm/requirements-cpu.txt | 2 +- backend/python/vllm/requirements-cublas11.txt | 2 +- backend/python/vllm/requirements-cublas12.txt | 2 +- backend/python/vllm/requirements-hipblas.txt | 2 +- 46 files changed, 58 insertions(+), 58 deletions(-) diff --git a/backend/python/autogptq/requirements-cublas11.txt b/backend/python/autogptq/requirements-cublas11.txt index 6461b696..cf469472 100644 --- a/backend/python/autogptq/requirements-cublas11.txt +++ b/backend/python/autogptq/requirements-cublas11.txt @@ -1,2 +1,2 @@ --extra-index-url https://download.pytorch.org/whl/cu118 -torch +torch==2.4.1+cu118 diff --git a/backend/python/autogptq/requirements-cublas12.txt b/backend/python/autogptq/requirements-cublas12.txt index 12c6d5d5..20f84cf7 100644 --- a/backend/python/autogptq/requirements-cublas12.txt +++ b/backend/python/autogptq/requirements-cublas12.txt @@ -1 +1 @@ -torch +torch==2.4.1 \ No newline at end of file diff --git a/backend/python/autogptq/requirements-hipblas.txt b/backend/python/autogptq/requirements-hipblas.txt index 76018445..ecd817dc 100644 --- a/backend/python/autogptq/requirements-hipblas.txt +++ b/backend/python/autogptq/requirements-hipblas.txt @@ -1,2 +1,2 @@ --extra-index-url https://download.pytorch.org/whl/rocm6.0 -torch \ No newline at end of file +torch==2.4.1+rocm6.0 \ No newline at end of file diff --git a/backend/python/bark/requirements-cpu.txt b/backend/python/bark/requirements-cpu.txt index 0b2c3bc7..12e376ad 100644 --- a/backend/python/bark/requirements-cpu.txt +++ b/backend/python/bark/requirements-cpu.txt @@ -1,4 +1,4 @@ transformers accelerate -torch -torchaudio \ No newline at end of file +torch==2.4.1 +torchaudio==2.4.1 \ No newline at end of file diff --git a/backend/python/bark/requirements-cublas11.txt b/backend/python/bark/requirements-cublas11.txt index 71a6a93f..9f8fe9ff 100644 --- a/backend/python/bark/requirements-cublas11.txt +++ b/backend/python/bark/requirements-cublas11.txt @@ -1,5 +1,5 @@ --extra-index-url 
https://download.pytorch.org/whl/cu118 -torch -torchaudio +torch==2.4.1+cu118 +torchaudio==2.4.1+cu118 transformers accelerate \ No newline at end of file diff --git a/backend/python/bark/requirements-cublas12.txt b/backend/python/bark/requirements-cublas12.txt index 0fa27074..53716949 100644 --- a/backend/python/bark/requirements-cublas12.txt +++ b/backend/python/bark/requirements-cublas12.txt @@ -1,4 +1,4 @@ -torch -torchaudio +torch==2.4.1 +torchaudio==2.4.1 transformers accelerate \ No newline at end of file diff --git a/backend/python/bark/requirements-hipblas.txt b/backend/python/bark/requirements-hipblas.txt index af9e820e..1d54fb16 100644 --- a/backend/python/bark/requirements-hipblas.txt +++ b/backend/python/bark/requirements-hipblas.txt @@ -1,5 +1,5 @@ --extra-index-url https://download.pytorch.org/whl/rocm6.0 -torch -torchaudio +torch==2.4.1+rocm6.0 +torchaudio==2.4.1+rocm6.0 transformers accelerate \ No newline at end of file diff --git a/backend/python/coqui/requirements-cpu.txt b/backend/python/coqui/requirements-cpu.txt index bbcdc8cd..2021fc20 100644 --- a/backend/python/coqui/requirements-cpu.txt +++ b/backend/python/coqui/requirements-cpu.txt @@ -1,3 +1,3 @@ transformers accelerate -torch \ No newline at end of file +torch==2.4.1 \ No newline at end of file diff --git a/backend/python/coqui/requirements-cublas11.txt b/backend/python/coqui/requirements-cublas11.txt index 71a6a93f..9f8fe9ff 100644 --- a/backend/python/coqui/requirements-cublas11.txt +++ b/backend/python/coqui/requirements-cublas11.txt @@ -1,5 +1,5 @@ --extra-index-url https://download.pytorch.org/whl/cu118 -torch -torchaudio +torch==2.4.1+cu118 +torchaudio==2.4.1+cu118 transformers accelerate \ No newline at end of file diff --git a/backend/python/coqui/requirements-cublas12.txt b/backend/python/coqui/requirements-cublas12.txt index 0fa27074..53716949 100644 --- a/backend/python/coqui/requirements-cublas12.txt +++ b/backend/python/coqui/requirements-cublas12.txt @@ -1,4 +1,4 @@ 
-torch -torchaudio +torch==2.4.1 +torchaudio==2.4.1 transformers accelerate \ No newline at end of file diff --git a/backend/python/coqui/requirements-hipblas.txt b/backend/python/coqui/requirements-hipblas.txt index af9e820e..1d54fb16 100644 --- a/backend/python/coqui/requirements-hipblas.txt +++ b/backend/python/coqui/requirements-hipblas.txt @@ -1,5 +1,5 @@ --extra-index-url https://download.pytorch.org/whl/rocm6.0 -torch -torchaudio +torch==2.4.1+rocm6.0 +torchaudio==2.4.1+rocm6.0 transformers accelerate \ No newline at end of file diff --git a/backend/python/diffusers/requirements-cpu.txt b/backend/python/diffusers/requirements-cpu.txt index 235bb57e..20667cc0 100644 --- a/backend/python/diffusers/requirements-cpu.txt +++ b/backend/python/diffusers/requirements-cpu.txt @@ -5,5 +5,5 @@ accelerate compel peft sentencepiece -torch +torch==2.4.1 optimum-quanto \ No newline at end of file diff --git a/backend/python/diffusers/requirements-cublas11.txt b/backend/python/diffusers/requirements-cublas11.txt index 40e718cb..19e2d72e 100644 --- a/backend/python/diffusers/requirements-cublas11.txt +++ b/backend/python/diffusers/requirements-cublas11.txt @@ -1,5 +1,5 @@ --extra-index-url https://download.pytorch.org/whl/cu118 -torch +torch==2.4.1+cu118 diffusers opencv-python transformers diff --git a/backend/python/diffusers/requirements-cublas12.txt b/backend/python/diffusers/requirements-cublas12.txt index 3bcc5397..3992b039 100644 --- a/backend/python/diffusers/requirements-cublas12.txt +++ b/backend/python/diffusers/requirements-cublas12.txt @@ -1,4 +1,4 @@ -torch +torch==2.4.1 diffusers opencv-python transformers diff --git a/backend/python/exllama2/requirements-cpu.txt b/backend/python/exllama2/requirements-cpu.txt index bbcdc8cd..2021fc20 100644 --- a/backend/python/exllama2/requirements-cpu.txt +++ b/backend/python/exllama2/requirements-cpu.txt @@ -1,3 +1,3 @@ transformers accelerate -torch \ No newline at end of file +torch==2.4.1 \ No newline at end of file diff 
--git a/backend/python/exllama2/requirements-cublas11.txt b/backend/python/exllama2/requirements-cublas11.txt index 1dfb5b98..2d1958c7 100644 --- a/backend/python/exllama2/requirements-cublas11.txt +++ b/backend/python/exllama2/requirements-cublas11.txt @@ -1,4 +1,4 @@ --extra-index-url https://download.pytorch.org/whl/cu118 -torch +torch==2.4.1+cu118 transformers accelerate \ No newline at end of file diff --git a/backend/python/exllama2/requirements-cublas12.txt b/backend/python/exllama2/requirements-cublas12.txt index 1ec544cd..93e62c5a 100644 --- a/backend/python/exllama2/requirements-cublas12.txt +++ b/backend/python/exllama2/requirements-cublas12.txt @@ -1,3 +1,3 @@ -torch +torch==2.4.1 transformers accelerate \ No newline at end of file diff --git a/backend/python/openvoice/requirements-cpu.txt b/backend/python/openvoice/requirements-cpu.txt index 08ed5eeb..20f84cf7 100644 --- a/backend/python/openvoice/requirements-cpu.txt +++ b/backend/python/openvoice/requirements-cpu.txt @@ -1 +1 @@ -torch \ No newline at end of file +torch==2.4.1 \ No newline at end of file diff --git a/backend/python/openvoice/requirements-cublas11.txt b/backend/python/openvoice/requirements-cublas11.txt index 6461b696..cf469472 100644 --- a/backend/python/openvoice/requirements-cublas11.txt +++ b/backend/python/openvoice/requirements-cublas11.txt @@ -1,2 +1,2 @@ --extra-index-url https://download.pytorch.org/whl/cu118 -torch +torch==2.4.1+cu118 diff --git a/backend/python/openvoice/requirements-cublas12.txt b/backend/python/openvoice/requirements-cublas12.txt index 12c6d5d5..20f84cf7 100644 --- a/backend/python/openvoice/requirements-cublas12.txt +++ b/backend/python/openvoice/requirements-cublas12.txt @@ -1 +1 @@ -torch +torch==2.4.1 \ No newline at end of file diff --git a/backend/python/parler-tts/requirements-cpu.txt b/backend/python/parler-tts/requirements-cpu.txt index bbcdc8cd..2021fc20 100644 --- a/backend/python/parler-tts/requirements-cpu.txt +++ 
b/backend/python/parler-tts/requirements-cpu.txt @@ -1,3 +1,3 @@ transformers accelerate -torch \ No newline at end of file +torch==2.4.1 \ No newline at end of file diff --git a/backend/python/parler-tts/requirements-cublas11.txt b/backend/python/parler-tts/requirements-cublas11.txt index 71a6a93f..9f8fe9ff 100644 --- a/backend/python/parler-tts/requirements-cublas11.txt +++ b/backend/python/parler-tts/requirements-cublas11.txt @@ -1,5 +1,5 @@ --extra-index-url https://download.pytorch.org/whl/cu118 -torch -torchaudio +torch==2.4.1+cu118 +torchaudio==2.4.1+cu118 transformers accelerate \ No newline at end of file diff --git a/backend/python/parler-tts/requirements-cublas12.txt b/backend/python/parler-tts/requirements-cublas12.txt index 0fa27074..53716949 100644 --- a/backend/python/parler-tts/requirements-cublas12.txt +++ b/backend/python/parler-tts/requirements-cublas12.txt @@ -1,4 +1,4 @@ -torch -torchaudio +torch==2.4.1 +torchaudio==2.4.1 transformers accelerate \ No newline at end of file diff --git a/backend/python/rerankers/requirements-cpu.txt b/backend/python/rerankers/requirements-cpu.txt index 25a1d8ab..e27a4726 100644 --- a/backend/python/rerankers/requirements-cpu.txt +++ b/backend/python/rerankers/requirements-cpu.txt @@ -1,4 +1,4 @@ transformers accelerate -torch +torch==2.4.1 rerankers[transformers] \ No newline at end of file diff --git a/backend/python/rerankers/requirements-cublas11.txt b/backend/python/rerankers/requirements-cublas11.txt index 06c4b2cf..fef296fe 100644 --- a/backend/python/rerankers/requirements-cublas11.txt +++ b/backend/python/rerankers/requirements-cublas11.txt @@ -1,5 +1,5 @@ --extra-index-url https://download.pytorch.org/whl/cu118 transformers accelerate -torch +torch==2.4.1+cu118 rerankers[transformers] \ No newline at end of file diff --git a/backend/python/rerankers/requirements-cublas12.txt b/backend/python/rerankers/requirements-cublas12.txt index 25a1d8ab..e27a4726 100644 --- 
a/backend/python/rerankers/requirements-cublas12.txt +++ b/backend/python/rerankers/requirements-cublas12.txt @@ -1,4 +1,4 @@ transformers accelerate -torch +torch==2.4.1 rerankers[transformers] \ No newline at end of file diff --git a/backend/python/rerankers/requirements-hipblas.txt b/backend/python/rerankers/requirements-hipblas.txt index 961d150c..b1c8baed 100644 --- a/backend/python/rerankers/requirements-hipblas.txt +++ b/backend/python/rerankers/requirements-hipblas.txt @@ -1,5 +1,5 @@ --extra-index-url https://download.pytorch.org/whl/rocm6.0 transformers accelerate -torch +torch==2.4.1+rocm6.0 rerankers[transformers] \ No newline at end of file diff --git a/backend/python/sentencetransformers/requirements-cpu.txt b/backend/python/sentencetransformers/requirements-cpu.txt index bc0e899e..70a3eb9b 100644 --- a/backend/python/sentencetransformers/requirements-cpu.txt +++ b/backend/python/sentencetransformers/requirements-cpu.txt @@ -1,4 +1,4 @@ -torch +torch==2.4.1 accelerate transformers bitsandbytes diff --git a/backend/python/sentencetransformers/requirements-cublas11.txt b/backend/python/sentencetransformers/requirements-cublas11.txt index 7cd277f7..155bb488 100644 --- a/backend/python/sentencetransformers/requirements-cublas11.txt +++ b/backend/python/sentencetransformers/requirements-cublas11.txt @@ -1,5 +1,5 @@ --extra-index-url https://download.pytorch.org/whl/cu118 -torch +torch==2.4.1+cu118 accelerate sentence-transformers==3.2.0 transformers \ No newline at end of file diff --git a/backend/python/sentencetransformers/requirements-cublas12.txt b/backend/python/sentencetransformers/requirements-cublas12.txt index aa289073..a67138d9 100644 --- a/backend/python/sentencetransformers/requirements-cublas12.txt +++ b/backend/python/sentencetransformers/requirements-cublas12.txt @@ -1,4 +1,4 @@ -torch +torch==2.4.1 accelerate sentence-transformers==3.2.0 transformers \ No newline at end of file diff --git 
a/backend/python/sentencetransformers/requirements-hipblas.txt b/backend/python/sentencetransformers/requirements-hipblas.txt index 793bea16..c1123b89 100644 --- a/backend/python/sentencetransformers/requirements-hipblas.txt +++ b/backend/python/sentencetransformers/requirements-hipblas.txt @@ -1,5 +1,5 @@ --extra-index-url https://download.pytorch.org/whl/rocm6.0 -torch +torch==2.4.1+rocm6.0 accelerate sentence-transformers==3.2.0 transformers \ No newline at end of file diff --git a/backend/python/transformers-musicgen/requirements-cpu.txt b/backend/python/transformers-musicgen/requirements-cpu.txt index bbcdc8cd..2021fc20 100644 --- a/backend/python/transformers-musicgen/requirements-cpu.txt +++ b/backend/python/transformers-musicgen/requirements-cpu.txt @@ -1,3 +1,3 @@ transformers accelerate -torch \ No newline at end of file +torch==2.4.1 \ No newline at end of file diff --git a/backend/python/transformers-musicgen/requirements-cublas11.txt b/backend/python/transformers-musicgen/requirements-cublas11.txt index 191a6eef..cd2c9fdb 100644 --- a/backend/python/transformers-musicgen/requirements-cublas11.txt +++ b/backend/python/transformers-musicgen/requirements-cublas11.txt @@ -1,4 +1,4 @@ --extra-index-url https://download.pytorch.org/whl/cu118 transformers accelerate -torch \ No newline at end of file +torch==2.4.1+cu118 \ No newline at end of file diff --git a/backend/python/transformers-musicgen/requirements-cublas12.txt b/backend/python/transformers-musicgen/requirements-cublas12.txt index bbcdc8cd..2021fc20 100644 --- a/backend/python/transformers-musicgen/requirements-cublas12.txt +++ b/backend/python/transformers-musicgen/requirements-cublas12.txt @@ -1,3 +1,3 @@ transformers accelerate -torch \ No newline at end of file +torch==2.4.1 \ No newline at end of file diff --git a/backend/python/transformers-musicgen/requirements-hipblas.txt b/backend/python/transformers-musicgen/requirements-hipblas.txt index 00f0a946..122b2032 100644 --- 
a/backend/python/transformers-musicgen/requirements-hipblas.txt +++ b/backend/python/transformers-musicgen/requirements-hipblas.txt @@ -1,4 +1,4 @@ --extra-index-url https://download.pytorch.org/whl/rocm6.0 transformers accelerate -torch \ No newline at end of file +torch==2.4.1+rocm6.0 \ No newline at end of file diff --git a/backend/python/transformers/requirements-cpu.txt b/backend/python/transformers/requirements-cpu.txt index f1e6281b..f99aa18f 100644 --- a/backend/python/transformers/requirements-cpu.txt +++ b/backend/python/transformers/requirements-cpu.txt @@ -1,4 +1,4 @@ -torch +torch==2.4.1 accelerate transformers bitsandbytes \ No newline at end of file diff --git a/backend/python/transformers/requirements-cublas11.txt b/backend/python/transformers/requirements-cublas11.txt index 0abd72d9..2c1d0755 100644 --- a/backend/python/transformers/requirements-cublas11.txt +++ b/backend/python/transformers/requirements-cublas11.txt @@ -1,5 +1,5 @@ --extra-index-url https://download.pytorch.org/whl/cu118 -torch +torch==2.4.1+cu118 accelerate transformers bitsandbytes \ No newline at end of file diff --git a/backend/python/transformers/requirements-cublas12.txt b/backend/python/transformers/requirements-cublas12.txt index f1e6281b..f99aa18f 100644 --- a/backend/python/transformers/requirements-cublas12.txt +++ b/backend/python/transformers/requirements-cublas12.txt @@ -1,4 +1,4 @@ -torch +torch==2.4.1 accelerate transformers bitsandbytes \ No newline at end of file diff --git a/backend/python/transformers/requirements-hipblas.txt b/backend/python/transformers/requirements-hipblas.txt index f6900af1..f9577fab 100644 --- a/backend/python/transformers/requirements-hipblas.txt +++ b/backend/python/transformers/requirements-hipblas.txt @@ -1,5 +1,5 @@ --extra-index-url https://download.pytorch.org/whl/rocm6.0 -torch +torch==2.4.1+rocm6.0 accelerate transformers bitsandbytes \ No newline at end of file diff --git a/backend/python/vall-e-x/requirements-cpu.txt 
b/backend/python/vall-e-x/requirements-cpu.txt index 3a3304c0..0aad8812 100644 --- a/backend/python/vall-e-x/requirements-cpu.txt +++ b/backend/python/vall-e-x/requirements-cpu.txt @@ -1,3 +1,3 @@ accelerate -torch -torchaudio \ No newline at end of file +torch==2.4.1 +torchaudio==2.4.1 \ No newline at end of file diff --git a/backend/python/vall-e-x/requirements-cublas11.txt b/backend/python/vall-e-x/requirements-cublas11.txt index 4e0a151a..c45de5b7 100644 --- a/backend/python/vall-e-x/requirements-cublas11.txt +++ b/backend/python/vall-e-x/requirements-cublas11.txt @@ -1,4 +1,4 @@ --extra-index-url https://download.pytorch.org/whl/cu118 accelerate -torch -torchaudio \ No newline at end of file +torch==2.4.1+cu118 +torchaudio==2.4.1+cu118 \ No newline at end of file diff --git a/backend/python/vall-e-x/requirements-cublas12.txt b/backend/python/vall-e-x/requirements-cublas12.txt index 3a3304c0..0aad8812 100644 --- a/backend/python/vall-e-x/requirements-cublas12.txt +++ b/backend/python/vall-e-x/requirements-cublas12.txt @@ -1,3 +1,3 @@ accelerate -torch -torchaudio \ No newline at end of file +torch==2.4.1 +torchaudio==2.4.1 \ No newline at end of file diff --git a/backend/python/vllm/requirements-cpu.txt b/backend/python/vllm/requirements-cpu.txt index 765a1ef5..84058901 100644 --- a/backend/python/vllm/requirements-cpu.txt +++ b/backend/python/vllm/requirements-cpu.txt @@ -1,3 +1,3 @@ accelerate -torch +torch==2.4.1 transformers \ No newline at end of file diff --git a/backend/python/vllm/requirements-cublas11.txt b/backend/python/vllm/requirements-cublas11.txt index c448a91d..a6e49c1f 100644 --- a/backend/python/vllm/requirements-cublas11.txt +++ b/backend/python/vllm/requirements-cublas11.txt @@ -1,5 +1,5 @@ --extra-index-url https://download.pytorch.org/whl/cu118 accelerate -torch +torch==2.4.1+cu118 transformers bitsandbytes \ No newline at end of file diff --git a/backend/python/vllm/requirements-cublas12.txt b/backend/python/vllm/requirements-cublas12.txt 
index e007f094..2dfc28f9 100644 --- a/backend/python/vllm/requirements-cublas12.txt +++ b/backend/python/vllm/requirements-cublas12.txt @@ -1,4 +1,4 @@ accelerate -torch +torch==2.4.1 transformers bitsandbytes \ No newline at end of file diff --git a/backend/python/vllm/requirements-hipblas.txt b/backend/python/vllm/requirements-hipblas.txt index 9dff852d..f580314a 100644 --- a/backend/python/vllm/requirements-hipblas.txt +++ b/backend/python/vllm/requirements-hipblas.txt @@ -1,5 +1,5 @@ --extra-index-url https://download.pytorch.org/whl/rocm6.0 accelerate -torch +torch==2.4.1+rocm6.0 transformers bitsandbytes \ No newline at end of file From 134ea1a37b3cd097317c5c40effa367aeabc794c Mon Sep 17 00:00:00 2001 From: Ettore Di Giacinto Date: Fri, 18 Oct 2024 10:31:21 +0200 Subject: [PATCH 0380/1530] fix(dependencies): move deps that brings pytorch (#3873) * fix(dependencies): move deps that brings pytorch Signed-off-by: Ettore Di Giacinto * chore(deps): pin llvmlite Signed-off-by: Ettore Di Giacinto --------- Signed-off-by: Ettore Di Giacinto --- backend/python/coqui/requirements-cpu.txt | 3 ++- backend/python/coqui/requirements-cublas11.txt | 3 ++- backend/python/coqui/requirements-cublas12.txt | 3 ++- backend/python/coqui/requirements-hipblas.txt | 3 ++- backend/python/coqui/requirements-intel.txt | 3 ++- backend/python/coqui/requirements.txt | 1 - backend/python/openvoice/requirements-cpu.txt | 4 +++- backend/python/openvoice/requirements-cublas11.txt | 2 ++ backend/python/openvoice/requirements-cublas12.txt | 4 +++- backend/python/openvoice/requirements-hipblas.txt | 4 +++- backend/python/openvoice/requirements-intel.txt | 1 + backend/python/openvoice/requirements.txt | 3 +-- 12 files changed, 23 insertions(+), 11 deletions(-) diff --git a/backend/python/coqui/requirements-cpu.txt b/backend/python/coqui/requirements-cpu.txt index 2021fc20..c5201d62 100644 --- a/backend/python/coqui/requirements-cpu.txt +++ b/backend/python/coqui/requirements-cpu.txt @@ -1,3 +1,4 @@ 
transformers accelerate -torch==2.4.1 \ No newline at end of file +torch==2.4.1 +coqui-tts \ No newline at end of file diff --git a/backend/python/coqui/requirements-cublas11.txt b/backend/python/coqui/requirements-cublas11.txt index 9f8fe9ff..35fd4f42 100644 --- a/backend/python/coqui/requirements-cublas11.txt +++ b/backend/python/coqui/requirements-cublas11.txt @@ -2,4 +2,5 @@ torch==2.4.1+cu118 torchaudio==2.4.1+cu118 transformers -accelerate \ No newline at end of file +accelerate +coqui-tts \ No newline at end of file diff --git a/backend/python/coqui/requirements-cublas12.txt b/backend/python/coqui/requirements-cublas12.txt index 53716949..fac719d4 100644 --- a/backend/python/coqui/requirements-cublas12.txt +++ b/backend/python/coqui/requirements-cublas12.txt @@ -1,4 +1,5 @@ torch==2.4.1 torchaudio==2.4.1 transformers -accelerate \ No newline at end of file +accelerate +coqui-tts \ No newline at end of file diff --git a/backend/python/coqui/requirements-hipblas.txt b/backend/python/coqui/requirements-hipblas.txt index 1d54fb16..359e5867 100644 --- a/backend/python/coqui/requirements-hipblas.txt +++ b/backend/python/coqui/requirements-hipblas.txt @@ -2,4 +2,5 @@ torch==2.4.1+rocm6.0 torchaudio==2.4.1+rocm6.0 transformers -accelerate \ No newline at end of file +accelerate +coqui-tts \ No newline at end of file diff --git a/backend/python/coqui/requirements-intel.txt b/backend/python/coqui/requirements-intel.txt index c0e4dcaa..de3b4ee4 100644 --- a/backend/python/coqui/requirements-intel.txt +++ b/backend/python/coqui/requirements-intel.txt @@ -5,4 +5,5 @@ torchaudio optimum[openvino] setuptools==75.1.0 # https://github.com/mudler/LocalAI/issues/2406 transformers -accelerate \ No newline at end of file +accelerate +coqui-tts \ No newline at end of file diff --git a/backend/python/coqui/requirements.txt b/backend/python/coqui/requirements.txt index 0ad62f70..6be5d8ac 100644 --- a/backend/python/coqui/requirements.txt +++ b/backend/python/coqui/requirements.txt 
@@ -1,4 +1,3 @@ -coqui-tts grpcio==1.67.0 protobuf certifi \ No newline at end of file diff --git a/backend/python/openvoice/requirements-cpu.txt b/backend/python/openvoice/requirements-cpu.txt index 20f84cf7..c5368563 100644 --- a/backend/python/openvoice/requirements-cpu.txt +++ b/backend/python/openvoice/requirements-cpu.txt @@ -1 +1,3 @@ -torch==2.4.1 \ No newline at end of file +torch==2.4.1 +git+https://github.com/myshell-ai/MeloTTS.git +git+https://github.com/myshell-ai/OpenVoice.git \ No newline at end of file diff --git a/backend/python/openvoice/requirements-cublas11.txt b/backend/python/openvoice/requirements-cublas11.txt index cf469472..3828e652 100644 --- a/backend/python/openvoice/requirements-cublas11.txt +++ b/backend/python/openvoice/requirements-cublas11.txt @@ -1,2 +1,4 @@ --extra-index-url https://download.pytorch.org/whl/cu118 torch==2.4.1+cu118 +git+https://github.com/myshell-ai/MeloTTS.git +git+https://github.com/myshell-ai/OpenVoice.git \ No newline at end of file diff --git a/backend/python/openvoice/requirements-cublas12.txt b/backend/python/openvoice/requirements-cublas12.txt index 20f84cf7..c5368563 100644 --- a/backend/python/openvoice/requirements-cublas12.txt +++ b/backend/python/openvoice/requirements-cublas12.txt @@ -1 +1,3 @@ -torch==2.4.1 \ No newline at end of file +torch==2.4.1 +git+https://github.com/myshell-ai/MeloTTS.git +git+https://github.com/myshell-ai/OpenVoice.git \ No newline at end of file diff --git a/backend/python/openvoice/requirements-hipblas.txt b/backend/python/openvoice/requirements-hipblas.txt index 76018445..453ce542 100644 --- a/backend/python/openvoice/requirements-hipblas.txt +++ b/backend/python/openvoice/requirements-hipblas.txt @@ -1,2 +1,4 @@ --extra-index-url https://download.pytorch.org/whl/rocm6.0 -torch \ No newline at end of file +torch==2.4.1+rocm6.0 +git+https://github.com/myshell-ai/MeloTTS.git +git+https://github.com/myshell-ai/OpenVoice.git \ No newline at end of file diff --git 
a/backend/python/openvoice/requirements-intel.txt b/backend/python/openvoice/requirements-intel.txt index b446386f..46c67465 100644 --- a/backend/python/openvoice/requirements-intel.txt +++ b/backend/python/openvoice/requirements-intel.txt @@ -21,3 +21,4 @@ jieba==0.42.1 gradio==4.44.1 langid==1.1.6 git+https://github.com/myshell-ai/MeloTTS.git +git+https://github.com/myshell-ai/OpenVoice.git diff --git a/backend/python/openvoice/requirements.txt b/backend/python/openvoice/requirements.txt index fd1268d0..a6628cdb 100644 --- a/backend/python/openvoice/requirements.txt +++ b/backend/python/openvoice/requirements.txt @@ -16,5 +16,4 @@ cn2an==0.5.22 jieba==0.42.1 gradio langid==1.1.6 -git+https://github.com/myshell-ai/MeloTTS.git -git+https://github.com/myshell-ai/OpenVoice.git +llvmlite==0.43.0 \ No newline at end of file From 8f2cf52f3bb30d384c873e8513a0d412b7e8abaf Mon Sep 17 00:00:00 2001 From: Ettore Di Giacinto Date: Fri, 18 Oct 2024 15:18:56 +0200 Subject: [PATCH 0381/1530] chore(deps): pin packaging (#3875) Signed-off-by: Ettore Di Giacinto --- backend/python/coqui/requirements.txt | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/backend/python/coqui/requirements.txt b/backend/python/coqui/requirements.txt index 6be5d8ac..d0c6d72f 100644 --- a/backend/python/coqui/requirements.txt +++ b/backend/python/coqui/requirements.txt @@ -1,3 +1,4 @@ grpcio==1.67.0 protobuf -certifi \ No newline at end of file +certifi +packaging==24.1 \ No newline at end of file From 398a9efa3afa45b9c39fec15357e261d8d6c0b59 Mon Sep 17 00:00:00 2001 From: Ettore Di Giacinto Date: Fri, 18 Oct 2024 16:59:31 +0200 Subject: [PATCH 0382/1530] chore(deps): pin numpy (#3876) Signed-off-by: Ettore Di Giacinto --- backend/python/openvoice/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/backend/python/openvoice/requirements.txt b/backend/python/openvoice/requirements.txt index a6628cdb..ff463748 100644 --- 
a/backend/python/openvoice/requirements.txt +++ b/backend/python/openvoice/requirements.txt @@ -4,7 +4,7 @@ librosa faster-whisper pydub==0.25.1 wavmark==0.0.3 -numpy +numpy==1.22.0 eng_to_ipa==0.0.2 inflect unidecode From 9c425d55f6fe1b8656e90dd3762b065c9282f150 Mon Sep 17 00:00:00 2001 From: Ettore Di Giacinto Date: Fri, 18 Oct 2024 18:21:48 +0200 Subject: [PATCH 0383/1530] chore(deps): pin networkx (#3878) Signed-off-by: Ettore Di Giacinto --- backend/python/openvoice/requirements.txt | 1 + 1 file changed, 1 insertion(+) diff --git a/backend/python/openvoice/requirements.txt b/backend/python/openvoice/requirements.txt index ff463748..9cc35b88 100644 --- a/backend/python/openvoice/requirements.txt +++ b/backend/python/openvoice/requirements.txt @@ -13,6 +13,7 @@ openai python-dotenv pypinyin cn2an==0.5.22 +networkx==3.4.1 jieba==0.42.1 gradio langid==1.1.6 From 963e5903fc652c499ee5acf07cf2465e6165599e Mon Sep 17 00:00:00 2001 From: Ettore Di Giacinto Date: Fri, 18 Oct 2024 19:36:55 +0200 Subject: [PATCH 0384/1530] chore(deps): downgrade networkx Signed-off-by: Ettore Di Giacinto --- backend/python/openvoice/requirements.txt | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/backend/python/openvoice/requirements.txt b/backend/python/openvoice/requirements.txt index 9cc35b88..259461eb 100644 --- a/backend/python/openvoice/requirements.txt +++ b/backend/python/openvoice/requirements.txt @@ -13,8 +13,8 @@ openai python-dotenv pypinyin cn2an==0.5.22 -networkx==3.4.1 +networkx==2.8.8 jieba==0.42.1 gradio langid==1.1.6 -llvmlite==0.43.0 \ No newline at end of file +llvmlite==0.43.0 From cffecda48c60727015a0d42bf0ce4ae3c6ed1c43 Mon Sep 17 00:00:00 2001 From: "LocalAI [bot]" <139863280+localai-bot@users.noreply.github.com> Date: Fri, 18 Oct 2024 23:43:38 +0200 Subject: [PATCH 0385/1530] chore: :arrow_up: Update ggerganov/llama.cpp to `afd9909a6481402844aecefa8a8908afdd7f52f1` (#3879) :arrow_up: Update ggerganov/llama.cpp Signed-off-by: 
github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com> Co-authored-by: mudler <2420543+mudler@users.noreply.github.com> --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index 48c8d188..ddc8a42d 100644 --- a/Makefile +++ b/Makefile @@ -8,7 +8,7 @@ DETECT_LIBS?=true # llama.cpp versions GOLLAMA_REPO?=https://github.com/go-skynet/go-llama.cpp GOLLAMA_VERSION?=2b57a8ae43e4699d3dc5d1496a1ccd42922993be -CPPLLAMA_VERSION?=99bd4ac28c32cd17c0e337ff5601393b033dc5fc +CPPLLAMA_VERSION?=afd9909a6481402844aecefa8a8908afdd7f52f1 # go-rwkv version RWKV_REPO?=https://github.com/donomii/go-rwkv.cpp From 87f78ecfa9447327c21318328dab2a0a3a053846 Mon Sep 17 00:00:00 2001 From: Ettore Di Giacinto Date: Sat, 19 Oct 2024 09:00:25 +0200 Subject: [PATCH 0386/1530] chore(open voice): pin gradio version in requirements.txt Signed-off-by: Ettore Di Giacinto --- backend/python/openvoice/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/backend/python/openvoice/requirements.txt b/backend/python/openvoice/requirements.txt index 259461eb..c74eba87 100644 --- a/backend/python/openvoice/requirements.txt +++ b/backend/python/openvoice/requirements.txt @@ -15,6 +15,6 @@ pypinyin cn2an==0.5.22 networkx==2.8.8 jieba==0.42.1 -gradio +gradio==3.48.0 langid==1.1.6 llvmlite==0.43.0 From cdbcac6a78464375833ba23551bf9b22d7597e04 Mon Sep 17 00:00:00 2001 From: Ettore Di Giacinto Date: Sat, 19 Oct 2024 11:16:23 +0200 Subject: [PATCH 0387/1530] fix(sycl): drop gradio pin Signed-off-by: Ettore Di Giacinto --- backend/python/openvoice/requirements-intel.txt | 1 - 1 file changed, 1 deletion(-) diff --git a/backend/python/openvoice/requirements-intel.txt b/backend/python/openvoice/requirements-intel.txt index 46c67465..c37bb1ce 100644 --- a/backend/python/openvoice/requirements-intel.txt +++ b/backend/python/openvoice/requirements-intel.txt @@ -18,7 +18,6 @@ python-dotenv pypinyin==0.50.0 cn2an==0.5.22 
jieba==0.42.1 -gradio==4.44.1 langid==1.1.6 git+https://github.com/myshell-ai/MeloTTS.git git+https://github.com/myshell-ai/OpenVoice.git From 7ee25ecfb3c13e2c57b98e37cb783a6a71152f75 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Sat, 19 Oct 2024 11:24:34 +0000 Subject: [PATCH 0388/1530] chore(deps): Bump gradio from 3.48.0 to 5.0.0 in /backend/python/openvoice in the pip group (#3880) chore(deps): Bump gradio in /backend/python/openvoice in the pip group Bumps the pip group in /backend/python/openvoice with 1 update: [gradio](https://github.com/gradio-app/gradio). Updates `gradio` from 3.48.0 to 5.0.0 - [Release notes](https://github.com/gradio-app/gradio/releases) - [Changelog](https://github.com/gradio-app/gradio/blob/main/CHANGELOG.md) - [Commits](https://github.com/gradio-app/gradio/compare/gradio@3.48.0...gradio@5.0.0) --- updated-dependencies: - dependency-name: gradio dependency-type: direct:production dependency-group: pip ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- backend/python/openvoice/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/backend/python/openvoice/requirements.txt b/backend/python/openvoice/requirements.txt index c74eba87..c2e3f01f 100644 --- a/backend/python/openvoice/requirements.txt +++ b/backend/python/openvoice/requirements.txt @@ -15,6 +15,6 @@ pypinyin cn2an==0.5.22 networkx==2.8.8 jieba==0.42.1 -gradio==3.48.0 +gradio==5.0.0 langid==1.1.6 llvmlite==0.43.0 From 7c502ec2093d2694e5bb2fab527a3bfa7b24d4b9 Mon Sep 17 00:00:00 2001 From: Ettore Di Giacinto Date: Sat, 19 Oct 2024 13:54:40 +0200 Subject: [PATCH 0389/1530] Revert "chore(deps): Bump gradio from 3.48.0 to 5.0.0 in /backend/python/openvoice in the pip group" (#3881) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Revert "chore(deps): Bump gradio from 3.48.0 to 5.0.0 in /backend/python/open…" This reverts commit 7ee25ecfb3c13e2c57b98e37cb783a6a71152f75. 
--- backend/python/openvoice/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/backend/python/openvoice/requirements.txt b/backend/python/openvoice/requirements.txt index c2e3f01f..c74eba87 100644 --- a/backend/python/openvoice/requirements.txt +++ b/backend/python/openvoice/requirements.txt @@ -15,6 +15,6 @@ pypinyin cn2an==0.5.22 networkx==2.8.8 jieba==0.42.1 -gradio==5.0.0 +gradio==3.48.0 langid==1.1.6 llvmlite==0.43.0 From 64721606b90a816394496f094b5ebf24240a203e Mon Sep 17 00:00:00 2001 From: Ettore Di Giacinto Date: Sat, 19 Oct 2024 13:56:46 +0200 Subject: [PATCH 0390/1530] chore(does): pin deps in requirements-intel.txt Signed-off-by: Ettore Di Giacinto --- backend/python/openvoice/requirements-intel.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/backend/python/openvoice/requirements-intel.txt b/backend/python/openvoice/requirements-intel.txt index c37bb1ce..d018e6a5 100644 --- a/backend/python/openvoice/requirements-intel.txt +++ b/backend/python/openvoice/requirements-intel.txt @@ -12,7 +12,7 @@ numpy==1.26.4 eng_to_ipa==0.0.2 inflect==7.0.0 unidecode==1.3.7 -whisper-timestamped==1.15.4 +whisper-timestamped==1.14.2 openai python-dotenv pypinyin==0.50.0 From c967ac37bc868e3242a197bfdb3df429be7f2730 Mon Sep 17 00:00:00 2001 From: Ettore Di Giacinto Date: Sat, 19 Oct 2024 16:01:31 +0200 Subject: [PATCH 0391/1530] chore(openvoice/deps): pin numpy in requirements-intel.txt Signed-off-by: Ettore Di Giacinto --- backend/python/openvoice/requirements-intel.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/backend/python/openvoice/requirements-intel.txt b/backend/python/openvoice/requirements-intel.txt index d018e6a5..f7c2cab0 100644 --- a/backend/python/openvoice/requirements-intel.txt +++ b/backend/python/openvoice/requirements-intel.txt @@ -8,7 +8,7 @@ librosa==0.9.1 faster-whisper==1.0.3 pydub==0.25.1 wavmark==0.0.3 -numpy==1.26.4 +numpy==1.22.0 eng_to_ipa==0.0.2 inflect==7.0.0 
unidecode==1.3.7 From 011565aaa34bfbd5913e473135f2810a618a0e07 Mon Sep 17 00:00:00 2001 From: Ettore Di Giacinto Date: Sat, 19 Oct 2024 23:04:42 +0200 Subject: [PATCH 0392/1530] chore(openvoice): pin faster-whisper in requirements-intel.txt Signed-off-by: Ettore Di Giacinto --- backend/python/openvoice/requirements-intel.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/backend/python/openvoice/requirements-intel.txt b/backend/python/openvoice/requirements-intel.txt index f7c2cab0..12195016 100644 --- a/backend/python/openvoice/requirements-intel.txt +++ b/backend/python/openvoice/requirements-intel.txt @@ -5,7 +5,7 @@ optimum[openvino] grpcio==1.67.0 protobuf librosa==0.9.1 -faster-whisper==1.0.3 +faster-whisper==0.9.0 pydub==0.25.1 wavmark==0.0.3 numpy==1.22.0 From 32db7879915a3910671872ac9e5245e44e9e032e Mon Sep 17 00:00:00 2001 From: Ettore Di Giacinto Date: Sun, 20 Oct 2024 00:26:49 +0200 Subject: [PATCH 0393/1530] chore(deps): bump llama-cpp to cda0e4b648dde8fac162b3430b14a99597d3d74f (#3884) Signed-off-by: Ettore Di Giacinto --- Makefile | 2 +- backend/cpp/llama/grpc-server.cpp | 41 +++++++++++++++++++++++++++---- 2 files changed, 37 insertions(+), 6 deletions(-) diff --git a/Makefile b/Makefile index ddc8a42d..bccab4b0 100644 --- a/Makefile +++ b/Makefile @@ -8,7 +8,7 @@ DETECT_LIBS?=true # llama.cpp versions GOLLAMA_REPO?=https://github.com/go-skynet/go-llama.cpp GOLLAMA_VERSION?=2b57a8ae43e4699d3dc5d1496a1ccd42922993be -CPPLLAMA_VERSION?=afd9909a6481402844aecefa8a8908afdd7f52f1 +CPPLLAMA_VERSION?=cda0e4b648dde8fac162b3430b14a99597d3d74f # go-rwkv version RWKV_REPO?=https://github.com/donomii/go-rwkv.cpp diff --git a/backend/cpp/llama/grpc-server.cpp b/backend/cpp/llama/grpc-server.cpp index c61b9d4b..d6e1b995 100644 --- a/backend/cpp/llama/grpc-server.cpp +++ b/backend/cpp/llama/grpc-server.cpp @@ -391,6 +391,39 @@ struct llama_metrics { } }; +struct llava_embd_batch { + std::vector pos; + std::vector n_seq_id; + std::vector seq_id_0; 
+ std::vector seq_ids; + std::vector logits; + llama_batch batch; + llava_embd_batch(float * embd, int32_t n_tokens, llama_pos pos_0, llama_seq_id seq_id) { + pos .resize(n_tokens); + n_seq_id.resize(n_tokens); + seq_ids .resize(n_tokens + 1); + logits .resize(n_tokens); + seq_id_0.resize(1); + seq_id_0[0] = seq_id; + seq_ids [n_tokens] = nullptr; + batch = { + /*n_tokens =*/ n_tokens, + /*tokens =*/ nullptr, + /*embd =*/ embd, + /*pos =*/ pos.data(), + /*n_seq_id =*/ n_seq_id.data(), + /*seq_id =*/ seq_ids.data(), + /*logits =*/ logits.data(), + }; + for (int i = 0; i < n_tokens; i++) { + batch.pos [i] = pos_0 + i; + batch.n_seq_id[i] = 1; + batch.seq_id [i] = seq_id_0.data(); + batch.logits [i] = false; + } + } +}; + struct llama_server_context { llama_model *model = nullptr; @@ -934,7 +967,6 @@ struct llama_server_context batch.n_seq_id + i, batch.seq_id + i, batch.logits + i, - 0, 0, 0, // unused }; if (llama_decode(ctx, batch_view) != 0) { @@ -1379,7 +1411,6 @@ struct llama_server_context batch.n_seq_id + i, batch.seq_id + i, batch.logits + i, - 0, 0, 0, // unused }; if (llama_decode(ctx, batch_view)) { @@ -1398,8 +1429,9 @@ struct llama_server_context } const int n_embd = llama_n_embd(model); - llama_batch batch_img = { n_eval, nullptr, (img.image_embedding + i * n_embd), nullptr, nullptr, nullptr, nullptr, slot.n_past, 1, 0, }; - if (llama_decode(ctx, batch_img)) + float * embd = img.image_embedding + i * n_embd; + llava_embd_batch llava_batch = llava_embd_batch(embd, n_eval, slot.n_past, 0); + if (llama_decode(ctx, llava_batch.batch)) { LOG("%s : failed to eval image\n", __func__); return false; @@ -1904,7 +1936,6 @@ struct llama_server_context batch.n_seq_id + i, batch.seq_id + i, batch.logits + i, - 0, 0, 0, // unused }; const int ret = llama_decode(ctx, batch_view); From 26c4058be462c2d9d579fb00621efad1aae8268a Mon Sep 17 00:00:00 2001 From: Ettore Di Giacinto Date: Sun, 20 Oct 2024 11:44:28 +0200 Subject: [PATCH 0394/1530] fix(vllm): do not set videos 
if we don't have any (#3885) Signed-off-by: Ettore Di Giacinto --- backend/python/vllm/backend.py | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) diff --git a/backend/python/vllm/backend.py b/backend/python/vllm/backend.py index 98f292ab..023a14bc 100644 --- a/backend/python/vllm/backend.py +++ b/backend/python/vllm/backend.py @@ -219,13 +219,15 @@ class BackendServicer(backend_pb2_grpc.BackendServicer): # Generate text using the LLM engine request_id = random_uuid() print(f"Generating text with request_id: {request_id}", file=sys.stderr) + multi_modal_data = {} + if image_data: + multi_modal_data["image"] = image_data + if video_data: + multi_modal_data["video"] = video_data outputs = self.llm.generate( { - "prompt": prompt, - "multi_modal_data": { - "image": image_data if image_data else None, - "video": video_data if video_data else None, - } if image_data or video_data else None, + "prompt": prompt, + "multi_modal_data": multi_modal_data if multi_modal_data else None, }, sampling_params=sampling_params, request_id=request_id, @@ -279,7 +281,7 @@ class BackendServicer(backend_pb2_grpc.BackendServicer): return image except Exception as e: print(f"Error loading image {image_path}: {e}", file=sys.stderr) - return self.load_video(image_path) + return None def load_video(self, video_path: str): """ From 313ea2c4d21ff816707d7c80651276f3ac8a79de Mon Sep 17 00:00:00 2001 From: "LocalAI [bot]" <139863280+localai-bot@users.noreply.github.com> Date: Sun, 20 Oct 2024 23:40:26 +0200 Subject: [PATCH 0395/1530] chore: :arrow_up: Update ggerganov/llama.cpp to `45f097645efb11b6d09a5b4adbbfd7c312ac0126` (#3889) :arrow_up: Update ggerganov/llama.cpp Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com> Co-authored-by: mudler <2420543+mudler@users.noreply.github.com> --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index bccab4b0..a64eb71e 100644 --- a/Makefile +++ b/Makefile 
@@ -8,7 +8,7 @@ DETECT_LIBS?=true # llama.cpp versions GOLLAMA_REPO?=https://github.com/go-skynet/go-llama.cpp GOLLAMA_VERSION?=2b57a8ae43e4699d3dc5d1496a1ccd42922993be -CPPLLAMA_VERSION?=cda0e4b648dde8fac162b3430b14a99597d3d74f +CPPLLAMA_VERSION?=45f097645efb11b6d09a5b4adbbfd7c312ac0126 # go-rwkv version RWKV_REPO?=https://github.com/donomii/go-rwkv.cpp From 015835dba2854572d50e167b7cade05af41ed214 Mon Sep 17 00:00:00 2001 From: Ettore Di Giacinto Date: Mon, 21 Oct 2024 11:47:52 +0200 Subject: [PATCH 0396/1530] models(gallery): add phi-3 vision (#3890) Signed-off-by: Ettore Di Giacinto --- gallery/index.yaml | 13 +++++++++++++ gallery/phi-3-vision.yaml | 23 +++++++++++++++++++++++ 2 files changed, 36 insertions(+) create mode 100644 gallery/phi-3-vision.yaml diff --git a/gallery/index.yaml b/gallery/index.yaml index 46cdc954..28cd50b6 100644 --- a/gallery/index.yaml +++ b/gallery/index.yaml @@ -6225,6 +6225,19 @@ - filename: Phi-3.5-mini-TitanFusion-0.2.Q4_K_M.gguf sha256: 9579305712f2bca246914639c4873acdc1e7bc64ac2c7db0230df4f0ca0ef234 uri: huggingface://mradermacher/Phi-3.5-mini-TitanFusion-0.2-GGUF/Phi-3.5-mini-TitanFusion-0.2.Q4_K_M.gguf +- !!merge <<: *phi-3 + name: "phi-3-vision:vllm" + url: "github:mudler/LocalAI/gallery/phi-3-vision.yaml@master" + description: | + Phi-3.5-vision is a lightweight, state-of-the-art open multimodal model built upon datasets which include - synthetic data and filtered publicly available websites - with a focus on very high-quality, reasoning dense data both on text and vision. The model belongs to the Phi-3 model family, and the multimodal version comes with 128K context length (in tokens) it can support. The model underwent a rigorous enhancement process, incorporating both supervised fine-tuning and direct preference optimization to ensure precise instruction adherence and robust safety measures. 
+- !!merge <<: *phi-3 + name: "phi-3.5-vision:vllm" + url: "github:mudler/LocalAI/gallery/phi-3-vision.yaml@master" + override: + parameters: + model: microsoft/Phi-3.5-vision-instruct + description: | + Phi-3.5-vision is a lightweight, state-of-the-art open multimodal model built upon datasets which include - synthetic data and filtered publicly available websites - with a focus on very high-quality, reasoning dense data both on text and vision. The model belongs to the Phi-3 model family, and the multimodal version comes with 128K context length (in tokens) it can support. The model underwent a rigorous enhancement process, incorporating both supervised fine-tuning and direct preference optimization to ensure precise instruction adherence and robust safety measures. - &hermes-2-pro-mistral ### START Hermes url: "github:mudler/LocalAI/gallery/hermes-2-pro-mistral.yaml@master" diff --git a/gallery/phi-3-vision.yaml b/gallery/phi-3-vision.yaml new file mode 100644 index 00000000..1a3d03af --- /dev/null +++ b/gallery/phi-3-vision.yaml @@ -0,0 +1,23 @@ +--- +name: "phi3-vision" + +config_file: | + name: phi3-vision + backend: vllm + parameters: + model: microsoft/Phi-3-vision-128k-instruct + trust_remote_code: true + max_model_len: 32768 + template: + chat_message: |- + <|{{ .RoleName }}|> + {{.Content}}<|end|> + chat: >- + {{.Input}} + + <|assistant|> + + completion: | + {{.Input}} + use_tokenizer_template: false + image: "<|image_{{ add1 .ID }}|>\n{{.Text}}" From ee5ca49bc1f405e63e7ed9c5cc94016ad6e76024 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 22 Oct 2024 09:29:56 +0200 Subject: [PATCH 0397/1530] chore(deps): Bump llama-index from 0.11.17 to 0.11.19 in /examples/chainlit (#3893) chore(deps): Bump llama-index in /examples/chainlit Bumps [llama-index](https://github.com/run-llama/llama_index) from 0.11.17 to 0.11.19. 
- [Release notes](https://github.com/run-llama/llama_index/releases) - [Changelog](https://github.com/run-llama/llama_index/blob/main/CHANGELOG.md) - [Commits](https://github.com/run-llama/llama_index/compare/v0.11.17...v0.11.19) --- updated-dependencies: - dependency-name: llama-index dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- examples/chainlit/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/chainlit/requirements.txt b/examples/chainlit/requirements.txt index 0d60c193..20494895 100644 --- a/examples/chainlit/requirements.txt +++ b/examples/chainlit/requirements.txt @@ -1,4 +1,4 @@ -llama_index==0.11.17 +llama_index==0.11.19 requests==2.32.3 weaviate_client==4.8.1 transformers From 5e0847b3d71da115a52042cf0a702d412a740af5 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 22 Oct 2024 09:30:16 +0200 Subject: [PATCH 0398/1530] chore(deps): Bump weaviate-client from 4.8.1 to 4.9.0 in /examples/chainlit (#3894) chore(deps): Bump weaviate-client in /examples/chainlit Bumps [weaviate-client](https://github.com/weaviate/weaviate-python-client) from 4.8.1 to 4.9.0. - [Release notes](https://github.com/weaviate/weaviate-python-client/releases) - [Changelog](https://github.com/weaviate/weaviate-python-client/blob/main/docs/changelog.rst) - [Commits](https://github.com/weaviate/weaviate-python-client/compare/v4.8.1...v4.9.0) --- updated-dependencies: - dependency-name: weaviate-client dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- examples/chainlit/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/chainlit/requirements.txt b/examples/chainlit/requirements.txt index 20494895..3f4adf48 100644 --- a/examples/chainlit/requirements.txt +++ b/examples/chainlit/requirements.txt @@ -1,6 +1,6 @@ llama_index==0.11.19 requests==2.32.3 -weaviate_client==4.8.1 +weaviate_client==4.9.0 transformers torch chainlit From e0c876aae1dd6cc55376164ea13b5bb9a87a4d43 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 22 Oct 2024 09:30:28 +0200 Subject: [PATCH 0399/1530] chore(deps): Bump langchain from 0.3.3 to 0.3.4 in /examples/functions (#3900) Bumps [langchain](https://github.com/langchain-ai/langchain) from 0.3.3 to 0.3.4. - [Release notes](https://github.com/langchain-ai/langchain/releases) - [Commits](https://github.com/langchain-ai/langchain/compare/langchain==0.3.3...langchain==0.3.4) --- updated-dependencies: - dependency-name: langchain dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- examples/functions/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/functions/requirements.txt b/examples/functions/requirements.txt index 48e6a25a..2ff4cb2a 100644 --- a/examples/functions/requirements.txt +++ b/examples/functions/requirements.txt @@ -1,2 +1,2 @@ -langchain==0.3.3 +langchain==0.3.4 openai==1.51.2 From db401b4d8498137009596f8dbd63d474e1dead42 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 22 Oct 2024 09:30:52 +0200 Subject: [PATCH 0400/1530] chore(deps): Bump langchain-community from 0.3.2 to 0.3.3 in /examples/langchain/langchainpy-localai-example (#3923) chore(deps): Bump langchain-community Bumps [langchain-community](https://github.com/langchain-ai/langchain) from 0.3.2 to 0.3.3. - [Release notes](https://github.com/langchain-ai/langchain/releases) - [Commits](https://github.com/langchain-ai/langchain/compare/langchain-community==0.3.2...langchain-community==0.3.3) --- updated-dependencies: - dependency-name: langchain-community dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- examples/langchain/langchainpy-localai-example/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/langchain/langchainpy-localai-example/requirements.txt b/examples/langchain/langchainpy-localai-example/requirements.txt index d4dfe947..8961dd87 100644 --- a/examples/langchain/langchainpy-localai-example/requirements.txt +++ b/examples/langchain/langchainpy-localai-example/requirements.txt @@ -11,7 +11,7 @@ frozenlist==1.4.1 greenlet==3.1.1 idna==3.10 langchain==0.3.3 -langchain-community==0.3.2 +langchain-community==0.3.3 marshmallow==3.22.0 marshmallow-enum==1.5.1 multidict==6.1.0 From 103af480c79dd6a062e7aca0334f8f75c4c416bd Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 22 Oct 2024 09:31:15 +0200 Subject: [PATCH 0401/1530] chore(deps): Bump docs/themes/hugo-theme-relearn from `007cc20` to `06e70da` (#3932) chore(deps): Bump docs/themes/hugo-theme-relearn Bumps [docs/themes/hugo-theme-relearn](https://github.com/McShelby/hugo-theme-relearn) from `007cc20` to `06e70da`. - [Release notes](https://github.com/McShelby/hugo-theme-relearn/releases) - [Commits](https://github.com/McShelby/hugo-theme-relearn/compare/007cc20686f04ca1f911975f20f097175dd72a7f...06e70da8a6fb2043fe7e56b818ff638a309c8239) --- updated-dependencies: - dependency-name: docs/themes/hugo-theme-relearn dependency-type: direct:production ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- docs/themes/hugo-theme-relearn | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/themes/hugo-theme-relearn b/docs/themes/hugo-theme-relearn index 007cc206..06e70da8 160000 --- a/docs/themes/hugo-theme-relearn +++ b/docs/themes/hugo-theme-relearn @@ -1 +1 @@ -Subproject commit 007cc20686f04ca1f911975f20f097175dd72a7f +Subproject commit 06e70da8a6fb2043fe7e56b818ff638a309c8239 From 06951cdd6b5ecbe9d6e0f1f05bac4e2de5327be2 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 22 Oct 2024 09:31:30 +0200 Subject: [PATCH 0402/1530] chore(deps): Bump sqlalchemy from 2.0.35 to 2.0.36 in /examples/langchain/langchainpy-localai-example (#3920) chore(deps): Bump sqlalchemy Bumps [sqlalchemy](https://github.com/sqlalchemy/sqlalchemy) from 2.0.35 to 2.0.36. - [Release notes](https://github.com/sqlalchemy/sqlalchemy/releases) - [Changelog](https://github.com/sqlalchemy/sqlalchemy/blob/main/CHANGES.rst) - [Commits](https://github.com/sqlalchemy/sqlalchemy/commits) --- updated-dependencies: - dependency-name: sqlalchemy dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- examples/langchain/langchainpy-localai-example/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/langchain/langchainpy-localai-example/requirements.txt b/examples/langchain/langchainpy-localai-example/requirements.txt index 8961dd87..731dd7d8 100644 --- a/examples/langchain/langchainpy-localai-example/requirements.txt +++ b/examples/langchain/langchainpy-localai-example/requirements.txt @@ -24,7 +24,7 @@ packaging>=23.2 pydantic==2.9.2 PyYAML==6.0.2 requests==2.32.3 -SQLAlchemy==2.0.35 +SQLAlchemy==2.0.36 tenacity==8.5.0 tqdm==4.66.5 typing-inspect==0.9.0 From 11d34e38dceba2af6131ef2e0b8b938d27b026bd Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 22 Oct 2024 09:31:47 +0200 Subject: [PATCH 0403/1530] chore(deps): Bump yarl from 1.15.2 to 1.15.5 in /examples/langchain/langchainpy-localai-example (#3921) chore(deps): Bump yarl Bumps [yarl](https://github.com/aio-libs/yarl) from 1.15.2 to 1.15.5. - [Release notes](https://github.com/aio-libs/yarl/releases) - [Changelog](https://github.com/aio-libs/yarl/blob/master/CHANGES.rst) - [Commits](https://github.com/aio-libs/yarl/compare/v1.15.2...v1.15.5) --- updated-dependencies: - dependency-name: yarl dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- examples/langchain/langchainpy-localai-example/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/langchain/langchainpy-localai-example/requirements.txt b/examples/langchain/langchainpy-localai-example/requirements.txt index 731dd7d8..5af0a62c 100644 --- a/examples/langchain/langchainpy-localai-example/requirements.txt +++ b/examples/langchain/langchainpy-localai-example/requirements.txt @@ -30,4 +30,4 @@ tqdm==4.66.5 typing-inspect==0.9.0 typing_extensions==4.12.2 urllib3==2.2.3 -yarl==1.15.2 +yarl==1.15.5 From 2810e3ea5c6bf8e784d8c675d05497efc395fdfe Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 22 Oct 2024 09:32:14 +0200 Subject: [PATCH 0404/1530] chore(deps): Bump openai from 1.51.2 to 1.52.0 in /examples/langchain-chroma (#3908) chore(deps): Bump openai in /examples/langchain-chroma Bumps [openai](https://github.com/openai/openai-python) from 1.51.2 to 1.52.0. - [Release notes](https://github.com/openai/openai-python/releases) - [Changelog](https://github.com/openai/openai-python/blob/main/CHANGELOG.md) - [Commits](https://github.com/openai/openai-python/compare/v1.51.2...v1.52.0) --- updated-dependencies: - dependency-name: openai dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- examples/langchain-chroma/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/langchain-chroma/requirements.txt b/examples/langchain-chroma/requirements.txt index 367cee06..34c42b2f 100644 --- a/examples/langchain-chroma/requirements.txt +++ b/examples/langchain-chroma/requirements.txt @@ -1,4 +1,4 @@ langchain==0.3.3 -openai==1.51.2 +openai==1.52.0 chromadb==0.5.13 llama-index==0.11.17 \ No newline at end of file From 42136b6f27bec351524393e38e18c3c634303e52 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 22 Oct 2024 09:32:34 +0200 Subject: [PATCH 0405/1530] chore(deps): Bump llama-index from 0.11.17 to 0.11.19 in /examples/langchain-chroma (#3907) chore(deps): Bump llama-index in /examples/langchain-chroma Bumps [llama-index](https://github.com/run-llama/llama_index) from 0.11.17 to 0.11.19. - [Release notes](https://github.com/run-llama/llama_index/releases) - [Changelog](https://github.com/run-llama/llama_index/blob/main/CHANGELOG.md) - [Commits](https://github.com/run-llama/llama_index/compare/v0.11.17...v0.11.19) --- updated-dependencies: - dependency-name: llama-index dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- examples/langchain-chroma/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/langchain-chroma/requirements.txt b/examples/langchain-chroma/requirements.txt index 34c42b2f..d8d4f480 100644 --- a/examples/langchain-chroma/requirements.txt +++ b/examples/langchain-chroma/requirements.txt @@ -1,4 +1,4 @@ langchain==0.3.3 openai==1.52.0 chromadb==0.5.13 -llama-index==0.11.17 \ No newline at end of file +llama-index==0.11.19 \ No newline at end of file From 6f0c936f74590b8206302b178384889f549f45e9 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 22 Oct 2024 09:32:45 +0200 Subject: [PATCH 0406/1530] chore(deps): Bump marshmallow from 3.22.0 to 3.23.0 in /examples/langchain/langchainpy-localai-example (#3917) chore(deps): Bump marshmallow Bumps [marshmallow](https://github.com/marshmallow-code/marshmallow) from 3.22.0 to 3.23.0. - [Changelog](https://github.com/marshmallow-code/marshmallow/blob/dev/CHANGELOG.rst) - [Commits](https://github.com/marshmallow-code/marshmallow/compare/3.22.0...3.23.0) --- updated-dependencies: - dependency-name: marshmallow dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- examples/langchain/langchainpy-localai-example/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/langchain/langchainpy-localai-example/requirements.txt b/examples/langchain/langchainpy-localai-example/requirements.txt index 5af0a62c..b1afdea5 100644 --- a/examples/langchain/langchainpy-localai-example/requirements.txt +++ b/examples/langchain/langchainpy-localai-example/requirements.txt @@ -12,7 +12,7 @@ greenlet==3.1.1 idna==3.10 langchain==0.3.3 langchain-community==0.3.3 -marshmallow==3.22.0 +marshmallow==3.23.0 marshmallow-enum==1.5.1 multidict==6.1.0 mypy-extensions==1.0.0 From 0f6b4513bfb437369591abdf6dae52589b41b1ef Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 22 Oct 2024 09:32:55 +0200 Subject: [PATCH 0407/1530] chore(deps): Bump openai from 1.51.2 to 1.52.0 in /examples/functions (#3901) Bumps [openai](https://github.com/openai/openai-python) from 1.51.2 to 1.52.0. - [Release notes](https://github.com/openai/openai-python/releases) - [Changelog](https://github.com/openai/openai-python/blob/main/CHANGELOG.md) - [Commits](https://github.com/openai/openai-python/compare/v1.51.2...v1.52.0) --- updated-dependencies: - dependency-name: openai dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- examples/functions/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/functions/requirements.txt b/examples/functions/requirements.txt index 2ff4cb2a..0218e59c 100644 --- a/examples/functions/requirements.txt +++ b/examples/functions/requirements.txt @@ -1,2 +1,2 @@ langchain==0.3.4 -openai==1.51.2 +openai==1.52.0 From b8eb10b6b712fd5267e1a7ac2dcedb42fca0ef08 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 22 Oct 2024 09:33:14 +0200 Subject: [PATCH 0408/1530] chore(deps): Bump yarl from 1.15.5 to 1.16.0 in /examples/langchain/langchainpy-localai-example (#3938) chore(deps): Bump yarl Bumps [yarl](https://github.com/aio-libs/yarl) from 1.15.5 to 1.16.0. - [Release notes](https://github.com/aio-libs/yarl/releases) - [Changelog](https://github.com/aio-libs/yarl/blob/master/CHANGES.rst) - [Commits](https://github.com/aio-libs/yarl/compare/v1.15.5...v1.16.0) --- updated-dependencies: - dependency-name: yarl dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- examples/langchain/langchainpy-localai-example/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/langchain/langchainpy-localai-example/requirements.txt b/examples/langchain/langchainpy-localai-example/requirements.txt index b1afdea5..2cf468f1 100644 --- a/examples/langchain/langchainpy-localai-example/requirements.txt +++ b/examples/langchain/langchainpy-localai-example/requirements.txt @@ -30,4 +30,4 @@ tqdm==4.66.5 typing-inspect==0.9.0 typing_extensions==4.12.2 urllib3==2.2.3 -yarl==1.15.5 +yarl==1.16.0 From dc14d80f5157c4d00e10d46ff85239706e01bdea Mon Sep 17 00:00:00 2001 From: "LocalAI [bot]" <139863280+localai-bot@users.noreply.github.com> Date: Tue, 22 Oct 2024 09:33:29 +0200 Subject: [PATCH 0409/1530] docs: :arrow_up: update docs version mudler/LocalAI (#3936) :arrow_up: Update docs version mudler/LocalAI Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com> Co-authored-by: mudler <2420543+mudler@users.noreply.github.com> --- docs/data/version.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/data/version.json b/docs/data/version.json index 1814b362..75cde0f9 100644 --- a/docs/data/version.json +++ b/docs/data/version.json @@ -1,3 +1,3 @@ { - "version": "v2.22.0" + "version": "v2.22.1" } From a1d6cc93a85de05d2fb85ad47ba8a8198bea3adf Mon Sep 17 00:00:00 2001 From: "LocalAI [bot]" <139863280+localai-bot@users.noreply.github.com> Date: Tue, 22 Oct 2024 09:33:55 +0200 Subject: [PATCH 0410/1530] chore: :arrow_up: Update ggerganov/llama.cpp to `e01c67affe450638162a1a457e2e57859ef6ebf0` (#3937) :arrow_up: Update ggerganov/llama.cpp Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com> Co-authored-by: mudler <2420543+mudler@users.noreply.github.com> --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 
deletion(-) diff --git a/Makefile b/Makefile index a64eb71e..a3f402ae 100644 --- a/Makefile +++ b/Makefile @@ -8,7 +8,7 @@ DETECT_LIBS?=true # llama.cpp versions GOLLAMA_REPO?=https://github.com/go-skynet/go-llama.cpp GOLLAMA_VERSION?=2b57a8ae43e4699d3dc5d1496a1ccd42922993be -CPPLLAMA_VERSION?=45f097645efb11b6d09a5b4adbbfd7c312ac0126 +CPPLLAMA_VERSION?=e01c67affe450638162a1a457e2e57859ef6ebf0 # go-rwkv version RWKV_REPO?=https://github.com/donomii/go-rwkv.cpp From ccc7cb0287eaba505d033b3511bf9469b4dde4e7 Mon Sep 17 00:00:00 2001 From: Ettore Di Giacinto Date: Tue, 22 Oct 2024 09:34:05 +0200 Subject: [PATCH 0411/1530] feat(templates): use a single template for multimodals messages (#3892) Signed-off-by: Ettore Di Giacinto --- core/config/backend_config.go | 4 +- core/http/endpoints/openai/request.go | 43 ++++++++-------- pkg/templates/multimodal.go | 50 +++++++++++++++++-- pkg/templates/multimodal_test.go | 72 ++++++++++++++++++++++++++- 4 files changed, 140 insertions(+), 29 deletions(-) diff --git a/core/config/backend_config.go b/core/config/backend_config.go index 79e134d8..b386d096 100644 --- a/core/config/backend_config.go +++ b/core/config/backend_config.go @@ -197,9 +197,7 @@ type TemplateConfig struct { // It defaults to \n JoinChatMessagesByCharacter *string `yaml:"join_chat_messages_by_character"` - Video string `yaml:"video"` - Image string `yaml:"image"` - Audio string `yaml:"audio"` + Multimodal string `yaml:"multimodal"` } func (c *BackendConfig) UnmarshalYAML(value *yaml.Node) error { diff --git a/core/http/endpoints/openai/request.go b/core/http/endpoints/openai/request.go index a418433e..1309fa82 100644 --- a/core/http/endpoints/openai/request.go +++ b/core/http/endpoints/openai/request.go @@ -149,6 +149,10 @@ func updateRequestConfig(config *config.BackendConfig, input *schema.OpenAIReque // Decode each request's message content imgIndex, vidIndex, audioIndex := 0, 0, 0 for i, m := range input.Messages { + nrOfImgsInMessage := 0 + 
nrOfVideosInMessage := 0 + nrOfAudiosInMessage := 0 + switch content := m.Content.(type) { case string: input.Messages[i].StringContent = content @@ -156,11 +160,16 @@ func updateRequestConfig(config *config.BackendConfig, input *schema.OpenAIReque dat, _ := json.Marshal(content) c := []schema.Content{} json.Unmarshal(dat, &c) + + textContent := "" + // we will template this at the end + CONTENT: for _, pp := range c { switch pp.Type { case "text": - input.Messages[i].StringContent = pp.Text + textContent += pp.Text + //input.Messages[i].StringContent = pp.Text case "video", "video_url": // Decode content as base64 either if it's an URL or base64 text base64, err := utils.GetContentURIAsBase64(pp.VideoURL.URL) @@ -169,14 +178,8 @@ func updateRequestConfig(config *config.BackendConfig, input *schema.OpenAIReque continue CONTENT } input.Messages[i].StringVideos = append(input.Messages[i].StringVideos, base64) // TODO: make sure that we only return base64 stuff - - t := "[vid-{{.ID}}]{{.Text}}" - if config.TemplateConfig.Video != "" { - t = config.TemplateConfig.Video - } - // set a placeholder for each image - input.Messages[i].StringContent, _ = templates.TemplateMultiModal(t, vidIndex, input.Messages[i].StringContent) vidIndex++ + nrOfVideosInMessage++ case "audio_url", "audio": // Decode content as base64 either if it's an URL or base64 text base64, err := utils.GetContentURIAsBase64(pp.AudioURL.URL) @@ -185,13 +188,8 @@ func updateRequestConfig(config *config.BackendConfig, input *schema.OpenAIReque continue CONTENT } input.Messages[i].StringAudios = append(input.Messages[i].StringAudios, base64) // TODO: make sure that we only return base64 stuff - // set a placeholder for each image - t := "[audio-{{.ID}}]{{.Text}}" - if config.TemplateConfig.Audio != "" { - t = config.TemplateConfig.Audio - } - input.Messages[i].StringContent, _ = templates.TemplateMultiModal(t, audioIndex, input.Messages[i].StringContent) audioIndex++ + nrOfAudiosInMessage++ case "image_url", 
"image": // Decode content as base64 either if it's an URL or base64 text base64, err := utils.GetContentURIAsBase64(pp.ImageURL.URL) @@ -200,16 +198,21 @@ func updateRequestConfig(config *config.BackendConfig, input *schema.OpenAIReque continue CONTENT } - t := "[img-{{.ID}}]{{.Text}}" - if config.TemplateConfig.Image != "" { - t = config.TemplateConfig.Image - } input.Messages[i].StringImages = append(input.Messages[i].StringImages, base64) // TODO: make sure that we only return base64 stuff - // set a placeholder for each image - input.Messages[i].StringContent, _ = templates.TemplateMultiModal(t, imgIndex, input.Messages[i].StringContent) + imgIndex++ + nrOfImgsInMessage++ } } + + input.Messages[i].StringContent, _ = templates.TemplateMultiModal(config.TemplateConfig.Multimodal, templates.MultiModalOptions{ + TotalImages: imgIndex, + TotalVideos: vidIndex, + TotalAudios: audioIndex, + ImagesInMessage: nrOfImgsInMessage, + VideosInMessage: nrOfVideosInMessage, + AudiosInMessage: nrOfAudiosInMessage, + }, textContent) } } diff --git a/pkg/templates/multimodal.go b/pkg/templates/multimodal.go index a2056640..3a19b07a 100644 --- a/pkg/templates/multimodal.go +++ b/pkg/templates/multimodal.go @@ -7,20 +7,60 @@ import ( "github.com/Masterminds/sprig/v3" ) -func TemplateMultiModal(templateString string, templateID int, text string) (string, error) { +type MultiModalOptions struct { + TotalImages int + TotalAudios int + TotalVideos int + + ImagesInMessage int + AudiosInMessage int + VideosInMessage int +} + +type MultimodalContent struct { + ID int +} + +const DefaultMultiModalTemplate = "{{ range .Audio }}[audio-{{.ID}}]{{end}}{{ range .Images }}[img-{{.ID}}]{{end}}{{ range .Video }}[vid-{{.ID}}]{{end}}{{.Text}}" + +func TemplateMultiModal(templateString string, opts MultiModalOptions, text string) (string, error) { + if templateString == "" { + templateString = DefaultMultiModalTemplate + } + // compile the template tmpl, err := 
template.New("template").Funcs(sprig.FuncMap()).Parse(templateString) if err != nil { return "", err } + + videos := []MultimodalContent{} + for i := 0; i < opts.VideosInMessage; i++ { + videos = append(videos, MultimodalContent{ID: i + (opts.TotalVideos - opts.VideosInMessage)}) + } + + audios := []MultimodalContent{} + for i := 0; i < opts.AudiosInMessage; i++ { + audios = append(audios, MultimodalContent{ID: i + (opts.TotalAudios - opts.AudiosInMessage)}) + } + + images := []MultimodalContent{} + for i := 0; i < opts.ImagesInMessage; i++ { + images = append(images, MultimodalContent{ID: i + (opts.TotalImages - opts.ImagesInMessage)}) + } + result := bytes.NewBuffer(nil) // execute the template err = tmpl.Execute(result, struct { - ID int - Text string + Audio []MultimodalContent + Images []MultimodalContent + Video []MultimodalContent + Text string }{ - ID: templateID, - Text: text, + Audio: audios, + Images: images, + Video: videos, + Text: text, }) return result.String(), err } diff --git a/pkg/templates/multimodal_test.go b/pkg/templates/multimodal_test.go index d1a8bd5b..ef8607a7 100644 --- a/pkg/templates/multimodal_test.go +++ b/pkg/templates/multimodal_test.go @@ -11,7 +11,77 @@ import ( var _ = Describe("EvaluateTemplate", func() { Context("templating simple strings for multimodal chat", func() { It("should template messages correctly", func() { - result, err := TemplateMultiModal("[img-{{.ID}}]{{.Text}}", 1, "bar") + result, err := TemplateMultiModal("", MultiModalOptions{ + TotalImages: 1, + TotalAudios: 0, + TotalVideos: 0, + ImagesInMessage: 1, + AudiosInMessage: 0, + VideosInMessage: 0, + }, "bar") + Expect(err).NotTo(HaveOccurred()) + Expect(result).To(Equal("[img-0]bar")) + }) + + It("should handle messages with more images correctly", func() { + result, err := TemplateMultiModal("", MultiModalOptions{ + TotalImages: 2, + TotalAudios: 0, + TotalVideos: 0, + ImagesInMessage: 2, + AudiosInMessage: 0, + VideosInMessage: 0, + }, "bar") + 
Expect(err).NotTo(HaveOccurred()) + Expect(result).To(Equal("[img-0][img-1]bar")) + }) + It("should handle messages with more images correctly", func() { + result, err := TemplateMultiModal("", MultiModalOptions{ + TotalImages: 4, + TotalAudios: 1, + TotalVideos: 0, + ImagesInMessage: 2, + AudiosInMessage: 1, + VideosInMessage: 0, + }, "bar") + Expect(err).NotTo(HaveOccurred()) + Expect(result).To(Equal("[audio-0][img-2][img-3]bar")) + }) + It("should handle messages with more images correctly", func() { + result, err := TemplateMultiModal("", MultiModalOptions{ + TotalImages: 3, + TotalAudios: 1, + TotalVideos: 0, + ImagesInMessage: 1, + AudiosInMessage: 1, + VideosInMessage: 0, + }, "bar") + Expect(err).NotTo(HaveOccurred()) + Expect(result).To(Equal("[audio-0][img-2]bar")) + }) + It("should handle messages with more images correctly", func() { + result, err := TemplateMultiModal("", MultiModalOptions{ + TotalImages: 0, + TotalAudios: 0, + TotalVideos: 0, + ImagesInMessage: 0, + AudiosInMessage: 0, + VideosInMessage: 0, + }, "bar") + Expect(err).NotTo(HaveOccurred()) + Expect(result).To(Equal("bar")) + }) + }) + Context("templating with custom defaults", func() { + It("should handle messages with more images correctly", func() { + result, err := TemplateMultiModal("{{ range .Audio }}[audio-{{ add1 .ID}}]{{end}}{{ range .Images }}[img-{{ add1 .ID}}]{{end}}{{ range .Video }}[vid-{{ add1 .ID}}]{{end}}{{.Text}}", MultiModalOptions{ + TotalImages: 1, + TotalAudios: 0, + TotalVideos: 0, + ImagesInMessage: 1, + AudiosInMessage: 0, + VideosInMessage: 0, + }, "bar") Expect(err).NotTo(HaveOccurred()) Expect(result).To(Equal("[img-1]bar")) }) From 6fd0341ecadb5e1f4e4baf5e533d807f7eb183ee Mon Sep 17 00:00:00 2001 From: Dave Date: Wed, 23 Oct 2024 05:16:38 -0400 Subject: [PATCH 0412/1530] chore: update go-piper to latest (#3939) Signed-off-by: Dave Lee --- go.mod | 2 +- go.sum | 2 ++ 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/go.mod b/go.mod index 
dd8fce9f..e969d508 100644 --- a/go.mod +++ b/go.mod @@ -231,7 +231,7 @@ require ( github.com/moby/sys/sequential v0.5.0 // indirect github.com/moby/term v0.5.0 // indirect github.com/mr-tron/base58 v1.2.0 // indirect - github.com/mudler/go-piper v0.0.0-20240315144837-9d0100873a7d + github.com/mudler/go-piper v0.0.0-20241022074816-3854e0221ffb github.com/mudler/water v0.0.0-20221010214108-8c7313014ce0 // indirect github.com/muesli/reflow v0.3.0 // indirect github.com/muesli/termenv v0.15.2 // indirect diff --git a/go.sum b/go.sum index 1dd44a5b..9047d4eb 100644 --- a/go.sum +++ b/go.sum @@ -498,6 +498,8 @@ github.com/mudler/edgevpn v0.28.3 h1:yIuoMExwKHy/mNMBXIsm6FUFbnB9ELIxw9KXrK9KHDk github.com/mudler/edgevpn v0.28.3/go.mod h1:HWcdIwj5zBgOD04Hn3I+J5E5Yb3kK1CwwWaEe6/QERo= github.com/mudler/go-piper v0.0.0-20240315144837-9d0100873a7d h1:8udOFrDf/I83JL0/u22j6U6Q9z9LoSdby2a/DWdd0/s= github.com/mudler/go-piper v0.0.0-20240315144837-9d0100873a7d/go.mod h1:O7SwdSWMilAWhBZMK9N9Y/oBDyMMzshE3ju8Xkexwig= +github.com/mudler/go-piper v0.0.0-20241022074816-3854e0221ffb h1:5qcuxQEpAqeV4ftV5nUt3/hB/RoTXq3MaaauOAedyXo= +github.com/mudler/go-piper v0.0.0-20241022074816-3854e0221ffb/go.mod h1:O7SwdSWMilAWhBZMK9N9Y/oBDyMMzshE3ju8Xkexwig= github.com/mudler/go-processmanager v0.0.0-20240820160718-8b802d3ecf82 h1:FVT07EI8njvsD4tC2Hw8Xhactp5AWhsQWD4oTeQuSAU= github.com/mudler/go-processmanager v0.0.0-20240820160718-8b802d3ecf82/go.mod h1:Urp7LG5jylKoDq0663qeBh0pINGcRl35nXdKx82PSoU= github.com/mudler/go-stable-diffusion v0.0.0-20240429204715-4a3cd6aeae6f h1:cxtMSRkUfy+mjIQ3yMrU0txwQ4It913NEN4m1H8WWgo= From 418c582430df2ae236d260007c0ed7b42f0e2839 Mon Sep 17 00:00:00 2001 From: "LocalAI [bot]" <139863280+localai-bot@users.noreply.github.com> Date: Wed, 23 Oct 2024 11:17:21 +0200 Subject: [PATCH 0413/1530] chore: :arrow_up: Update ggerganov/llama.cpp to `c8c07d658a6cefc5a50cfdf6be7d726503612303` (#3940) :arrow_up: Update ggerganov/llama.cpp Signed-off-by: github-actions[bot] 
<41898282+github-actions[bot]@users.noreply.github.com> Co-authored-by: mudler <2420543+mudler@users.noreply.github.com> --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index a3f402ae..29b3c721 100644 --- a/Makefile +++ b/Makefile @@ -8,7 +8,7 @@ DETECT_LIBS?=true # llama.cpp versions GOLLAMA_REPO?=https://github.com/go-skynet/go-llama.cpp GOLLAMA_VERSION?=2b57a8ae43e4699d3dc5d1496a1ccd42922993be -CPPLLAMA_VERSION?=e01c67affe450638162a1a457e2e57859ef6ebf0 +CPPLLAMA_VERSION?=c8c07d658a6cefc5a50cfdf6be7d726503612303 # go-rwkv version RWKV_REPO?=https://github.com/donomii/go-rwkv.cpp From 8737a65760a05f1f75602b72d0c0cd4f6f0aad0a Mon Sep 17 00:00:00 2001 From: Ettore Di Giacinto Date: Wed, 23 Oct 2024 15:34:32 +0200 Subject: [PATCH 0414/1530] feat: allow to disable '/metrics' endpoints for local stats (#3945) Seem the "/metrics" endpoint that is source of confusion as people tends to believe we collect telemetry data just because we import "opentelemetry", however it is still a good idea to allow to disable even local metrics if not really required. See also: https://github.com/mudler/LocalAI/issues/3942 Signed-off-by: Ettore Di Giacinto --- core/cli/run.go | 5 +++++ core/config/application_config.go | 5 +++++ core/http/app.go | 24 +++++++++++++----------- core/http/routes/localai.go | 4 +++- 4 files changed, 26 insertions(+), 12 deletions(-) diff --git a/core/cli/run.go b/core/cli/run.go index a67839a0..b2d439a0 100644 --- a/core/cli/run.go +++ b/core/cli/run.go @@ -53,6 +53,7 @@ type RunCMD struct { OpaqueErrors bool `env:"LOCALAI_OPAQUE_ERRORS" default:"false" help:"If true, all error responses are replaced with blank 500 errors. This is intended only for hardening against information leaks and is normally not recommended." 
group:"hardening"` UseSubtleKeyComparison bool `env:"LOCALAI_SUBTLE_KEY_COMPARISON" default:"false" help:"If true, API Key validation comparisons will be performed using constant-time comparisons rather than simple equality. This trades off performance on each request for resiliancy against timing attacks." group:"hardening"` DisableApiKeyRequirementForHttpGet bool `env:"LOCALAI_DISABLE_API_KEY_REQUIREMENT_FOR_HTTP_GET" default:"false" help:"If true, a valid API key is not required to issue GET requests to portions of the web ui. This should only be enabled in secure testing environments" group:"hardening"` + DisableMetricsEndpoint bool `env:"LOCALAI_DISABLE_METRICS_ENDPOINT,DISABLE_METRICS_ENDPOINT" default:"false" help:"Disable the /metrics endpoint" group:"api"` HttpGetExemptedEndpoints []string `env:"LOCALAI_HTTP_GET_EXEMPTED_ENDPOINTS" default:"^/$,^/browse/?$,^/talk/?$,^/p2p/?$,^/chat/?$,^/text2image/?$,^/tts/?$,^/static/.*$,^/swagger.*$" help:"If LOCALAI_DISABLE_API_KEY_REQUIREMENT_FOR_HTTP_GET is overriden to true, this is the list of endpoints to exempt. 
Only adjust this in case of a security incident or as a result of a personal security posture review" group:"hardening"` Peer2Peer bool `env:"LOCALAI_P2P,P2P" name:"p2p" default:"false" help:"Enable P2P mode" group:"p2p"` Peer2PeerDHTInterval int `env:"LOCALAI_P2P_DHT_INTERVAL,P2P_DHT_INTERVAL" default:"360" name:"p2p-dht-interval" help:"Interval for DHT refresh (used during token generation)" group:"p2p"` @@ -108,6 +109,10 @@ func (r *RunCMD) Run(ctx *cliContext.Context) error { config.WithLoadToMemory(r.LoadToMemory), } + if r.DisableMetricsEndpoint { + opts = append(opts, config.DisableMetricsEndpoint) + } + token := "" if r.Peer2Peer || r.Peer2PeerToken != "" { log.Info().Msg("P2P mode enabled") diff --git a/core/config/application_config.go b/core/config/application_config.go index 2af0c7ae..3f321e70 100644 --- a/core/config/application_config.go +++ b/core/config/application_config.go @@ -39,6 +39,7 @@ type ApplicationConfig struct { OpaqueErrors bool UseSubtleKeyComparison bool DisableApiKeyRequirementForHttpGet bool + DisableMetrics bool HttpGetExemptedEndpoints []*regexp.Regexp DisableGalleryEndpoint bool LoadToMemory []string @@ -350,6 +351,10 @@ func WithDisableApiKeyRequirementForHttpGet(required bool) AppOption { } } +var DisableMetricsEndpoint AppOption = func(o *ApplicationConfig) { + o.DisableMetrics = true +} + func WithHttpGetExemptedEndpoints(endpoints []string) AppOption { return func(o *ApplicationConfig) { o.HttpGetExemptedEndpoints = []*regexp.Regexp{} diff --git a/core/http/app.go b/core/http/app.go index 2cf0ad17..2ba2c2b9 100644 --- a/core/http/app.go +++ b/core/http/app.go @@ -109,19 +109,21 @@ func App(cl *config.BackendConfigLoader, ml *model.ModelLoader, appConfig *confi app.Use(recover.New()) } - metricsService, err := services.NewLocalAIMetricsService() - if err != nil { - return nil, err - } + if !appConfig.DisableMetrics { + metricsService, err := services.NewLocalAIMetricsService() + if err != nil { + return nil, err + } - if 
metricsService != nil { - app.Use(localai.LocalAIMetricsAPIMiddleware(metricsService)) - app.Hooks().OnShutdown(func() error { - return metricsService.Shutdown() - }) - } + if metricsService != nil { + app.Use(localai.LocalAIMetricsAPIMiddleware(metricsService)) + app.Hooks().OnShutdown(func() error { + return metricsService.Shutdown() + }) + } - // Health Checks should always be exempt from auth, so register these first + } + // Health Checks should always be exempt from auth, so register these first routes.HealthRoutes(app) kaConfig, err := middleware.GetKeyAuthConfig(appConfig) diff --git a/core/http/routes/localai.go b/core/http/routes/localai.go index f2f0dfa4..a2ef16a5 100644 --- a/core/http/routes/localai.go +++ b/core/http/routes/localai.go @@ -42,7 +42,9 @@ func RegisterLocalAIRoutes(app *fiber.App, app.Post("/stores/get", localai.StoresGetEndpoint(sl, appConfig)) app.Post("/stores/find", localai.StoresFindEndpoint(sl, appConfig)) - app.Get("/metrics", localai.LocalAIMetricsEndpoint()) + if !appConfig.DisableMetrics { + app.Get("/metrics", localai.LocalAIMetricsEndpoint()) + } // Experimental Backend Statistics Module backendMonitorService := services.NewBackendMonitorService(ml, cl, appConfig) // Split out for now From c75ecfa009c867f6cf4e5328b1676058246da485 Mon Sep 17 00:00:00 2001 From: Ettore Di Giacinto Date: Wed, 23 Oct 2024 15:34:45 +0200 Subject: [PATCH 0415/1530] fix(phi3-vision): add multimodal template (#3944) Signed-off-by: Ettore Di Giacinto --- gallery/phi-3-vision.yaml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/gallery/phi-3-vision.yaml b/gallery/phi-3-vision.yaml index 1a3d03af..682e3b4f 100644 --- a/gallery/phi-3-vision.yaml +++ b/gallery/phi-3-vision.yaml @@ -20,4 +20,6 @@ config_file: | completion: | {{.Input}} use_tokenizer_template: false + multimodal: "{{ range .Images }}<|image_{{ add1 .ID}}|>{{end}}\n{{.Text}}" + # XXX: The one below can be dropped after a new release is out image: "<|image_{{ add1 .ID }}|>\n{{.Text}}" 
From ae1ec4e096b84ace92471f73305c3a1fcb3e02f8 Mon Sep 17 00:00:00 2001 From: Ettore Di Giacinto Date: Wed, 23 Oct 2024 15:34:57 +0200 Subject: [PATCH 0416/1530] feat(vllm): expose 'load_format' (#3943) Signed-off-by: Ettore Di Giacinto --- backend/python/vllm/backend.py | 2 ++ core/backend/options.go | 1 + core/config/backend_config.go | 1 + 3 files changed, 4 insertions(+) diff --git a/backend/python/vllm/backend.py b/backend/python/vllm/backend.py index 023a14bc..98ac5081 100644 --- a/backend/python/vllm/backend.py +++ b/backend/python/vllm/backend.py @@ -95,6 +95,8 @@ class BackendServicer(backend_pb2_grpc.BackendServicer): if request.Quantization != "": engine_args.quantization = request.Quantization + if request.LoadFormat != "": + engine_args.load_format = request.LoadFormat if request.GPUMemoryUtilization != 0: engine_args.gpu_memory_utilization = request.GPUMemoryUtilization if request.TrustRemoteCode: diff --git a/core/backend/options.go b/core/backend/options.go index 90d563e0..82c582c8 100644 --- a/core/backend/options.go +++ b/core/backend/options.go @@ -139,6 +139,7 @@ func grpcModelOpts(c config.BackendConfig) *pb.ModelOptions { DraftModel: c.DraftModel, AudioPath: c.VallE.AudioPath, Quantization: c.Quantization, + LoadFormat: c.LoadFormat, GPUMemoryUtilization: c.GPUMemoryUtilization, TrustRemoteCode: c.TrustRemoteCode, EnforceEager: c.EnforceEager, diff --git a/core/config/backend_config.go b/core/config/backend_config.go index b386d096..c3d1063d 100644 --- a/core/config/backend_config.go +++ b/core/config/backend_config.go @@ -143,6 +143,7 @@ type LLMConfig struct { DraftModel string `yaml:"draft_model"` NDraft int32 `yaml:"n_draft"` Quantization string `yaml:"quantization"` + LoadFormat string `yaml:"load_format"` GPUMemoryUtilization float32 `yaml:"gpu_memory_utilization"` // vLLM TrustRemoteCode bool `yaml:"trust_remote_code"` // vLLM EnforceEager bool `yaml:"enforce_eager"` // vLLM From 835932e95e473a0c716fdeaa244c306c86f149a4 Mon Sep 17 
00:00:00 2001 From: Ettore Di Giacinto Date: Wed, 23 Oct 2024 15:46:06 +0200 Subject: [PATCH 0417/1530] feat: update proto file Signed-off-by: Ettore Di Giacinto --- backend/backend.proto | 1 + 1 file changed, 1 insertion(+) diff --git a/backend/backend.proto b/backend/backend.proto index 568655b6..85e87260 100644 --- a/backend/backend.proto +++ b/backend/backend.proto @@ -219,6 +219,7 @@ message ModelOptions { int32 SwapSpace = 53; int32 MaxModelLen = 54; int32 TensorParallelSize = 55; + string LoadFormat = 58; string MMProj = 41; From 7748eb6553295240a1f81fce1aae26aa74dfbd30 Mon Sep 17 00:00:00 2001 From: Mauro Morales Date: Wed, 23 Oct 2024 20:02:08 +0200 Subject: [PATCH 0418/1530] docs: add Homebrew as an option to install on MacOS (#3946) Add Homebrew as an option to install on MacOS Signed-off-by: Mauro Morales --- docs/content/docs/getting-started/quickstart.md | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/docs/content/docs/getting-started/quickstart.md b/docs/content/docs/getting-started/quickstart.md index 9ccc0faa..4e14c505 100644 --- a/docs/content/docs/getting-started/quickstart.md +++ b/docs/content/docs/getting-started/quickstart.md @@ -30,6 +30,19 @@ For a full list of options, refer to the [Installer Options]({{% relref "docs/ad Binaries can also be [manually downloaded]({{% relref "docs/reference/binaries" %}}). +## Using Homebrew on MacOS + +{{% alert icon="⚠️" %}} +The Homebrew formula currently doesn't have the same options than the bash script +{{% /alert %}} + +You can install Homebrew's [LocalAI](https://formulae.brew.sh/formula/localai) with the following command: + +``` +brew install localai +``` + + ## Using Container Images or Kubernetes LocalAI is available as a container image compatible with various container engines such as Docker, Podman, and Kubernetes. 
Container images are published on [quay.io](https://quay.io/repository/go-skynet/local-ai?tab=tags&tag=latest) and [Docker Hub](https://hub.docker.com/r/localai/localai). From a91c2e7aaabe7168b106d6cd3df163653a619eef Mon Sep 17 00:00:00 2001 From: "LocalAI [bot]" <139863280+localai-bot@users.noreply.github.com> Date: Thu, 24 Oct 2024 10:08:20 +0200 Subject: [PATCH 0419/1530] chore: :arrow_up: Update ggerganov/whisper.cpp to `0fbaac9c891055796456df7b9122a70c220f9ca1` (#3950) :arrow_up: Update ggerganov/whisper.cpp Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com> Co-authored-by: mudler <2420543+mudler@users.noreply.github.com> --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index 29b3c721..f0b9004c 100644 --- a/Makefile +++ b/Makefile @@ -16,7 +16,7 @@ RWKV_VERSION?=661e7ae26d442f5cfebd2a0881b44e8c55949ec6 # whisper.cpp version WHISPER_REPO?=https://github.com/ggerganov/whisper.cpp -WHISPER_CPP_VERSION?=a5abfe6a90495f7bf19fe70d016ecc255e97359c +WHISPER_CPP_VERSION?=0fbaac9c891055796456df7b9122a70c220f9ca1 # bert.cpp version BERT_REPO?=https://github.com/go-skynet/go-bert.cpp From 81890e76a0a2e1ea66f6ea5f12f9be64f04a98e8 Mon Sep 17 00:00:00 2001 From: "LocalAI [bot]" <139863280+localai-bot@users.noreply.github.com> Date: Thu, 24 Oct 2024 10:08:55 +0200 Subject: [PATCH 0420/1530] chore: :arrow_up: Update ggerganov/llama.cpp to `0a1c750c80147687df267114c81956757cc14382` (#3948) :arrow_up: Update ggerganov/llama.cpp Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com> Co-authored-by: mudler <2420543+mudler@users.noreply.github.com> --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index f0b9004c..e62bfc21 100644 --- a/Makefile +++ b/Makefile @@ -8,7 +8,7 @@ DETECT_LIBS?=true # llama.cpp versions GOLLAMA_REPO?=https://github.com/go-skynet/go-llama.cpp 
GOLLAMA_VERSION?=2b57a8ae43e4699d3dc5d1496a1ccd42922993be -CPPLLAMA_VERSION?=c8c07d658a6cefc5a50cfdf6be7d726503612303 +CPPLLAMA_VERSION?=0a1c750c80147687df267114c81956757cc14382 # go-rwkv version RWKV_REPO?=https://github.com/donomii/go-rwkv.cpp From e88468640f13e219aef77d7210b4f915506ad408 Mon Sep 17 00:00:00 2001 From: Ettore Di Giacinto Date: Thu, 24 Oct 2024 11:40:35 +0200 Subject: [PATCH 0421/1530] fix(parler-tts): use latest audiotools (#3954) Signed-off-by: Ettore Di Giacinto --- backend/python/parler-tts/install.sh | 14 +++++++------- backend/python/parler-tts/requirements-after.txt | 1 + 2 files changed, 8 insertions(+), 7 deletions(-) diff --git a/backend/python/parler-tts/install.sh b/backend/python/parler-tts/install.sh index aae690c4..954c6702 100755 --- a/backend/python/parler-tts/install.sh +++ b/backend/python/parler-tts/install.sh @@ -15,12 +15,12 @@ installRequirements # https://github.com/descriptinc/audiotools/issues/101 # incompatible protobuf versions. -PYDIR=python3.10 -pyenv="${MY_DIR}/venv/lib/${PYDIR}/site-packages/google/protobuf/internal/" +# PYDIR=python3.10 +# pyenv="${MY_DIR}/venv/lib/${PYDIR}/site-packages/google/protobuf/internal/" -if [ ! -d ${pyenv} ]; then - echo "(parler-tts/install.sh): Error: ${pyenv} does not exist" - exit 1 -fi +# if [ ! 
-d ${pyenv} ]; then +# echo "(parler-tts/install.sh): Error: ${pyenv} does not exist" +# exit 1 +# fi -curl -L https://raw.githubusercontent.com/protocolbuffers/protobuf/main/python/google/protobuf/internal/builder.py -o ${pyenv}/builder.py +# curl -L https://raw.githubusercontent.com/protocolbuffers/protobuf/main/python/google/protobuf/internal/builder.py -o ${pyenv}/builder.py diff --git a/backend/python/parler-tts/requirements-after.txt b/backend/python/parler-tts/requirements-after.txt index 09811bf4..df0b8a9e 100644 --- a/backend/python/parler-tts/requirements-after.txt +++ b/backend/python/parler-tts/requirements-after.txt @@ -1,3 +1,4 @@ git+https://github.com/huggingface/parler-tts.git@8e465f1b5fcd223478e07175cb40494d19ffbe17 llvmlite==0.43.0 numba==0.60.0 +git+https://github.com/descriptinc/audiotools \ No newline at end of file From 5be2d221179cf77e7cacad699f95e9fc886490a9 Mon Sep 17 00:00:00 2001 From: "LocalAI [bot]" <139863280+localai-bot@users.noreply.github.com> Date: Thu, 24 Oct 2024 22:45:26 +0200 Subject: [PATCH 0422/1530] chore: :arrow_up: Update ggerganov/llama.cpp to `958367bf530d943a902afa1ce1c342476098576b` (#3956) :arrow_up: Update ggerganov/llama.cpp Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com> Co-authored-by: mudler <2420543+mudler@users.noreply.github.com> --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index e62bfc21..2e6284ee 100644 --- a/Makefile +++ b/Makefile @@ -8,7 +8,7 @@ DETECT_LIBS?=true # llama.cpp versions GOLLAMA_REPO?=https://github.com/go-skynet/go-llama.cpp GOLLAMA_VERSION?=2b57a8ae43e4699d3dc5d1496a1ccd42922993be -CPPLLAMA_VERSION?=0a1c750c80147687df267114c81956757cc14382 +CPPLLAMA_VERSION?=958367bf530d943a902afa1ce1c342476098576b # go-rwkv version RWKV_REPO?=https://github.com/donomii/go-rwkv.cpp From 07ce0a3c1756ee6e025775ee3bf5e885220f2956 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Serta=C3=A7=20=C3=96zercan?= 
<852750+sozercan@users.noreply.github.com> Date: Fri, 25 Oct 2024 01:12:43 -0700 Subject: [PATCH 0423/1530] feat: add flux single file support (#3959) feat: flux pipeline single file Signed-off-by: Sertac Ozercan --- backend/python/diffusers/backend.py | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/backend/python/diffusers/backend.py b/backend/python/diffusers/backend.py index e7ad1cdd..087b449e 100755 --- a/backend/python/diffusers/backend.py +++ b/backend/python/diffusers/backend.py @@ -247,11 +247,16 @@ class BackendServicer(backend_pb2_grpc.BackendServicer): use_safetensors=True, variant=variant) elif request.PipelineType == "FluxPipeline": + if fromSingleFile: + self.pipe = FluxPipeline.from_single_file(modelFile, + torch_dtype=torchType, + use_safetensors=True) + else: self.pipe = FluxPipeline.from_pretrained( request.Model, torch_dtype=torch.bfloat16) - if request.LowVRAM: - self.pipe.enable_model_cpu_offload() + if request.LowVRAM: + self.pipe.enable_model_cpu_offload() elif request.PipelineType == "FluxTransformer2DModel": dtype = torch.bfloat16 # specify from environment or default to "ChuckMcSneed/FLUX.1-dev" From 9c5cd9b38be3d6ad9db17f287df44f2f284519d4 Mon Sep 17 00:00:00 2001 From: Ettore Di Giacinto Date: Fri, 25 Oct 2024 12:25:29 +0200 Subject: [PATCH 0424/1530] fix(parler-tts): pin grpcio-tools (#3960) Seems we require a specific version to build the backend files. 
Signed-off-by: Ettore Di Giacinto --- backend/python/parler-tts/Makefile | 5 +++-- backend/python/parler-tts/install.sh | 14 +------------- backend/python/parler-tts/protogen.sh | 6 ++++++ backend/python/parler-tts/requirements-after.txt | 3 +-- backend/python/parler-tts/requirements.txt | 1 + 5 files changed, 12 insertions(+), 17 deletions(-) create mode 100755 backend/python/parler-tts/protogen.sh diff --git a/backend/python/parler-tts/Makefile b/backend/python/parler-tts/Makefile index c25b2af7..48da2f3f 100644 --- a/backend/python/parler-tts/Makefile +++ b/backend/python/parler-tts/Makefile @@ -12,9 +12,10 @@ export SKIP_CONDA=1 endif .PHONY: parler-tts -parler-tts: protogen +parler-tts: @echo "Installing $(CONDA_ENV_PATH)..." bash install.sh $(CONDA_ENV_PATH) + $(MAKE) protogen .PHONY: run run: protogen @@ -36,7 +37,7 @@ protogen-clean: $(RM) backend_pb2_grpc.py backend_pb2.py backend_pb2_grpc.py backend_pb2.py: - python3 -m grpc_tools.protoc -I../.. --python_out=. --grpc_python_out=. backend.proto + bash protogen.sh .PHONY: clean clean: protogen-clean diff --git a/backend/python/parler-tts/install.sh b/backend/python/parler-tts/install.sh index 954c6702..fc51d564 100755 --- a/backend/python/parler-tts/install.sh +++ b/backend/python/parler-tts/install.sh @@ -11,16 +11,4 @@ if [ "x${BUILD_PROFILE}" == "xintel" ]; then EXTRA_PIP_INSTALL_FLAGS+=" --upgrade --index-strategy=unsafe-first-match" fi -installRequirements - -# https://github.com/descriptinc/audiotools/issues/101 -# incompatible protobuf versions. -# PYDIR=python3.10 -# pyenv="${MY_DIR}/venv/lib/${PYDIR}/site-packages/google/protobuf/internal/" - -# if [ ! 
-d ${pyenv} ]; then -# echo "(parler-tts/install.sh): Error: ${pyenv} does not exist" -# exit 1 -# fi - -# curl -L https://raw.githubusercontent.com/protocolbuffers/protobuf/main/python/google/protobuf/internal/builder.py -o ${pyenv}/builder.py +installRequirements \ No newline at end of file diff --git a/backend/python/parler-tts/protogen.sh b/backend/python/parler-tts/protogen.sh new file mode 100755 index 00000000..32f39fbb --- /dev/null +++ b/backend/python/parler-tts/protogen.sh @@ -0,0 +1,6 @@ +#!/bin/bash +set -e + +source $(dirname $0)/../common/libbackend.sh + +python3 -m grpc_tools.protoc -I../.. --python_out=. --grpc_python_out=. backend.proto \ No newline at end of file diff --git a/backend/python/parler-tts/requirements-after.txt b/backend/python/parler-tts/requirements-after.txt index df0b8a9e..ca083c40 100644 --- a/backend/python/parler-tts/requirements-after.txt +++ b/backend/python/parler-tts/requirements-after.txt @@ -1,4 +1,3 @@ git+https://github.com/huggingface/parler-tts.git@8e465f1b5fcd223478e07175cb40494d19ffbe17 llvmlite==0.43.0 -numba==0.60.0 -git+https://github.com/descriptinc/audiotools \ No newline at end of file +numba==0.60.0 \ No newline at end of file diff --git a/backend/python/parler-tts/requirements.txt b/backend/python/parler-tts/requirements.txt index ff9adca9..7fbf4cb3 100644 --- a/backend/python/parler-tts/requirements.txt +++ b/backend/python/parler-tts/requirements.txt @@ -1,4 +1,5 @@ grpcio==1.67.0 +grpcio-tools==1.44.0 protobuf certifi llvmlite==0.43.0 \ No newline at end of file From fd905b483be6904af36265341f29622d8c62c9f7 Mon Sep 17 00:00:00 2001 From: Ettore Di Giacinto Date: Fri, 25 Oct 2024 12:32:37 +0200 Subject: [PATCH 0425/1530] fix(gallery): overrides for parler-tts in the gallery (#3962) chore(parler-tts): fix overrides in the gallery Signed-off-by: Ettore Di Giacinto --- gallery/index.yaml | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/gallery/index.yaml b/gallery/index.yaml index 
28cd50b6..6e6a37f4 100644 --- a/gallery/index.yaml +++ b/gallery/index.yaml @@ -2522,8 +2522,9 @@ ### START parler-tts url: "github:mudler/LocalAI/gallery/parler-tts.yaml@master" name: parler-tts-mini-v0.1 - parameters: - model: parler-tts/parler_tts_mini_v0.1 + overrides: + parameters: + model: parler-tts/parler_tts_mini_v0.1 license: apache-2.0 description: | Parler-TTS is a lightweight text-to-speech (TTS) model that can generate high-quality, natural sounding speech in the style of a given speaker (gender, pitch, speaking style, etc). It is a reproduction of work from the paper Natural language guidance of high-fidelity text-to-speech with synthetic annotations by Dan Lyth and Simon King, from Stability AI and Edinburgh University respectively. From dd2e243997098d086f2cbc84b31e093608a96834 Mon Sep 17 00:00:00 2001 From: Ettore Di Giacinto Date: Fri, 25 Oct 2024 12:32:48 +0200 Subject: [PATCH 0426/1530] chore(python): update backend sample to consume grpcio from venv (#3961) Backends can as well depends on grpcio and require different versions from the ones that are installed in the system. Signed-off-by: Ettore Di Giacinto --- backend/python/common/template/Makefile | 5 +++-- backend/python/common/template/protogen.sh | 6 ++++++ backend/python/common/template/requirements.txt | 3 ++- 3 files changed, 11 insertions(+), 3 deletions(-) create mode 100644 backend/python/common/template/protogen.sh diff --git a/backend/python/common/template/Makefile b/backend/python/common/template/Makefile index 6cc45707..c0e5169f 100644 --- a/backend/python/common/template/Makefile +++ b/backend/python/common/template/Makefile @@ -1,8 +1,9 @@ .DEFAULT_GOAL := install .PHONY: install -install: protogen +install: bash install.sh + $(MAKE) protogen .PHONY: protogen protogen: backend_pb2_grpc.py backend_pb2.py @@ -12,7 +13,7 @@ protogen-clean: $(RM) backend_pb2_grpc.py backend_pb2.py backend_pb2_grpc.py backend_pb2.py: - python3 -m grpc_tools.protoc -I../.. --python_out=. 
--grpc_python_out=. backend.proto + bash protogen.sh .PHONY: clean clean: protogen-clean diff --git a/backend/python/common/template/protogen.sh b/backend/python/common/template/protogen.sh new file mode 100644 index 00000000..32f39fbb --- /dev/null +++ b/backend/python/common/template/protogen.sh @@ -0,0 +1,6 @@ +#!/bin/bash +set -e + +source $(dirname $0)/../common/libbackend.sh + +python3 -m grpc_tools.protoc -I../.. --python_out=. --grpc_python_out=. backend.proto \ No newline at end of file diff --git a/backend/python/common/template/requirements.txt b/backend/python/common/template/requirements.txt index 16716764..b59960b2 100644 --- a/backend/python/common/template/requirements.txt +++ b/backend/python/common/template/requirements.txt @@ -1,2 +1,3 @@ grpcio==1.67.0 -protobuf \ No newline at end of file +protobuf +grpcio-tools \ No newline at end of file From d9905ba0507b365ce119a2d80a56bd10b288a5f2 Mon Sep 17 00:00:00 2001 From: Ettore Di Giacinto Date: Fri, 25 Oct 2024 12:59:37 +0200 Subject: [PATCH 0427/1530] fix(ci): drop grpcio-tools pin to apple CI test run (#3964) Signed-off-by: Ettore Di Giacinto --- .github/workflows/test.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index f1078706..ecef0569 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -224,7 +224,7 @@ jobs: - name: Dependencies run: | brew install protobuf grpc make protoc-gen-go protoc-gen-go-grpc libomp llvm - pip install --user --no-cache-dir grpcio-tools==1.64.1 + pip install --user --no-cache-dir grpcio-tools - name: Test run: | export C_INCLUDE_PATH=/usr/local/include From dbe7ac484cfc1425c5e1c22bfedd12ad5417206d Mon Sep 17 00:00:00 2001 From: "LocalAI [bot]" <139863280+localai-bot@users.noreply.github.com> Date: Fri, 25 Oct 2024 23:42:18 +0200 Subject: [PATCH 0428/1530] chore: :arrow_up: Update ggerganov/llama.cpp to `668750357e66bfa3d1504b65699f5a0dfe3cb7cb` (#3965) :arrow_up: 
Update ggerganov/llama.cpp Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com> Co-authored-by: mudler <2420543+mudler@users.noreply.github.com> --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index 2e6284ee..93c0c9a4 100644 --- a/Makefile +++ b/Makefile @@ -8,7 +8,7 @@ DETECT_LIBS?=true # llama.cpp versions GOLLAMA_REPO?=https://github.com/go-skynet/go-llama.cpp GOLLAMA_VERSION?=2b57a8ae43e4699d3dc5d1496a1ccd42922993be -CPPLLAMA_VERSION?=958367bf530d943a902afa1ce1c342476098576b +CPPLLAMA_VERSION?=668750357e66bfa3d1504b65699f5a0dfe3cb7cb # go-rwkv version RWKV_REPO?=https://github.com/donomii/go-rwkv.cpp From 3ad920b50a590d63c71deccbb2cc4e8ca537bec0 Mon Sep 17 00:00:00 2001 From: Ettore Di Giacinto Date: Fri, 25 Oct 2024 23:50:12 +0200 Subject: [PATCH 0429/1530] fix(parler-tts): pin protobuf (#3963) * fix(parler-tts): pin protobuf Signed-off-by: Ettore Di Giacinto Signed-off-by: Ettore Di Giacinto * debug Signed-off-by: Ettore Di Giacinto * Re-apply workaround Signed-off-by: Ettore Di Giacinto --------- Signed-off-by: Ettore Di Giacinto Signed-off-by: Ettore Di Giacinto --- .github/workflows/test-extra.yml | 7 +++++++ backend/python/parler-tts/install.sh | 16 +++++++++++++++- backend/python/parler-tts/requirements-after.txt | 3 ++- backend/python/parler-tts/requirements.txt | 4 +--- 4 files changed, 25 insertions(+), 5 deletions(-) diff --git a/.github/workflows/test-extra.yml b/.github/workflows/test-extra.yml index 8b37b52d..a2c34872 100644 --- a/.github/workflows/test-extra.yml +++ b/.github/workflows/test-extra.yml @@ -123,6 +123,13 @@ jobs: run: | make --jobs=5 --output-sync=target -C backend/python/parler-tts make --jobs=5 --output-sync=target -C backend/python/parler-tts test + - name: Setup tmate session if tests fail + if: ${{ failure() }} + uses: mxschmitt/action-tmate@v3.19 + with: + detached: true + connect-timeout-seconds: 180 + limit-access-to-actor: true 
tests-openvoice: runs-on: ubuntu-latest diff --git a/backend/python/parler-tts/install.sh b/backend/python/parler-tts/install.sh index fc51d564..14df9b14 100755 --- a/backend/python/parler-tts/install.sh +++ b/backend/python/parler-tts/install.sh @@ -11,4 +11,18 @@ if [ "x${BUILD_PROFILE}" == "xintel" ]; then EXTRA_PIP_INSTALL_FLAGS+=" --upgrade --index-strategy=unsafe-first-match" fi -installRequirements \ No newline at end of file + +installRequirements + + +# https://github.com/descriptinc/audiotools/issues/101 +# incompatible protobuf versions. +PYDIR=python3.10 +pyenv="${MY_DIR}/venv/lib/${PYDIR}/site-packages/google/protobuf/internal/" + +if [ ! -d ${pyenv} ]; then + echo "(parler-tts/install.sh): Error: ${pyenv} does not exist" + exit 1 +fi + +curl -L https://raw.githubusercontent.com/protocolbuffers/protobuf/main/python/google/protobuf/internal/builder.py -o ${pyenv}/builder.py diff --git a/backend/python/parler-tts/requirements-after.txt b/backend/python/parler-tts/requirements-after.txt index ca083c40..702074de 100644 --- a/backend/python/parler-tts/requirements-after.txt +++ b/backend/python/parler-tts/requirements-after.txt @@ -1,3 +1,4 @@ git+https://github.com/huggingface/parler-tts.git@8e465f1b5fcd223478e07175cb40494d19ffbe17 llvmlite==0.43.0 -numba==0.60.0 \ No newline at end of file +numba==0.60.0 +grpcio-tools==1.42.0 \ No newline at end of file diff --git a/backend/python/parler-tts/requirements.txt b/backend/python/parler-tts/requirements.txt index 7fbf4cb3..3187f526 100644 --- a/backend/python/parler-tts/requirements.txt +++ b/backend/python/parler-tts/requirements.txt @@ -1,5 +1,3 @@ grpcio==1.67.0 -grpcio-tools==1.44.0 -protobuf certifi -llvmlite==0.43.0 \ No newline at end of file +llvmlite==0.43.0 From 9f43f37150a1e6243deddfe3559f656af9f572bb Mon Sep 17 00:00:00 2001 From: Ettore Di Giacinto Date: Sat, 26 Oct 2024 09:02:27 +0200 Subject: [PATCH 0430/1530] models(gallery): add l3.1-moe-2x8b-v0.2 (#3969) Signed-off-by: Ettore Di Giacinto --- 
gallery/index.yaml | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git a/gallery/index.yaml b/gallery/index.yaml index 6e6a37f4..552ad2ac 100644 --- a/gallery/index.yaml +++ b/gallery/index.yaml @@ -1778,6 +1778,24 @@ - filename: Baldur-8B.Q4_K_M.gguf sha256: 645b393fbac5cd17ccfd66840a3a05c3930e01b903dd1535f0347a74cc443fc7 uri: huggingface://QuantFactory/Baldur-8B-GGUF/Baldur-8B.Q4_K_M.gguf +- !!merge <<: *llama31 + name: "l3.1-moe-2x8b-v0.2" + icon: https://github.com/moeru-ai/L3.1-Moe/blob/main/cover/v0.2.png?raw=true + urls: + - https://huggingface.co/moeru-ai/L3.1-Moe-2x8B-v0.2 + - https://huggingface.co/mradermacher/L3.1-Moe-2x8B-v0.2-GGUF + description: | + This model is a Mixture of Experts (MoE) made with mergekit-moe. It uses the following base models: + Joseph717171/Llama-3.1-SuperNova-8B-Lite_TIES_with_Base + ArliAI/Llama-3.1-8B-ArliAI-RPMax-v1.2 + Heavily inspired by mlabonne/Beyonder-4x7B-v3. + overrides: + parameters: + model: L3.1-Moe-2x8B-v0.2.Q4_K_M.gguf + files: + - filename: L3.1-Moe-2x8B-v0.2.Q4_K_M.gguf + sha256: 87f8b294aa213aa3f866e03a53923f4df8f797ea94dc93f88b8a1b58d85fbca0 + uri: huggingface://mradermacher/L3.1-Moe-2x8B-v0.2-GGUF/L3.1-Moe-2x8B-v0.2.Q4_K_M.gguf - &deepseek ## Deepseek url: "github:mudler/LocalAI/gallery/deepseek.yaml@master" From a27af2d7ad035b2b573f01a7cd9205d654379472 Mon Sep 17 00:00:00 2001 From: Ettore Di Giacinto Date: Sat, 26 Oct 2024 09:05:18 +0200 Subject: [PATCH 0431/1530] models(gallery): add llama3.1-darkstorm-aspire-8b (#3970) Signed-off-by: Ettore Di Giacinto --- gallery/index.yaml | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git a/gallery/index.yaml b/gallery/index.yaml index 552ad2ac..c537f73b 100644 --- a/gallery/index.yaml +++ b/gallery/index.yaml @@ -1796,6 +1796,24 @@ - filename: L3.1-Moe-2x8B-v0.2.Q4_K_M.gguf sha256: 87f8b294aa213aa3f866e03a53923f4df8f797ea94dc93f88b8a1b58d85fbca0 uri: huggingface://mradermacher/L3.1-Moe-2x8B-v0.2-GGUF/L3.1-Moe-2x8B-v0.2.Q4_K_M.gguf +- 
!!merge <<: *llama31 + name: "llama3.1-darkstorm-aspire-8b" + urls: + - https://huggingface.co/ZeroXClem/Llama3.1-DarkStorm-Aspire-8B + - https://huggingface.co/mradermacher/Llama3.1-DarkStorm-Aspire-8B-GGUF + description: | + Welcome to Llama3.1-DarkStorm-Aspire-8B — an advanced and versatile 8B parameter AI model born from the fusion of powerful language models, designed to deliver superior performance across research, writing, coding, and creative tasks. This unique merge blends the best qualities of the Dark Enigma, Storm, and Aspire models, while built on the strong foundation of DarkStock. With balanced integration, it excels in generating coherent, context-aware, and imaginative outputs. + Llama3.1-DarkStorm-Aspire-8B combines cutting-edge natural language processing capabilities to perform exceptionally well in a wide variety of tasks: + Research and Analysis: Perfect for analyzing textual data, planning experiments, and brainstorming complex ideas. + Creative Writing and Roleplaying: Excels in creative writing, immersive storytelling, and generating roleplaying scenarios. + General AI Applications: Use it for any application where advanced reasoning, instruction-following, and creativity are needed. 
+ overrides: + parameters: + model: Llama3.1-DarkStorm-Aspire-8B.Q4_K_M.gguf + files: + - filename: Llama3.1-DarkStorm-Aspire-8B.Q4_K_M.gguf + sha256: b1686b3039509034add250db9ddcd7d6dbefd37136ac6717bc4fec3ec47ecd03 + uri: huggingface://mradermacher/Llama3.1-DarkStorm-Aspire-8B-GGUF/Llama3.1-DarkStorm-Aspire-8B.Q4_K_M.gguf - &deepseek ## Deepseek url: "github:mudler/LocalAI/gallery/deepseek.yaml@master" From 82db2fa425a4dd33ee1383567d18cb1f96334c2e Mon Sep 17 00:00:00 2001 From: Ettore Di Giacinto Date: Sat, 26 Oct 2024 09:09:22 +0200 Subject: [PATCH 0432/1530] models(gallery): add llama-3.2-sun-2.5b-chat (#3971) Signed-off-by: Ettore Di Giacinto --- gallery/index.yaml | 32 ++++++++++++++++++++++++++++++++ 1 file changed, 32 insertions(+) diff --git a/gallery/index.yaml b/gallery/index.yaml index c537f73b..3657b529 100644 --- a/gallery/index.yaml +++ b/gallery/index.yaml @@ -210,6 +210,38 @@ - filename: Llama-3.2-3B-Reasoning-Time.Q4_K_M.gguf sha256: 80b10e1a5c6e27f6d8cf08c3472af2b15a9f63ebf8385eedfe8615f85116c73f uri: huggingface://mradermacher/Llama-3.2-3B-Reasoning-Time-GGUF/Llama-3.2-3B-Reasoning-Time.Q4_K_M.gguf +- !!merge <<: *llama32 + name: "llama-3.2-sun-2.5b-chat" + icon: https://i.ibb.co/PF0TdMJ/imagine-image-9a56cee7-0f4f-4cc2-b265-a5b8d04f266b.png + urls: + - https://huggingface.co/meditsolutions/Llama-3.2-SUN-2.5B-chat + - https://huggingface.co/mradermacher/Llama-3.2-SUN-2.5B-chat-GGUF + description: | + Base Model + Llama 3.2 1B + Extended Size + 1B to 2.5B parameters + Extension Method + Proprietary technique developed by MedIT Solutions + Fine-tuning + Open (or open subsets allowing for commercial use) open datasets from HF + Open (or open subsets allowing for commercial use) SFT datasets from HF + Training Status + Current version: chat-1.0.0 + Key Features + Built on Llama 3.2 architecture + Expanded from 1B to 2.47B parameters + Optimized for open-ended conversations + Incorporates supervised fine-tuning for improved performance + Use Case + 
General conversation and task-oriented interactions + overrides: + parameters: + model: Llama-3.2-SUN-2.5B-chat.Q4_K_M.gguf + files: + - filename: Llama-3.2-SUN-2.5B-chat.Q4_K_M.gguf + sha256: 4cd1796806200662500e1393ae8e0a32306fab2b6679a746ee53ad2130e5f3a2 + uri: huggingface://mradermacher/Llama-3.2-SUN-2.5B-chat-GGUF/Llama-3.2-SUN-2.5B-chat.Q4_K_M.gguf - &qwen25 ## Qwen2.5 name: "qwen2.5-14b-instruct" From 546dce68a6ea4146a202976bfa9d13ead4fb5b7a Mon Sep 17 00:00:00 2001 From: Ettore Di Giacinto Date: Sat, 26 Oct 2024 14:50:18 +0200 Subject: [PATCH 0433/1530] chore: change url to github repository (#3972) Signed-off-by: Ettore Di Giacinto --- core/http/views/partials/footer.html | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/core/http/views/partials/footer.html b/core/http/views/partials/footer.html index 6e732f96..e3513258 100644 --- a/core/http/views/partials/footer.html +++ b/core/http/views/partials/footer.html @@ -1,5 +1,5 @@ From 43bfdc95612923bd12d785c580824f56b5a6732c Mon Sep 17 00:00:00 2001 From: Ettore Di Giacinto Date: Sat, 26 Oct 2024 14:52:55 +0200 Subject: [PATCH 0434/1530] models(gallery): add darkest-muse-v1 (#3973) Signed-off-by: Ettore Di Giacinto --- gallery/index.yaml | 23 +++++++++++++++++++++++ 1 file changed, 23 insertions(+) diff --git a/gallery/index.yaml b/gallery/index.yaml index 3657b529..c56d6446 100644 --- a/gallery/index.yaml +++ b/gallery/index.yaml @@ -3182,6 +3182,29 @@ - filename: Apollo2-9B.Q4_K_M.gguf sha256: 9fdb63f78e574558a4f33782eca88716eea28e90ea3ae36c381769cde6b81e0f uri: huggingface://mradermacher/Apollo2-9B-GGUF/Apollo2-9B.Q4_K_M.gguf +- !!merge <<: *gemma + name: "darkest-muse-v1" + icon: https://cdn-uploads.huggingface.co/production/uploads/65ad56b4c2eef2ba1154618c/0AB6uPPuCvbNtRZb3Rdj1.png + urls: + - https://huggingface.co/sam-paech/Darkest-muse-v1 + - https://huggingface.co/bartowski/Darkest-muse-v1-GGUF + description: | + This is a creative writing merge of two very different models that I 
trained on the brand new Gutenberg3 dataset, plus Ataraxy-v2 in the mix. + + It's lost much of the slop and tryhard vocab flexing and positivity bias that's typical of these models and writes in its own voice. + + The main source model in the merge, Quill-v1, inherited a natural, spare prose from the human writing in the gutenberg set. The other source model, Delirium-v1, got overcooked in SIMPO training; it has crazy panache, a really dark flair for the grotesque, and has some mental issues. These two source models balance each other out in the merge, resulting in something pretty unique. + + It seems to be quite uncensored and creative. Since Delirium was pushed right to the edge during training, the merge may exhibit some of its weirdness and word / concept fixations. This may be mitigated by using custom anti-slop lists. + + The payoff is a really creative, stream of consciousness style of writing, with punchy dialogue that I haven't seen in other models. Oh, it also scored around the top of the EQ-Bench creative writing leaderboard! 
+ overrides: + parameters: + model: Darkest-muse-v1-Q4_K_M.gguf + files: + - filename: Darkest-muse-v1-Q4_K_M.gguf + sha256: a19ec9e3dc875511ea771bf363e71e7ae5578986b2f8cf50aeb50683d56e9b76 + uri: huggingface://bartowski/Darkest-muse-v1-GGUF/Darkest-muse-v1-Q4_K_M.gguf - &llama3 url: "github:mudler/LocalAI/gallery/llama3-instruct.yaml@master" icon: https://cdn-uploads.huggingface.co/production/uploads/642cc1c253e76b4c2286c58e/aJJxKus1wP5N-euvHEUq7.png From 175ae751baddb12536ed171f39780936a87f5aac Mon Sep 17 00:00:00 2001 From: Ettore Di Giacinto Date: Sat, 26 Oct 2024 14:56:02 +0200 Subject: [PATCH 0435/1530] models(gallery): add llama-3.2-3b-instruct-uncensored (#3974) Signed-off-by: Ettore Di Giacinto --- gallery/index.yaml | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/gallery/index.yaml b/gallery/index.yaml index c56d6446..f2bdce97 100644 --- a/gallery/index.yaml +++ b/gallery/index.yaml @@ -242,6 +242,21 @@ - filename: Llama-3.2-SUN-2.5B-chat.Q4_K_M.gguf sha256: 4cd1796806200662500e1393ae8e0a32306fab2b6679a746ee53ad2130e5f3a2 uri: huggingface://mradermacher/Llama-3.2-SUN-2.5B-chat-GGUF/Llama-3.2-SUN-2.5B-chat.Q4_K_M.gguf +- !!merge <<: *llama32 + name: "llama-3.2-3b-instruct-uncensored" + icon: https://i.imgur.com/JOePyAN.png + urls: + - https://huggingface.co/bartowski/Llama-3.2-3B-Instruct-uncensored-GGUF + - https://huggingface.co/chuanli11/Llama-3.2-3B-Instruct-uncensored + description: | + This is an uncensored version of the original Llama-3.2-3B-Instruct, created using mlabonne's script, which builds on FailSpy's notebook and the original work from Andy Arditi et al.. 
+ overrides: + parameters: + model: Llama-3.2-3B-Instruct-uncensored-Q4_K_M.gguf + files: + - filename: Llama-3.2-3B-Instruct-uncensored-Q4_K_M.gguf + sha256: 80f532552e3d56e366226f428395de8285a671f2da1d5fd68563741181b77a95 + uri: huggingface://bartowski/Llama-3.2-3B-Instruct-uncensored-GGUF/Llama-3.2-3B-Instruct-uncensored-Q4_K_M.gguf - &qwen25 ## Qwen2.5 name: "qwen2.5-14b-instruct" From 4528e969c9fed1764409ede8ae4f0ef2851ab691 Mon Sep 17 00:00:00 2001 From: Ettore Di Giacinto Date: Sat, 26 Oct 2024 14:56:41 +0200 Subject: [PATCH 0436/1530] models(gallery): add thebeagle-v2beta-32b-mgs (#3975) Signed-off-by: Ettore Di Giacinto --- gallery/index.yaml | 26 ++++++++++++++++++++++++++ 1 file changed, 26 insertions(+) diff --git a/gallery/index.yaml b/gallery/index.yaml index f2bdce97..29526534 100644 --- a/gallery/index.yaml +++ b/gallery/index.yaml @@ -675,6 +675,32 @@ - filename: Meissa-Qwen2.5-7B-Instruct.Q4_K_M.gguf sha256: 632b10d5c0e98bc8d53295886da2d57772a54bb6f6fa01d458e9e8c7fa9c905a uri: huggingface://QuantFactory/Meissa-Qwen2.5-7B-Instruct-GGUF/Meissa-Qwen2.5-7B-Instruct.Q4_K_M.gguf +- !!merge <<: *qwen25 + name: "thebeagle-v2beta-32b-mgs" + urls: + - https://huggingface.co/fblgit/TheBeagle-v2beta-32B-MGS + - https://huggingface.co/bartowski/TheBeagle-v2beta-32B-MGS-GGUF + description: | + This model is an experimental version of our latest innovation: MGS. Its up to you to figure out what does it means, but its very explicit. We didn't applied our known UNA algorithm to the forward pass, but they are entirely compatible and operates in different parts of the neural network and in different ways, tho they both can be seen as a regularization technique. + + Updated tokenizer_config.json (from the base_model) + Regenerated Quants (being uploaded) + Re-submitted Leaderboard Evaluation, MATH & IFeval have relevant updates + Aligned LICENSE with Qwen terms. + + MGS stands for... Many-Geeks-Searching... and thats it. 
Hint: 1+1 is 2, and 1+1 is not 3 + We still believe on 1-Epoch should be enough, so we just did 1 Epoch only. + Dataset + Used here the first decent (corpora & size) dataset on the hub: Magpie-Align/Magpie-Pro-300K-Filtered Kudos to the Magpie team to contribute with some decent stuff that I personally think is very good to ablate. + It achieves the following results on the evaluation set: + Loss: 0.5378 (1 Epoch), outperforming the baseline model. + overrides: + parameters: + model: TheBeagle-v2beta-32B-MGS-Q4_K_M.gguf + files: + - filename: TheBeagle-v2beta-32B-MGS-Q4_K_M.gguf + sha256: db0d3b3c5341d2d51115794bf5da6552b5c0714b041de9b82065cc0c982dd4f7 + uri: huggingface://bartowski/TheBeagle-v2beta-32B-MGS-GGUF/TheBeagle-v2beta-32B-MGS-Q4_K_M.gguf - &archfunct license: apache-2.0 tags: From e314cdcdde8190f16eabe5cdaf76f96dd1ce1f2a Mon Sep 17 00:00:00 2001 From: "LocalAI [bot]" <139863280+localai-bot@users.noreply.github.com> Date: Sat, 26 Oct 2024 23:40:42 +0200 Subject: [PATCH 0437/1530] chore: :arrow_up: Update ggerganov/llama.cpp to `cc2983d3753c94a630ca7257723914d4c4f6122b` (#3976) :arrow_up: Update ggerganov/llama.cpp Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com> Co-authored-by: mudler <2420543+mudler@users.noreply.github.com> --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index 93c0c9a4..6eef4a92 100644 --- a/Makefile +++ b/Makefile @@ -8,7 +8,7 @@ DETECT_LIBS?=true # llama.cpp versions GOLLAMA_REPO?=https://github.com/go-skynet/go-llama.cpp GOLLAMA_VERSION?=2b57a8ae43e4699d3dc5d1496a1ccd42922993be -CPPLLAMA_VERSION?=668750357e66bfa3d1504b65699f5a0dfe3cb7cb +CPPLLAMA_VERSION?=cc2983d3753c94a630ca7257723914d4c4f6122b # go-rwkv version RWKV_REPO?=https://github.com/donomii/go-rwkv.cpp From a8c08d83d0dd92c8901d75e909e3a278d184c5f1 Mon Sep 17 00:00:00 2001 From: Ettore Di Giacinto Date: Sun, 27 Oct 2024 09:06:27 +0100 Subject: [PATCH 0438/1530] models(gallery): add 
l3.1-70blivion-v0.1-rc1-70b-i1 (#3977) Signed-off-by: Ettore Di Giacinto --- gallery/index.yaml | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) diff --git a/gallery/index.yaml b/gallery/index.yaml index 29526534..6508ace9 100644 --- a/gallery/index.yaml +++ b/gallery/index.yaml @@ -1887,6 +1887,25 @@ - filename: Llama3.1-DarkStorm-Aspire-8B.Q4_K_M.gguf sha256: b1686b3039509034add250db9ddcd7d6dbefd37136ac6717bc4fec3ec47ecd03 uri: huggingface://mradermacher/Llama3.1-DarkStorm-Aspire-8B-GGUF/Llama3.1-DarkStorm-Aspire-8B.Q4_K_M.gguf +- !!merge <<: *llama31 + name: "l3.1-70blivion-v0.1-rc1-70b-i1" + icon: https://huggingface.co/invisietch/L3.1-70Blivion-v0.1-rc1-70B/resolve/main/header.png + urls: + - https://huggingface.co/invisietch/L3.1-70Blivion-v0.1-rc1-70B + - https://huggingface.co/mradermacher/L3.1-70Blivion-v0.1-rc1-70B-i1-GGUF + description: | + 70Blivion v0.1 is a model in the release candidate stage, based on a merge of L3.1 Nemotron 70B & Euryale 2.2 with a healing training step. Further training will be needed to get this model to release quality. + + This model is designed to be suitable for creative writing and roleplay. This RC is not a finished product, but your feedback will drive the creation of better models. + + This is a release candidate model. It has some known issues and probably some unknown ones too, because the purpose of these early releases is to seek feedback. 
+ overrides: + parameters: + model: L3.1-70Blivion-v0.1-rc1-70B.i1-Q4_K_M.gguf + files: + - filename: L3.1-70Blivion-v0.1-rc1-70B.i1-Q4_K_M.gguf + sha256: 27b10c3ca4507e8bf7d305d60e5313b54ef5fffdb43a03f36223d19d906e39f3 + uri: huggingface://mradermacher/L3.1-70Blivion-v0.1-rc1-70B-i1-GGUF/L3.1-70Blivion-v0.1-rc1-70B.i1-Q4_K_M.gguf - &deepseek ## Deepseek url: "github:mudler/LocalAI/gallery/deepseek.yaml@master" From 8327e85e34509e50230303a7469d0528602d9060 Mon Sep 17 00:00:00 2001 From: Ettore Di Giacinto Date: Sun, 27 Oct 2024 09:08:34 +0100 Subject: [PATCH 0439/1530] models(gallery): add llama-3.1-hawkish-8b (#3978) Signed-off-by: Ettore Di Giacinto --- gallery/index.yaml | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) diff --git a/gallery/index.yaml b/gallery/index.yaml index 6508ace9..88f5f272 100644 --- a/gallery/index.yaml +++ b/gallery/index.yaml @@ -1906,6 +1906,23 @@ - filename: L3.1-70Blivion-v0.1-rc1-70B.i1-Q4_K_M.gguf sha256: 27b10c3ca4507e8bf7d305d60e5313b54ef5fffdb43a03f36223d19d906e39f3 uri: huggingface://mradermacher/L3.1-70Blivion-v0.1-rc1-70B-i1-GGUF/L3.1-70Blivion-v0.1-rc1-70B.i1-Q4_K_M.gguf +- !!merge <<: *llama31 + icon: https://i.imgur.com/sdN0Aqg.jpeg + name: "llama-3.1-hawkish-8b" + urls: + - https://huggingface.co/mukaj/Llama-3.1-Hawkish-8B + - https://huggingface.co/bartowski/Llama-3.1-Hawkish-8B-GGUF + description: | + Model has been further finetuned on a set of newly generated 50m high quality tokens related to Financial topics covering topics such as Economics, Fixed Income, Equities, Corporate Financing, Derivatives and Portfolio Management. Data was gathered from publicly available sources and went through several stages of curation into instruction data from the initial amount of 250m+ tokens. To aid in mitigating forgetting information from the original finetune, the data was mixed with instruction sets on the topics of Coding, General Knowledge, NLP and Conversational Dialogue. 
+ + The model has shown to improve over a number of benchmarks over the original model, notably in Math and Economics. This model represents the first time a 8B model has been able to convincingly get a passing score on the CFA Level 1 exam, requiring a typical 300 hours of studying, indicating a significant improvement in Financial Knowledge. + overrides: + parameters: + model: Llama-3.1-Hawkish-8B-Q4_K_M.gguf + files: + - filename: Llama-3.1-Hawkish-8B-Q4_K_M.gguf + sha256: 613693936bbe641f41560151753716ba549ca052260fc5c0569e943e0bb834c3 + uri: huggingface://bartowski/Llama-3.1-Hawkish-8B-GGUF/Llama-3.1-Hawkish-8B-Q4_K_M.gguf - &deepseek ## Deepseek url: "github:mudler/LocalAI/gallery/deepseek.yaml@master" From eb34f838f86bcd75384db9b5e60a727e51532709 Mon Sep 17 00:00:00 2001 From: "LocalAI [bot]" <139863280+localai-bot@users.noreply.github.com> Date: Sun, 27 Oct 2024 22:43:51 +0100 Subject: [PATCH 0440/1530] chore: :arrow_up: Update ggerganov/llama.cpp to `8841ce3f439de6e770f70319b7e08b6613197ea7` (#3979) :arrow_up: Update ggerganov/llama.cpp Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com> Co-authored-by: mudler <2420543+mudler@users.noreply.github.com> --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index 6eef4a92..aebfa78a 100644 --- a/Makefile +++ b/Makefile @@ -8,7 +8,7 @@ DETECT_LIBS?=true # llama.cpp versions GOLLAMA_REPO?=https://github.com/go-skynet/go-llama.cpp GOLLAMA_VERSION?=2b57a8ae43e4699d3dc5d1496a1ccd42922993be -CPPLLAMA_VERSION?=cc2983d3753c94a630ca7257723914d4c4f6122b +CPPLLAMA_VERSION?=8841ce3f439de6e770f70319b7e08b6613197ea7 # go-rwkv version RWKV_REPO?=https://github.com/donomii/go-rwkv.cpp From c33083aeca0184043bf2c0b5720c7d5627492653 Mon Sep 17 00:00:00 2001 From: Ettore Di Giacinto Date: Mon, 28 Oct 2024 09:59:21 +0100 Subject: [PATCH 0441/1530] models(gallery): add quill-v1 (#3980) Signed-off-by: Ettore Di Giacinto --- gallery/index.yaml | 17 
+++++++++++++++++ 1 file changed, 17 insertions(+) diff --git a/gallery/index.yaml b/gallery/index.yaml index 88f5f272..4c8b8b40 100644 --- a/gallery/index.yaml +++ b/gallery/index.yaml @@ -3282,6 +3282,23 @@ - filename: Darkest-muse-v1-Q4_K_M.gguf sha256: a19ec9e3dc875511ea771bf363e71e7ae5578986b2f8cf50aeb50683d56e9b76 uri: huggingface://bartowski/Darkest-muse-v1-GGUF/Darkest-muse-v1-Q4_K_M.gguf +- !!merge <<: *gemma + name: "quill-v1" + icon: https://cdn-uploads.huggingface.co/production/uploads/65ad56b4c2eef2ba1154618c/gnMF8gRhurS9RcoylAK1Y.png + urls: + - https://huggingface.co/sam-paech/Quill-v1 + - https://huggingface.co/QuantFactory/Quill-v1-GGUF + description: | + Quill is a capable, humanlike writing model trained on a large dataset of late 19th and early 20th century writing from the Gutenberg Project. This model writes with a natural cadence and low gpt-slop, having inherited some human qualities from the Gutenberg3 dataset. It writes with more simple, spare prose than the typical overly-adjectived LLM writing style. + + This model was trained using gemma-2-9b-it as the base. The training methods used were ORPO (gently) then SIMPO (less gently). 
+ overrides: + parameters: + model: Quill-v1.Q4_K_M.gguf + files: + - filename: Quill-v1.Q4_K_M.gguf + sha256: 419a7e0709b28130ca56941308d11c06a3548b8eacb081fb6a2c3d1622ac56b3 + uri: huggingface://QuantFactory/Quill-v1-GGUF/Quill-v1.Q4_K_M.gguf - &llama3 url: "github:mudler/LocalAI/gallery/llama3-instruct.yaml@master" icon: https://cdn-uploads.huggingface.co/production/uploads/642cc1c253e76b4c2286c58e/aJJxKus1wP5N-euvHEUq7.png From 9ea8159683ca569050cc808543b715004b5aa3fb Mon Sep 17 00:00:00 2001 From: Ettore Di Giacinto Date: Mon, 28 Oct 2024 10:09:53 +0100 Subject: [PATCH 0442/1530] models(gallery): add delirium-v1 (#3981) Signed-off-by: Ettore Di Giacinto --- gallery/index.yaml | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) diff --git a/gallery/index.yaml b/gallery/index.yaml index 4c8b8b40..bb23b1af 100644 --- a/gallery/index.yaml +++ b/gallery/index.yaml @@ -3299,6 +3299,23 @@ - filename: Quill-v1.Q4_K_M.gguf sha256: 419a7e0709b28130ca56941308d11c06a3548b8eacb081fb6a2c3d1622ac56b3 uri: huggingface://QuantFactory/Quill-v1-GGUF/Quill-v1.Q4_K_M.gguf +- !!merge <<: *gemma + name: "delirium-v1" + icon: https://cdn-uploads.huggingface.co/production/uploads/65ad56b4c2eef2ba1154618c/TDY0sDC9vMohMM8dn_5YN.png + urls: + - https://huggingface.co/sam-paech/Delirium-v1 + - https://huggingface.co/QuantFactory/Delirium-v1-GGUF + description: | + This model was cooked a bit too long during SIMPO training. It writes like Hunter S. Thompson 2 days into an ether binge. It's grotesque, dark, grimy and genius. + + It's trained on an experimental gutenberg + antislop dataset. This contains the original two gutenberg sets by jondurbin and nbeerbower, as well as a subset of my own set, gutenberg3. The antislop pairs were generated with gemma-2-9b-it, with one sample generated with the AntiSlop sampler and the rejected sample generated without. 
+ overrides: + parameters: + model: Delirium-v1.Q4_K_M.gguf + files: + - filename: Delirium-v1.Q4_K_M.gguf + sha256: 9c274913572b8afcd5f18f0230f9ddf0a972bae36bae5b0fe8266b29a5dd06a7 + uri: huggingface://QuantFactory/Delirium-v1-GGUF/Delirium-v1.Q4_K_M.gguf - &llama3 url: "github:mudler/LocalAI/gallery/llama3-instruct.yaml@master" icon: https://cdn-uploads.huggingface.co/production/uploads/642cc1c253e76b4c2286c58e/aJJxKus1wP5N-euvHEUq7.png From 66b03b54cbeae89f01bcbae86561baf7136164a1 Mon Sep 17 00:00:00 2001 From: Ettore Di Giacinto Date: Mon, 28 Oct 2024 16:24:14 +0100 Subject: [PATCH 0443/1530] models(gallery): add magnum-v4-9b (#3983) Signed-off-by: Ettore Di Giacinto --- gallery/index.yaml | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git a/gallery/index.yaml b/gallery/index.yaml index bb23b1af..c7d9352f 100644 --- a/gallery/index.yaml +++ b/gallery/index.yaml @@ -3316,6 +3316,24 @@ - filename: Delirium-v1.Q4_K_M.gguf sha256: 9c274913572b8afcd5f18f0230f9ddf0a972bae36bae5b0fe8266b29a5dd06a7 uri: huggingface://QuantFactory/Delirium-v1-GGUF/Delirium-v1.Q4_K_M.gguf +- !!merge <<: *gemma + url: "github:mudler/LocalAI/gallery/chatml.yaml@master" + name: "magnum-v4-9b" + icon: https://cdn-uploads.huggingface.co/production/uploads/658a46cbfb9c2bdfae75b3a6/vxYDYerLy2vD8n05nL2WU.png + urls: + - https://huggingface.co/anthracite-org/magnum-v4-9b + - https://huggingface.co/QuantFactory/magnum-v4-9b-GGUF + description: | + This is a series of models designed to replicate the prose quality of the Claude 3 models, specifically Sonnet and Opus. + + This model is fine-tuned on top of gemma 2 9b (chatML'ified). 
+ overrides: + parameters: + model: magnum-v4-9b.Q4_K_M.gguf + files: + - filename: magnum-v4-9b.Q4_K_M.gguf + sha256: 176cb8cbac1920d98853a079d635d581c2063b7ff337e88bf9f28b43f8c7eb23 + uri: huggingface://QuantFactory/magnum-v4-9b-GGUF/magnum-v4-9b.Q4_K_M.gguf - &llama3 url: "github:mudler/LocalAI/gallery/llama3-instruct.yaml@master" icon: https://cdn-uploads.huggingface.co/production/uploads/642cc1c253e76b4c2286c58e/aJJxKus1wP5N-euvHEUq7.png From 8de0f21f7c7a080c54a583c3ce793f0bfebded5d Mon Sep 17 00:00:00 2001 From: Ettore Di Giacinto Date: Mon, 28 Oct 2024 16:35:24 +0100 Subject: [PATCH 0444/1530] models(gallery): add llama-3-whiterabbitneo-8b-v2.0 (#3984) Signed-off-by: Ettore Di Giacinto --- gallery/index.yaml | 32 ++++++++++++++++++++++++++++++++ 1 file changed, 32 insertions(+) diff --git a/gallery/index.yaml b/gallery/index.yaml index c7d9352f..1b5c8c67 100644 --- a/gallery/index.yaml +++ b/gallery/index.yaml @@ -4539,6 +4539,38 @@ - filename: Loki-base.i1-Q4_K_M.gguf sha256: 60a4357fa399bfd18aa841cc529da09439791331d117a4f06f0467d002b385bb uri: huggingface://mradermacher/Loki-base-i1-GGUF/Loki-base.i1-Q4_K_M.gguf +- !!merge <<: *llama3 + name: "llama-3-whiterabbitneo-8b-v2.0" + icon: https://huggingface.co/migtissera/WhiteRabbitNeo/resolve/main/WhiteRabbitNeo.png + urls: + - https://huggingface.co/WhiteRabbitNeo/Llama-3-WhiteRabbitNeo-8B-v2.0 + - https://huggingface.co/QuantFactory/Llama-3-WhiteRabbitNeo-8B-v2.0-GGUF + description: | + WhiteRabbitNeo is a model series that can be used for offensive and defensive cybersecurity. + Topics Covered: + - Open Ports: Identifying open ports is crucial as they can be entry points for attackers. Common ports to check include HTTP (80, 443), FTP (21), SSH (22), and SMB (445). + - Outdated Software or Services: Systems running outdated software or services are often vulnerable to exploits. This includes web servers, database servers, and any third-party software. 
+ - Default Credentials: Many systems and services are installed with default usernames and passwords, which are well-known and can be easily exploited. + - Misconfigurations: Incorrectly configured services, permissions, and security settings can introduce vulnerabilities. + - Injection Flaws: SQL injection, command injection, and cross-site scripting (XSS) are common issues in web applications. + - Unencrypted Services: Services that do not use encryption (like HTTP instead of HTTPS) can expose sensitive data. + - Known Software Vulnerabilities: Checking for known vulnerabilities in software using databases like the National Vulnerability Database (NVD) or tools like Nessus or OpenVAS. + - Cross-Site Request Forgery (CSRF): This is where unauthorized commands are transmitted from a user that the web application trusts. + - Insecure Direct Object References: This occurs when an application provides direct access to objects based on user-supplied input. + - Security Misconfigurations in Web Servers/Applications: This includes issues like insecure HTTP headers or verbose error messages that reveal too much information. + - Broken Authentication and Session Management: This can allow attackers to compromise passwords, keys, or session tokens, or to exploit other implementation flaws to assume other users' identities. + - Sensitive Data Exposure: Includes vulnerabilities that expose sensitive data, such as credit card numbers, health records, or personal information. + - API Vulnerabilities: In modern web applications, APIs are often used and can have vulnerabilities like insecure endpoints or data leakage. + - Denial of Service (DoS) Vulnerabilities: Identifying services that are vulnerable to DoS attacks, which can make the resource unavailable to legitimate users. + - Buffer Overflows: Common in older software, these vulnerabilities can allow an attacker to crash the system or execute arbitrary code. + - More .. 
+ overrides: + parameters: + model: Llama-3-WhiteRabbitNeo-8B-v2.0.Q4_K_M.gguf + files: + - filename: Llama-3-WhiteRabbitNeo-8B-v2.0.Q4_K_M.gguf + sha256: cf01ba2ca5af2a3ecd6a2221d19b8b91ec0e9fe06fa8fdffd774d5e0a2459c4c + uri: huggingface://QuantFactory/Llama-3-WhiteRabbitNeo-8B-v2.0-GGUF/Llama-3-WhiteRabbitNeo-8B-v2.0.Q4_K_M.gguf - &dolphin name: "dolphin-2.9-llama3-8b" url: "github:mudler/LocalAI/gallery/hermes-2-pro-mistral.yaml@master" From 75bc933dc4379722b3b6fddf1fab6936c7b56d8f Mon Sep 17 00:00:00 2001 From: Ettore Di Giacinto Date: Mon, 28 Oct 2024 19:00:55 +0100 Subject: [PATCH 0445/1530] models(gallery): add l3-nymeria-maid-8b (#3985) Signed-off-by: Ettore Di Giacinto --- gallery/index.yaml | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) diff --git a/gallery/index.yaml b/gallery/index.yaml index 1b5c8c67..24130987 100644 --- a/gallery/index.yaml +++ b/gallery/index.yaml @@ -4571,6 +4571,25 @@ - filename: Llama-3-WhiteRabbitNeo-8B-v2.0.Q4_K_M.gguf sha256: cf01ba2ca5af2a3ecd6a2221d19b8b91ec0e9fe06fa8fdffd774d5e0a2459c4c uri: huggingface://QuantFactory/Llama-3-WhiteRabbitNeo-8B-v2.0-GGUF/Llama-3-WhiteRabbitNeo-8B-v2.0.Q4_K_M.gguf +- !!merge <<: *llama3 + name: "l3-nymeria-maid-8b" + icon: https://huggingface.co/tannedbum/L3-Nymeria-Maid-8B-exl2/resolve/main/Nymeria.png? + urls: + - https://huggingface.co/tannedbum/L3-Nymeria-Maid-8B + - https://huggingface.co/QuantFactory/L3-Nymeria-Maid-8B-GGUF + description: | + The model is a merge of pre-trained language models created using the mergekit library. It combines the following models: + - Sao10K/L3-8B-Stheno-v3.2 + - princeton-nlp/Llama-3-Instruct-8B-SimPO + The merge was performed using the slerp merge method. The models were merged using the slerp merge method and the configuration used to produce the model is included in the text. The model is not suitable for all audiences and is intended for scientific purposes. + Nymeria is the balanced version, doesn't force nsfw. 
Nymeria-Maid has more Stheno's weights, leans more on nsfw and is more submissive. + overrides: + parameters: + model: L3-Nymeria-Maid-8B.Q4_K_M.gguf + files: + - filename: L3-Nymeria-Maid-8B.Q4_K_M.gguf + sha256: 05bce561daa59b38cf9b79973c3b1e2e27af6d1e8e41570760af54800a09bcc2 + uri: huggingface://QuantFactory/L3-Nymeria-Maid-8B-GGUF/L3-Nymeria-Maid-8B.Q4_K_M.gguf - &dolphin name: "dolphin-2.9-llama3-8b" url: "github:mudler/LocalAI/gallery/hermes-2-pro-mistral.yaml@master" From 94010a0a441a3525b638d40dd468eb76fa3c9c27 Mon Sep 17 00:00:00 2001 From: Ettore Di Giacinto Date: Mon, 28 Oct 2024 19:12:59 +0100 Subject: [PATCH 0446/1530] models(gallery): add meraj-mini (#3987) Signed-off-by: Ettore Di Giacinto --- gallery/index.yaml | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/gallery/index.yaml b/gallery/index.yaml index 24130987..46e5f91c 100644 --- a/gallery/index.yaml +++ b/gallery/index.yaml @@ -701,6 +701,21 @@ - filename: TheBeagle-v2beta-32B-MGS-Q4_K_M.gguf sha256: db0d3b3c5341d2d51115794bf5da6552b5c0714b041de9b82065cc0c982dd4f7 uri: huggingface://bartowski/TheBeagle-v2beta-32B-MGS-GGUF/TheBeagle-v2beta-32B-MGS-Q4_K_M.gguf +- !!merge <<: *qwen25 + name: "meraj-mini" + icon: https://i.ibb.co/CmPSSpq/Screenshot-2024-10-06-at-9-45-06-PM.png + urls: + - https://huggingface.co/arcee-ai/Meraj-Mini + - https://huggingface.co/QuantFactory/Meraj-Mini-GGUF + description: | + Arcee Meraj Mini is a quantized version of the Meraj-Mini model, created using llama.cpp. It is an open-source model that is fine-tuned from the Qwen2.5-7B-Instruct model and is designed for both Arabic and English languages. The model has undergone evaluations across multiple benchmarks in both languages and demonstrates top-tier performance in Arabic and competitive results in English. The key stages in its development include data preparation, initial training, iterative training and post-training, evaluation, and final model creation. 
The model is capable of solving a wide range of language tasks and is suitable for various applications such as education, mathematics and coding, customer service, and content creation. The Arcee Meraj Mini model consistently outperforms state-of-the-art models on most benchmarks of the Open Arabic LLM Leaderboard (OALL), highlighting its improvements and effectiveness in Arabic language content. + overrides: + parameters: + model: Meraj-Mini.Q4_K_M.gguf + files: + - filename: Meraj-Mini.Q4_K_M.gguf + sha256: f8f3923eb924b8f8e8f530a5bf07fcbd5b3dd10dd478d229d6f4377e31eb3938 + uri: huggingface://QuantFactory/Meraj-Mini-GGUF/Meraj-Mini.Q4_K_M.gguf - &archfunct license: apache-2.0 tags: From a8e10f03e997e7ee17e96bc3524592e20996a045 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 28 Oct 2024 19:33:05 +0100 Subject: [PATCH 0447/1530] chore(deps): Bump openai from 1.51.2 to 1.52.2 in /examples/langchain/langchainpy-localai-example (#3993) chore(deps): Bump openai Bumps [openai](https://github.com/openai/openai-python) from 1.51.2 to 1.52.2. - [Release notes](https://github.com/openai/openai-python/releases) - [Changelog](https://github.com/openai/openai-python/blob/main/CHANGELOG.md) - [Commits](https://github.com/openai/openai-python/compare/v1.51.2...v1.52.2) --- updated-dependencies: - dependency-name: openai dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- examples/langchain/langchainpy-localai-example/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/langchain/langchainpy-localai-example/requirements.txt b/examples/langchain/langchainpy-localai-example/requirements.txt index 2cf468f1..b5f47739 100644 --- a/examples/langchain/langchainpy-localai-example/requirements.txt +++ b/examples/langchain/langchainpy-localai-example/requirements.txt @@ -18,7 +18,7 @@ multidict==6.1.0 mypy-extensions==1.0.0 numexpr==2.10.1 numpy==2.1.2 -openai==1.51.2 +openai==1.52.2 openapi-schema-pydantic==1.2.4 packaging>=23.2 pydantic==2.9.2 From d1cb2467fd712805edcf732f0c1bf2911472973f Mon Sep 17 00:00:00 2001 From: Ettore Di Giacinto Date: Mon, 28 Oct 2024 19:33:52 +0100 Subject: [PATCH 0448/1530] models(gallery): add granite-3.0-1b-a400m-instruct (#3994) Signed-off-by: Ettore Di Giacinto --- gallery/granite.yaml | 40 ++++++++++++++++++++++++++++++++++++++++ gallery/index.yaml | 26 ++++++++++++++++++++++++++ 2 files changed, 66 insertions(+) create mode 100644 gallery/granite.yaml diff --git a/gallery/granite.yaml b/gallery/granite.yaml new file mode 100644 index 00000000..465cca18 --- /dev/null +++ b/gallery/granite.yaml @@ -0,0 +1,40 @@ +--- +name: "granite" + +config_file: | + mmap: true + template: + chat_message: | + <|{{ .RoleName }}|> + {{ if .FunctionCall -}} + Function call: + {{ else if eq .RoleName "tool" -}} + Function response: + {{ end -}} + {{ if .Content -}} + {{.Content }} + {{ end -}} + {{ if .FunctionCall -}} + {{toJson .FunctionCall}} + {{ end -}} + function: | + <|system|> + You are a function calling AI model. You are provided with functions to execute. You may call one or more functions to assist with the user query. Don't make assumptions about what values to plug into functions. 
Here are the available tools: + {{range .Functions}} + {'type': 'function', 'function': {'name': '{{.Name}}', 'description': '{{.Description}}', 'parameters': {{toJson .Parameters}} }} + {{end}} + For each function call return a json object with function name and arguments + {{.Input -}} + <|assistant|> + chat: | + {{.Input -}} + <|assistant|> + completion: | + {{.Input}} + context_size: 4096 + f16: true + stopwords: + - '<|im_end|>' + - '' + - '' + - '<|' diff --git a/gallery/index.yaml b/gallery/index.yaml index 46e5f91c..6403b025 100644 --- a/gallery/index.yaml +++ b/gallery/index.yaml @@ -1,4 +1,30 @@ --- +- &granite3 + name: "granite-3.0-1b-a400m-instruct" + urls: + - https://huggingface.co/ibm-granite/granite-3.0-1b-a400m-instruct + - https://huggingface.co/QuantFactory/granite-3.0-1b-a400m-instruct-GGUF + overrides: + parameters: + model: granite-3.0-1b-a400m-instruct.Q4_K_M.gguf + files: + - filename: granite-3.0-1b-a400m-instruct.Q4_K_M.gguf + sha256: 9571b5fc9676ebb59def3377dc848584463fb7f09ed59ebbff3b9f72fd7bd38a + uri: huggingface://QuantFactory/granite-3.0-1b-a400m-instruct-GGUF/granite-3.0-1b-a400m-instruct.Q4_K_M.gguf + url: "github:mudler/LocalAI/gallery/granite.yaml@master" + description: | + Granite 3.0 language models are a new set of lightweight state-of-the-art, open foundation models that natively support multilinguality, coding, reasoning, and tool usage, including the potential to be run on constrained compute resources. All the models are publicly released under an Apache 2.0 license for both research and commercial use. The models' data curation and training procedure were designed for enterprise usage and customization in mind, with a process that evaluates datasets for governance, risk and compliance (GRC) criteria, in addition to IBM's standard data clearance process and document quality checks. + Granite 3.0 includes 4 different models of varying sizes: + Dense Models: 2B and 8B parameter models, trained on 12 trillion tokens in total. 
+ Mixture-of-Expert (MoE) Models: Sparse 1B and 3B MoE models, with 400M and 800M activated parameters respectively, trained on 10 trillion tokens in total. + Accordingly, these options provide a range of models with different compute requirements to choose from, with appropriate trade-offs with their performance on downstream tasks. At each scale, we release a base model — checkpoints of models after pretraining, as well as instruct checkpoints — models finetuned for dialogue, instruction-following, helpfulness, and safety. + tags: + - llm + - gguf + - gpu + - cpu + - moe + - granite - name: "moe-girl-1ba-7bt-i1" icon: https://cdn-uploads.huggingface.co/production/uploads/634262af8d8089ebaefd410e/kTXXSSSqpb21rfyOX7FUa.jpeg # chatml From 841dfefd62e3c20c353be9cfc4eb4e31320995d3 Mon Sep 17 00:00:00 2001 From: Ettore Di Giacinto Date: Mon, 28 Oct 2024 19:41:34 +0100 Subject: [PATCH 0449/1530] models(gallery): add moe-girl-800ma-3bt (#3995) Signed-off-by: Ettore Di Giacinto --- gallery/index.yaml | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) diff --git a/gallery/index.yaml b/gallery/index.yaml index 6403b025..578a7e34 100644 --- a/gallery/index.yaml +++ b/gallery/index.yaml @@ -25,6 +25,25 @@ - cpu - moe - granite +- !!merge <<: *granite3 + name: "moe-girl-800ma-3bt" + icon: https://huggingface.co/allura-org/MoE-Girl-800MA-3BT/resolve/main/moe-girl-800-3.png + url: "github:mudler/LocalAI/gallery/chatml.yaml@master" + urls: + - https://huggingface.co/allura-org/MoE-Girl-800MA-3BT + - https://huggingface.co/mradermacher/MoE-Girl-800MA-3BT-GGUF + description: | + A roleplay-centric finetune of IBM's Granite 3.0 3B-A800M. LoRA finetune trained locally, whereas the others were FFT; while this results in less uptake of training data, it should also mean less degradation in Granite's core abilities, making it potentially easier to use for general-purpose tasks. 
+ Disclaimer + + PLEASE do not expect godliness out of this, it's a model with 800 million active parameters. Expect something more akin to GPT-3 (the original, not GPT-3.5.) (Furthermore, this version is by a less experienced tuner; it's my first finetune that actually has decent-looking graphs, I don't really know what I'm doing yet!) + overrides: + parameters: + model: MoE-Girl-800MA-3BT.Q4_K_M.gguf + files: + - filename: MoE-Girl-800MA-3BT.Q4_K_M.gguf + sha256: 4c3cb57c27aadabd05573a1a01d6c7aee0f21620db919c7704f758d172e0bfa3 + uri: huggingface://mradermacher/MoE-Girl-800MA-3BT-GGUF/MoE-Girl-800MA-3BT.Q4_K_M.gguf - name: "moe-girl-1ba-7bt-i1" icon: https://cdn-uploads.huggingface.co/production/uploads/634262af8d8089ebaefd410e/kTXXSSSqpb21rfyOX7FUa.jpeg # chatml From 14cb620cd8a9efc416978994f988a2ed1adaa676 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 28 Oct 2024 23:33:35 +0000 Subject: [PATCH 0450/1530] chore(deps): Bump torchvision from 0.18.1+rocm6.0 to 0.20.0+cu118 in /backend/python/diffusers (#3997) chore(deps): Bump torchvision in /backend/python/diffusers Bumps torchvision from 0.18.1+rocm6.0 to 0.20.0+cu118. --- updated-dependencies: - dependency-name: torchvision dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- backend/python/diffusers/requirements-hipblas.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/backend/python/diffusers/requirements-hipblas.txt b/backend/python/diffusers/requirements-hipblas.txt index 17cf7249..b5d534cd 100644 --- a/backend/python/diffusers/requirements-hipblas.txt +++ b/backend/python/diffusers/requirements-hipblas.txt @@ -1,6 +1,6 @@ --extra-index-url https://download.pytorch.org/whl/rocm6.0 torch==2.3.1+rocm6.0 -torchvision==0.18.1+rocm6.0 +torchvision==0.20.0+cu118 diffusers opencv-python transformers From 11d3ce9edbd5aea1693c3e19324c1ebde3bda55a Mon Sep 17 00:00:00 2001 From: Ettore Di Giacinto Date: Tue, 29 Oct 2024 09:25:17 +0100 Subject: [PATCH 0451/1530] Revert "chore(deps): Bump torchvision from 0.18.1+rocm6.0 to 0.20.0+cu118 in /backend/python/diffusers" (#4008) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Revert "chore(deps): Bump torchvision from 0.18.1+rocm6.0 to 0.20.0+cu118 in …" This reverts commit 14cb620cd8a9efc416978994f988a2ed1adaa676. 
--- backend/python/diffusers/requirements-hipblas.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/backend/python/diffusers/requirements-hipblas.txt b/backend/python/diffusers/requirements-hipblas.txt index b5d534cd..17cf7249 100644 --- a/backend/python/diffusers/requirements-hipblas.txt +++ b/backend/python/diffusers/requirements-hipblas.txt @@ -1,6 +1,6 @@ --extra-index-url https://download.pytorch.org/whl/rocm6.0 torch==2.3.1+rocm6.0 -torchvision==0.20.0+cu118 +torchvision==0.18.1+rocm6.0 diffusers opencv-python transformers From 3980beabd7e3a2b9a9aa6c09e682b97861c74d0f Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 29 Oct 2024 09:25:42 +0100 Subject: [PATCH 0452/1530] chore(deps): Bump docs/themes/hugo-theme-relearn from `06e70da` to `28fce6b` (#3986) chore(deps): Bump docs/themes/hugo-theme-relearn Bumps [docs/themes/hugo-theme-relearn](https://github.com/McShelby/hugo-theme-relearn) from `06e70da` to `28fce6b`. - [Release notes](https://github.com/McShelby/hugo-theme-relearn/releases) - [Commits](https://github.com/McShelby/hugo-theme-relearn/compare/06e70da8a6fb2043fe7e56b818ff638a309c8239...28fce6b04c414523280c53ee02f9f3a94d9d23da) --- updated-dependencies: - dependency-name: docs/themes/hugo-theme-relearn dependency-type: direct:production ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- docs/themes/hugo-theme-relearn | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/themes/hugo-theme-relearn b/docs/themes/hugo-theme-relearn index 06e70da8..28fce6b0 160000 --- a/docs/themes/hugo-theme-relearn +++ b/docs/themes/hugo-theme-relearn @@ -1 +1 @@ -Subproject commit 06e70da8a6fb2043fe7e56b818ff638a309c8239 +Subproject commit 28fce6b04c414523280c53ee02f9f3a94d9d23da From 605126db8a7f9205d31f9f6a1f3e857ddb56c103 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 29 Oct 2024 09:26:12 +0100 Subject: [PATCH 0453/1530] chore(deps): Bump llama-index from 0.11.19 to 0.11.20 in /examples/langchain-chroma (#3988) chore(deps): Bump llama-index in /examples/langchain-chroma Bumps [llama-index](https://github.com/run-llama/llama_index) from 0.11.19 to 0.11.20. - [Release notes](https://github.com/run-llama/llama_index/releases) - [Changelog](https://github.com/run-llama/llama_index/blob/main/CHANGELOG.md) - [Commits](https://github.com/run-llama/llama_index/compare/v0.11.19...v0.11.20) --- updated-dependencies: - dependency-name: llama-index dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- examples/langchain-chroma/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/langchain-chroma/requirements.txt b/examples/langchain-chroma/requirements.txt index d8d4f480..8faf211c 100644 --- a/examples/langchain-chroma/requirements.txt +++ b/examples/langchain-chroma/requirements.txt @@ -1,4 +1,4 @@ langchain==0.3.3 openai==1.52.0 chromadb==0.5.13 -llama-index==0.11.19 \ No newline at end of file +llama-index==0.11.20 \ No newline at end of file From 293eaad69de3f6462efa8a3c654d3abf6a2efad0 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 29 Oct 2024 09:26:45 +0100 Subject: [PATCH 0454/1530] chore(deps): Bump openai from 1.52.0 to 1.52.2 in /examples/langchain-chroma (#3989) chore(deps): Bump openai in /examples/langchain-chroma Bumps [openai](https://github.com/openai/openai-python) from 1.52.0 to 1.52.2. - [Release notes](https://github.com/openai/openai-python/releases) - [Changelog](https://github.com/openai/openai-python/blob/main/CHANGELOG.md) - [Commits](https://github.com/openai/openai-python/compare/v1.52.0...v1.52.2) --- updated-dependencies: - dependency-name: openai dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- examples/langchain-chroma/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/langchain-chroma/requirements.txt b/examples/langchain-chroma/requirements.txt index 8faf211c..6a67d98e 100644 --- a/examples/langchain-chroma/requirements.txt +++ b/examples/langchain-chroma/requirements.txt @@ -1,4 +1,4 @@ langchain==0.3.3 -openai==1.52.0 +openai==1.52.2 chromadb==0.5.13 llama-index==0.11.20 \ No newline at end of file From 15c083f73196f732058310e08bdd1d37347a728d Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 29 Oct 2024 09:27:44 +0100 Subject: [PATCH 0455/1530] chore(deps): Bump llama-index from 0.11.19 to 0.11.20 in /examples/chainlit (#3990) chore(deps): Bump llama-index in /examples/chainlit Bumps [llama-index](https://github.com/run-llama/llama_index) from 0.11.19 to 0.11.20. - [Release notes](https://github.com/run-llama/llama_index/releases) - [Changelog](https://github.com/run-llama/llama_index/blob/main/CHANGELOG.md) - [Commits](https://github.com/run-llama/llama_index/compare/v0.11.19...v0.11.20) --- updated-dependencies: - dependency-name: llama-index dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- examples/chainlit/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/chainlit/requirements.txt b/examples/chainlit/requirements.txt index 3f4adf48..adcf2fc8 100644 --- a/examples/chainlit/requirements.txt +++ b/examples/chainlit/requirements.txt @@ -1,4 +1,4 @@ -llama_index==0.11.19 +llama_index==0.11.20 requests==2.32.3 weaviate_client==4.9.0 transformers From 7b23b894b49f4e6c0625aaef54b2c09a65b1a8fc Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 29 Oct 2024 09:28:10 +0100 Subject: [PATCH 0456/1530] chore(deps): Bump tqdm from 4.66.5 to 4.66.6 in /examples/langchain/langchainpy-localai-example (#3991) chore(deps): Bump tqdm Bumps [tqdm](https://github.com/tqdm/tqdm) from 4.66.5 to 4.66.6. - [Release notes](https://github.com/tqdm/tqdm/releases) - [Commits](https://github.com/tqdm/tqdm/compare/v4.66.5...v4.66.6) --- updated-dependencies: - dependency-name: tqdm dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- examples/langchain/langchainpy-localai-example/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/langchain/langchainpy-localai-example/requirements.txt b/examples/langchain/langchainpy-localai-example/requirements.txt index b5f47739..6fd4dde3 100644 --- a/examples/langchain/langchainpy-localai-example/requirements.txt +++ b/examples/langchain/langchainpy-localai-example/requirements.txt @@ -26,7 +26,7 @@ PyYAML==6.0.2 requests==2.32.3 SQLAlchemy==2.0.36 tenacity==8.5.0 -tqdm==4.66.5 +tqdm==4.66.6 typing-inspect==0.9.0 typing_extensions==4.12.2 urllib3==2.2.3 From a7917a2150c9dd3e97346db8a5423f11b2527ce8 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 29 Oct 2024 09:29:20 +0100 Subject: [PATCH 0457/1530] chore(deps): Bump frozenlist from 1.4.1 to 1.5.0 in /examples/langchain/langchainpy-localai-example (#3992) chore(deps): Bump frozenlist Bumps [frozenlist](https://github.com/aio-libs/frozenlist) from 1.4.1 to 1.5.0. - [Release notes](https://github.com/aio-libs/frozenlist/releases) - [Changelog](https://github.com/aio-libs/frozenlist/blob/master/CHANGES.rst) - [Commits](https://github.com/aio-libs/frozenlist/compare/v1.4.1...v1.5.0) --- updated-dependencies: - dependency-name: frozenlist dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- examples/langchain/langchainpy-localai-example/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/langchain/langchainpy-localai-example/requirements.txt b/examples/langchain/langchainpy-localai-example/requirements.txt index 6fd4dde3..1a45c6ac 100644 --- a/examples/langchain/langchainpy-localai-example/requirements.txt +++ b/examples/langchain/langchainpy-localai-example/requirements.txt @@ -7,7 +7,7 @@ charset-normalizer==3.4.0 colorama==0.4.6 dataclasses-json==0.6.7 debugpy==1.8.7 -frozenlist==1.4.1 +frozenlist==1.5.0 greenlet==3.1.1 idna==3.10 langchain==0.3.3 From 3422d2134687d5272eaa3b206aef1566570682cc Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 29 Oct 2024 09:30:03 +0100 Subject: [PATCH 0458/1530] chore(deps): Bump openai from 1.52.0 to 1.52.2 in /examples/functions (#4000) Bumps [openai](https://github.com/openai/openai-python) from 1.52.0 to 1.52.2. - [Release notes](https://github.com/openai/openai-python/releases) - [Changelog](https://github.com/openai/openai-python/blob/main/CHANGELOG.md) - [Commits](https://github.com/openai/openai-python/compare/v1.52.0...v1.52.2) --- updated-dependencies: - dependency-name: openai dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- examples/functions/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/functions/requirements.txt b/examples/functions/requirements.txt index 0218e59c..a7f55900 100644 --- a/examples/functions/requirements.txt +++ b/examples/functions/requirements.txt @@ -1,2 +1,2 @@ langchain==0.3.4 -openai==1.52.0 +openai==1.52.2 From b897d47e0fa18fb701bd83a565c2e469358070e4 Mon Sep 17 00:00:00 2001 From: Ettore Di Giacinto Date: Tue, 29 Oct 2024 15:04:21 +0100 Subject: [PATCH 0459/1530] chore(deps): bump grpcio to 1.67.1 (#4009) Signed-off-by: Ettore Di Giacinto --- backend/python/autogptq/requirements.txt | 2 +- backend/python/bark/requirements.txt | 2 +- backend/python/common/template/requirements.txt | 2 +- backend/python/coqui/requirements.txt | 2 +- backend/python/diffusers/requirements.txt | 2 +- backend/python/exllama2/requirements.txt | 2 +- backend/python/mamba/requirements.txt | 2 +- backend/python/openvoice/requirements-intel.txt | 2 +- backend/python/openvoice/requirements.txt | 2 +- backend/python/parler-tts/requirements.txt | 2 +- backend/python/rerankers/requirements.txt | 2 +- backend/python/sentencetransformers/requirements.txt | 2 +- backend/python/transformers-musicgen/requirements.txt | 2 +- backend/python/transformers/requirements.txt | 2 +- backend/python/vall-e-x/requirements.txt | 2 +- backend/python/vllm/install.sh | 2 +- backend/python/vllm/requirements.txt | 2 +- 17 files changed, 17 insertions(+), 17 deletions(-) diff --git a/backend/python/autogptq/requirements.txt b/backend/python/autogptq/requirements.txt index 7e66f084..08e33fd6 100644 --- a/backend/python/autogptq/requirements.txt +++ b/backend/python/autogptq/requirements.txt @@ -1,6 +1,6 @@ accelerate auto-gptq==0.7.1 -grpcio==1.67.0 +grpcio==1.67.1 protobuf certifi transformers \ No newline at end of file diff --git 
a/backend/python/bark/requirements.txt b/backend/python/bark/requirements.txt index d1a90719..62f27107 100644 --- a/backend/python/bark/requirements.txt +++ b/backend/python/bark/requirements.txt @@ -1,4 +1,4 @@ bark==0.1.5 -grpcio==1.67.0 +grpcio==1.67.1 protobuf certifi \ No newline at end of file diff --git a/backend/python/common/template/requirements.txt b/backend/python/common/template/requirements.txt index b59960b2..c37ae499 100644 --- a/backend/python/common/template/requirements.txt +++ b/backend/python/common/template/requirements.txt @@ -1,3 +1,3 @@ -grpcio==1.67.0 +grpcio==1.67.1 protobuf grpcio-tools \ No newline at end of file diff --git a/backend/python/coqui/requirements.txt b/backend/python/coqui/requirements.txt index d0c6d72f..892ebf64 100644 --- a/backend/python/coqui/requirements.txt +++ b/backend/python/coqui/requirements.txt @@ -1,4 +1,4 @@ -grpcio==1.67.0 +grpcio==1.67.1 protobuf certifi packaging==24.1 \ No newline at end of file diff --git a/backend/python/diffusers/requirements.txt b/backend/python/diffusers/requirements.txt index 624b048e..42759dbc 100644 --- a/backend/python/diffusers/requirements.txt +++ b/backend/python/diffusers/requirements.txt @@ -1,5 +1,5 @@ setuptools -grpcio==1.67.0 +grpcio==1.67.1 pillow protobuf certifi diff --git a/backend/python/exllama2/requirements.txt b/backend/python/exllama2/requirements.txt index 8a0d9a17..1adc7283 100644 --- a/backend/python/exllama2/requirements.txt +++ b/backend/python/exllama2/requirements.txt @@ -1,4 +1,4 @@ -grpcio==1.67.0 +grpcio==1.67.1 protobuf certifi wheel diff --git a/backend/python/mamba/requirements.txt b/backend/python/mamba/requirements.txt index 6be5d8ac..0bd0c161 100644 --- a/backend/python/mamba/requirements.txt +++ b/backend/python/mamba/requirements.txt @@ -1,3 +1,3 @@ -grpcio==1.67.0 +grpcio==1.67.1 protobuf certifi \ No newline at end of file diff --git a/backend/python/openvoice/requirements-intel.txt b/backend/python/openvoice/requirements-intel.txt index 
12195016..8d188490 100644 --- a/backend/python/openvoice/requirements-intel.txt +++ b/backend/python/openvoice/requirements-intel.txt @@ -2,7 +2,7 @@ intel-extension-for-pytorch torch optimum[openvino] -grpcio==1.67.0 +grpcio==1.67.1 protobuf librosa==0.9.1 faster-whisper==0.9.0 diff --git a/backend/python/openvoice/requirements.txt b/backend/python/openvoice/requirements.txt index c74eba87..ef552363 100644 --- a/backend/python/openvoice/requirements.txt +++ b/backend/python/openvoice/requirements.txt @@ -1,4 +1,4 @@ -grpcio==1.67.0 +grpcio==1.67.1 protobuf librosa faster-whisper diff --git a/backend/python/parler-tts/requirements.txt b/backend/python/parler-tts/requirements.txt index 3187f526..a3f7292f 100644 --- a/backend/python/parler-tts/requirements.txt +++ b/backend/python/parler-tts/requirements.txt @@ -1,3 +1,3 @@ -grpcio==1.67.0 +grpcio==1.67.1 certifi llvmlite==0.43.0 diff --git a/backend/python/rerankers/requirements.txt b/backend/python/rerankers/requirements.txt index 6be5d8ac..0bd0c161 100644 --- a/backend/python/rerankers/requirements.txt +++ b/backend/python/rerankers/requirements.txt @@ -1,3 +1,3 @@ -grpcio==1.67.0 +grpcio==1.67.1 protobuf certifi \ No newline at end of file diff --git a/backend/python/sentencetransformers/requirements.txt b/backend/python/sentencetransformers/requirements.txt index 36ce8a6f..f287ad24 100644 --- a/backend/python/sentencetransformers/requirements.txt +++ b/backend/python/sentencetransformers/requirements.txt @@ -1,4 +1,4 @@ -grpcio==1.67.0 +grpcio==1.67.1 protobuf certifi datasets diff --git a/backend/python/transformers-musicgen/requirements.txt b/backend/python/transformers-musicgen/requirements.txt index ea0e3fa9..48a6e234 100644 --- a/backend/python/transformers-musicgen/requirements.txt +++ b/backend/python/transformers-musicgen/requirements.txt @@ -1,4 +1,4 @@ -grpcio==1.67.0 +grpcio==1.67.1 protobuf scipy==1.14.0 certifi \ No newline at end of file diff --git a/backend/python/transformers/requirements.txt 
b/backend/python/transformers/requirements.txt index d006cf0e..efaef767 100644 --- a/backend/python/transformers/requirements.txt +++ b/backend/python/transformers/requirements.txt @@ -1,4 +1,4 @@ -grpcio==1.67.0 +grpcio==1.67.1 protobuf certifi setuptools==69.5.1 # https://github.com/mudler/LocalAI/issues/2406 \ No newline at end of file diff --git a/backend/python/vall-e-x/requirements.txt b/backend/python/vall-e-x/requirements.txt index 6be5d8ac..0bd0c161 100644 --- a/backend/python/vall-e-x/requirements.txt +++ b/backend/python/vall-e-x/requirements.txt @@ -1,3 +1,3 @@ -grpcio==1.67.0 +grpcio==1.67.1 protobuf certifi \ No newline at end of file diff --git a/backend/python/vllm/install.sh b/backend/python/vllm/install.sh index 69e74a06..7c9363be 100755 --- a/backend/python/vllm/install.sh +++ b/backend/python/vllm/install.sh @@ -22,7 +22,7 @@ if [ "x${BUILD_TYPE}" == "x" ] && [ "x${FROM_SOURCE}" == "xtrue" ]; then git clone https://github.com/vllm-project/vllm fi pushd vllm - uv pip install wheel packaging ninja "setuptools>=49.4.0" numpy typing-extensions pillow setuptools-scm grpcio==1.67.0 protobuf bitsandbytes + uv pip install wheel packaging ninja "setuptools>=49.4.0" numpy typing-extensions pillow setuptools-scm grpcio==1.67.1 protobuf bitsandbytes uv pip install -v -r requirements-cpu.txt --extra-index-url https://download.pytorch.org/whl/cpu VLLM_TARGET_DEVICE=cpu python setup.py install popd diff --git a/backend/python/vllm/requirements.txt b/backend/python/vllm/requirements.txt index 95447f74..6dfa3cb7 100644 --- a/backend/python/vllm/requirements.txt +++ b/backend/python/vllm/requirements.txt @@ -1,4 +1,4 @@ -grpcio==1.67.0 +grpcio==1.67.1 protobuf certifi setuptools \ No newline at end of file From 94d417c2b7c2afabc17d7bf7181f991e7281f221 Mon Sep 17 00:00:00 2001 From: "LocalAI [bot]" <139863280+localai-bot@users.noreply.github.com> Date: Tue, 29 Oct 2024 15:06:57 +0100 Subject: [PATCH 0460/1530] chore: :arrow_up: Update ggerganov/llama.cpp to 
`61715d5cc83a28181df6a641846e4f6a740f3c74` (#4006) :arrow_up: Update ggerganov/llama.cpp Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com> Co-authored-by: mudler <2420543+mudler@users.noreply.github.com> --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index aebfa78a..3a0be6da 100644 --- a/Makefile +++ b/Makefile @@ -8,7 +8,7 @@ DETECT_LIBS?=true # llama.cpp versions GOLLAMA_REPO?=https://github.com/go-skynet/go-llama.cpp GOLLAMA_VERSION?=2b57a8ae43e4699d3dc5d1496a1ccd42922993be -CPPLLAMA_VERSION?=8841ce3f439de6e770f70319b7e08b6613197ea7 +CPPLLAMA_VERSION?=61715d5cc83a28181df6a641846e4f6a740f3c74 # go-rwkv version RWKV_REPO?=https://github.com/donomii/go-rwkv.cpp From 25a9685e2ff1344c94a0173206a2ff29eb1fb760 Mon Sep 17 00:00:00 2001 From: "LocalAI [bot]" <139863280+localai-bot@users.noreply.github.com> Date: Tue, 29 Oct 2024 15:07:43 +0100 Subject: [PATCH 0461/1530] chore: :arrow_up: Update ggerganov/whisper.cpp to `d4bc413505b2fba98dffbb9a176ddd1b165941d0` (#4005) :arrow_up: Update ggerganov/whisper.cpp Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com> Co-authored-by: mudler <2420543+mudler@users.noreply.github.com> Co-authored-by: Ettore Di Giacinto --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index 3a0be6da..f3edbb62 100644 --- a/Makefile +++ b/Makefile @@ -16,7 +16,7 @@ RWKV_VERSION?=661e7ae26d442f5cfebd2a0881b44e8c55949ec6 # whisper.cpp version WHISPER_REPO?=https://github.com/ggerganov/whisper.cpp -WHISPER_CPP_VERSION?=0fbaac9c891055796456df7b9122a70c220f9ca1 +WHISPER_CPP_VERSION?=d4bc413505b2fba98dffbb9a176ddd1b165941d0 # bert.cpp version BERT_REPO?=https://github.com/go-skynet/go-bert.cpp From a4e749c22f4131c7b7f02a3ff574485d246b6ea2 Mon Sep 17 00:00:00 2001 From: "LocalAI [bot]" <139863280+localai-bot@users.noreply.github.com> Date: Tue, 29 Oct 2024 23:01:46 +0100 Subject: 
[PATCH 0462/1530] chore: :arrow_up: Update ggerganov/whisper.cpp to `55e422109b3504d1a824935cc2681ada7ee9fd38` (#4015) :arrow_up: Update ggerganov/whisper.cpp Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com> Co-authored-by: mudler <2420543+mudler@users.noreply.github.com> --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index f3edbb62..c83251e5 100644 --- a/Makefile +++ b/Makefile @@ -16,7 +16,7 @@ RWKV_VERSION?=661e7ae26d442f5cfebd2a0881b44e8c55949ec6 # whisper.cpp version WHISPER_REPO?=https://github.com/ggerganov/whisper.cpp -WHISPER_CPP_VERSION?=d4bc413505b2fba98dffbb9a176ddd1b165941d0 +WHISPER_CPP_VERSION?=55e422109b3504d1a824935cc2681ada7ee9fd38 # bert.cpp version BERT_REPO?=https://github.com/go-skynet/go-bert.cpp From 3d4bb757d289bd5a3a937e9bc7a886f52ac6d2cb Mon Sep 17 00:00:00 2001 From: Ettore Di Giacinto Date: Wed, 30 Oct 2024 08:31:13 +0100 Subject: [PATCH 0463/1530] chore(deps): bump llama-cpp to 8f275a7c4593aa34147595a90282cf950a853690 (#4016) Signed-off-by: Ettore Di Giacinto --- Makefile | 2 +- backend/cpp/llama/grpc-server.cpp | 5 ----- 2 files changed, 1 insertion(+), 6 deletions(-) diff --git a/Makefile b/Makefile index c83251e5..a61400a2 100644 --- a/Makefile +++ b/Makefile @@ -8,7 +8,7 @@ DETECT_LIBS?=true # llama.cpp versions GOLLAMA_REPO?=https://github.com/go-skynet/go-llama.cpp GOLLAMA_VERSION?=2b57a8ae43e4699d3dc5d1496a1ccd42922993be -CPPLLAMA_VERSION?=61715d5cc83a28181df6a641846e4f6a740f3c74 +CPPLLAMA_VERSION?=8f275a7c4593aa34147595a90282cf950a853690 # go-rwkv version RWKV_REPO?=https://github.com/donomii/go-rwkv.cpp diff --git a/backend/cpp/llama/grpc-server.cpp b/backend/cpp/llama/grpc-server.cpp index d6e1b995..d21735da 100644 --- a/backend/cpp/llama/grpc-server.cpp +++ b/backend/cpp/llama/grpc-server.cpp @@ -670,7 +670,6 @@ struct llama_server_context slot->sparams.top_k = json_value(data, "top_k", default_sparams.top_k); slot->sparams.top_p = 
json_value(data, "top_p", default_sparams.top_p); slot->sparams.min_p = json_value(data, "min_p", default_sparams.min_p); - slot->sparams.tfs_z = json_value(data, "tfs_z", default_sparams.tfs_z); slot->sparams.typ_p = json_value(data, "typical_p", default_sparams.typ_p); slot->sparams.temp = json_value(data, "temperature", default_sparams.temp); slot->sparams.dynatemp_range = json_value(data, "dynatemp_range", default_sparams.dynatemp_range); @@ -1206,7 +1205,6 @@ struct llama_server_context {"top_k", slot.sparams.top_k}, {"top_p", slot.sparams.top_p}, {"min_p", slot.sparams.min_p}, - {"tfs_z", slot.sparams.tfs_z}, {"typical_p", slot.sparams.typ_p}, {"repeat_last_n", slot.sparams.penalty_last_n}, {"repeat_penalty", slot.sparams.penalty_repeat}, @@ -2105,7 +2103,6 @@ json parse_options(bool streaming, const backend::PredictOptions* predict, llama // slot->params.n_predict = json_value(data, "n_predict", default_params.n_predict); // slot->sparams.top_k = json_value(data, "top_k", default_sparams.top_k); // slot->sparams.top_p = json_value(data, "top_p", default_sparams.top_p); - // slot->sparams.tfs_z = json_value(data, "tfs_z", default_sparams.tfs_z); // slot->sparams.typical_p = json_value(data, "typical_p", default_sparams.typical_p); // slot->sparams.temp = json_value(data, "temperature", default_sparams.temp); // slot->sparams.penalty_last_n = json_value(data, "repeat_last_n", default_sparams.penalty_last_n); @@ -2129,7 +2126,6 @@ json parse_options(bool streaming, const backend::PredictOptions* predict, llama data["n_predict"] = predict->tokens() == 0 ? 
-1 : predict->tokens(); data["top_k"] = predict->topk(); data["top_p"] = predict->topp(); - data["tfs_z"] = predict->tailfreesamplingz(); data["typical_p"] = predict->typicalp(); data["temperature"] = predict->temperature(); data["repeat_last_n"] = predict->repeat(); @@ -2176,7 +2172,6 @@ json parse_options(bool streaming, const backend::PredictOptions* predict, llama // llama.params.n_predict = predict->tokens() == 0 ? -1 : predict->tokens(); // llama.params.sparams.top_k = predict->topk(); // llama.params.sparams.top_p = predict->topp(); -// llama.params.sparams.tfs_z = predict->tailfreesamplingz(); // llama.params.sparams.typical_p = predict->typicalp(); // llama.params.sparams.penalty_last_n = predict->repeat(); // llama.params.sparams.temp = predict->temperature(); From cde0139363ee5afca8786a535b48c256a7c1eb9a Mon Sep 17 00:00:00 2001 From: Dave Date: Wed, 30 Oct 2024 04:10:33 -0400 Subject: [PATCH 0464/1530] chore: drop examples folder now that LocalAI-examples has been created (#4017) Signed-off-by: Dave Lee --- .../Sound Generation/musicgen.bru | 0 .../backend monitor/backend monitor.bru | 0 .../backend monitor/backend-shutdown.bru | 0 .../LocalAI Test Requests/bruno.json | 0 .../environments/localhost.bru | 0 .../LocalAI Test Requests/get models list.bru | 0 .../image generation/Generate image.bru | 0 .../llm text/-completions.bru | 0 .../LocalAI Test Requests/llm text/-edits.bru | 0 .../llm text/-embeddings.bru | 0 .../chat completion -simple- 1 message-.bru | 0 .../llm text/chat/chat-completions -long-.bru | 0 .../chat/chat-completions -stream-.bru | 0 .../model gallery/add model gallery.bru | 0 .../model gallery/delete model gallery.bru | 0 .../list MODELS in galleries.bru | 0 .../model gallery/list model GALLERIES.bru | 0 .../model gallery/model delete.bru | 11 + .../model gallery apply -gist-.bru | 0 .../model gallery/model gallery apply.bru | 0 .../transcription/gb1.ogg | Bin 0 -> 1667662 bytes .../transcription/transcribe.bru | 16 + .../LocalAI Test 
Requests/tts/-tts.bru | 0 .../LocalAI Test Requests/tts/musicgen.bru | 0 README.md | 1 + examples/README.md | 190 - examples/autoGPT/.env.example | 9 - examples/autoGPT/README.md | 36 - examples/autoGPT/docker-compose.yaml | 42 - examples/chainlit/Dockerfile | 25 - examples/chainlit/README.md | 25 - examples/chainlit/config.yaml | 16 - examples/chainlit/main.py | 82 - examples/chainlit/requirements.txt | 6 - examples/chatbot-ui-manual/README.md | 50 - .../chatbot-ui-manual/docker-compose.yaml | 24 - examples/chatbot-ui-manual/models | 1 - examples/chatbot-ui/README.md | 46 - examples/chatbot-ui/docker-compose.yaml | 37 - examples/configurations/README.md | 95 - .../configurations/llava/chat-simple.tmpl | 3 - examples/configurations/llava/llava.yaml | 19 - .../configurations/mistral/chatml-block.tmpl | 3 - examples/configurations/mistral/chatml.tmpl | 3 - .../configurations/mistral/completion.tmpl | 1 - examples/configurations/mistral/mistral.yaml | 16 - examples/configurations/mixtral/mixtral | 1 - examples/configurations/mixtral/mixtral-chat | 1 - examples/configurations/mixtral/mixtral.yaml | 16 - examples/configurations/phi-2.yaml | 29 - examples/continue/README.md | 53 - examples/continue/config.py | 148 - examples/continue/docker-compose.yml | 27 - examples/continue/img/screen.png | Bin 201234 -> 0 bytes examples/discord-bot/.env.example | 9 - examples/discord-bot/README.md | 76 - examples/discord-bot/docker-compose.yaml | 21 - examples/discord-bot/models | 1 - examples/e2e-fine-tuning/README.md | 83 - examples/e2e-fine-tuning/axolotl.yaml | 63 - examples/e2e-fine-tuning/notebook.ipynb | 1655 --------- examples/flowise/README.md | 30 - examples/flowise/docker-compose.yaml | 37 - examples/functions/.env.example | 13 - examples/functions/Dockerfile | 5 - examples/functions/README.md | 21 - examples/functions/docker-compose.yaml | 23 - examples/functions/functions-openai.py | 76 - examples/functions/requirements.txt | 2 - examples/github-actions/workflow.yml | 83 
- examples/insomnia/Insomnia_LocalAI.json | 1 - examples/insomnia/README.md | 17 - examples/k8sgpt/README.md | 72 - examples/k8sgpt/broken-pod.yaml | 14 - examples/k8sgpt/values.yaml | 96 - examples/kubernetes/deployment-intel-arc.yaml | 68 - examples/kubernetes/deployment-nvidia.yaml | 69 - examples/kubernetes/deployment.yaml | 65 - examples/langchain-chroma/.env.example | 8 - examples/langchain-chroma/.gitignore | 4 - examples/langchain-chroma/README.md | 63 - examples/langchain-chroma/docker-compose.yml | 15 - examples/langchain-chroma/models | 1 - examples/langchain-chroma/query.py | 23 - examples/langchain-chroma/requirements.txt | 4 - examples/langchain-chroma/store.py | 25 - examples/langchain-huggingface/README.md | 68 - .../langchain-huggingface/docker-compose.yml | 15 - examples/langchain-huggingface/models | 1 - examples/langchain-python/README.md | 29 - examples/langchain-python/agent.py | 44 - examples/langchain-python/docker-compose.yaml | 27 - examples/langchain-python/test.py | 6 - examples/langchain/.gitignore | 2 - examples/langchain/JS.Dockerfile | 6 - examples/langchain/PY.Dockerfile | 5 - examples/langchain/README.md | 30 - examples/langchain/docker-compose.yaml | 43 - .../langchainjs-localai-example/.gitignore | 2 - .../.vscode/launch.json | 20 - .../package-lock.json | 3085 ----------------- .../langchainjs-localai-example/package.json | 25 - .../langchainjs-localai-example/src/index.mts | 92 - .../langchainjs-localai-example/tsconfig.json | 16 - .../.vscode/launch.json | 24 - .../.vscode/settings.json | 3 - .../langchainpy-localai-example/full_demo.py | 46 - .../requirements.txt | 33 - .../simple_demo.py | 6 - examples/langchain/models | 1 - examples/llamaindex/README.md | 27 - examples/llamaindex/main.py | 29 - examples/localai-webui/README.md | 26 - examples/localai-webui/docker-compose.yml | 18 - examples/models/.gitignore | 7 - examples/models/completion.tmpl | 1 - examples/models/embeddings.yaml | 6 - examples/models/gpt-3.5-turbo.yaml 
| 16 - examples/models/gpt4all.tmpl | 4 - examples/privateGPT/README.md | 25 - examples/query_data/.gitignore | 1 - examples/query_data/README.md | 69 - examples/query_data/data/.keep | 0 examples/query_data/docker-compose.yml | 15 - examples/query_data/models | 1 - examples/query_data/query.py | 35 - examples/query_data/store.py | 27 - examples/query_data/update.py | 32 - examples/rwkv/.gitignore | 2 - examples/rwkv/Dockerfile.build | 12 - examples/rwkv/README.md | 59 - examples/rwkv/docker-compose.yaml | 16 - examples/rwkv/models/gpt-3.5-turbo.yaml | 18 - examples/rwkv/models/rwkv_chat.tmpl | 13 - examples/rwkv/models/rwkv_completion.tmpl | 1 - examples/rwkv/scripts/build.sh | 11 - examples/semantic-todo/README.md | 15 - examples/semantic-todo/go.mod | 18 - examples/semantic-todo/go.sum | 50 - examples/semantic-todo/main.go | 355 -- examples/slack-bot/.env.example | 14 - examples/slack-bot/README.md | 27 - examples/slack-bot/docker-compose.yaml | 23 - examples/slack-bot/models | 1 - examples/slack-qa-bot/.env.example | 51 - examples/slack-qa-bot/README.md | 23 - examples/slack-qa-bot/deployment.yaml | 97 - examples/slack-qa-bot/docker-compose.yml | 30 - examples/streamlit-bot/.gitignore | 1 - examples/streamlit-bot/LICENSE | 21 - examples/streamlit-bot/Main.py | 70 - examples/streamlit-bot/README.md | 54 - examples/streamlit-bot/cmd_windows.bat | 31 - .../streamlit-bot/install_requirements.bat | 81 - examples/streamlit-bot/requirements.txt | 2 - examples/streamlit-bot/start_windows.bat | 81 - examples/streamlit-bot/streamlit-bot.png | Bin 32815 -> 0 bytes examples/telegram-bot/README.md | 30 - examples/telegram-bot/docker-compose.yml | 38 - 159 files changed, 28 insertions(+), 8960 deletions(-) rename {examples/bruno => .bruno}/LocalAI Test Requests/Sound Generation/musicgen.bru (100%) rename {examples/bruno => .bruno}/LocalAI Test Requests/backend monitor/backend monitor.bru (100%) rename {examples/bruno => .bruno}/LocalAI Test Requests/backend 
monitor/backend-shutdown.bru (100%) rename {examples/bruno => .bruno}/LocalAI Test Requests/bruno.json (100%) rename {examples/bruno => .bruno}/LocalAI Test Requests/environments/localhost.bru (100%) rename {examples/bruno => .bruno}/LocalAI Test Requests/get models list.bru (100%) rename {examples/bruno => .bruno}/LocalAI Test Requests/image generation/Generate image.bru (100%) rename {examples/bruno => .bruno}/LocalAI Test Requests/llm text/-completions.bru (100%) rename {examples/bruno => .bruno}/LocalAI Test Requests/llm text/-edits.bru (100%) rename {examples/bruno => .bruno}/LocalAI Test Requests/llm text/-embeddings.bru (100%) rename {examples/bruno => .bruno}/LocalAI Test Requests/llm text/chat/chat completion -simple- 1 message-.bru (100%) rename {examples/bruno => .bruno}/LocalAI Test Requests/llm text/chat/chat-completions -long-.bru (100%) rename {examples/bruno => .bruno}/LocalAI Test Requests/llm text/chat/chat-completions -stream-.bru (100%) rename {examples/bruno => .bruno}/LocalAI Test Requests/model gallery/add model gallery.bru (100%) rename {examples/bruno => .bruno}/LocalAI Test Requests/model gallery/delete model gallery.bru (100%) rename {examples/bruno => .bruno}/LocalAI Test Requests/model gallery/list MODELS in galleries.bru (100%) rename {examples/bruno => .bruno}/LocalAI Test Requests/model gallery/list model GALLERIES.bru (100%) create mode 100644 .bruno/LocalAI Test Requests/model gallery/model delete.bru rename {examples/bruno => .bruno}/LocalAI Test Requests/model gallery/model gallery apply -gist-.bru (100%) rename {examples/bruno => .bruno}/LocalAI Test Requests/model gallery/model gallery apply.bru (100%) create mode 100644 .bruno/LocalAI Test Requests/transcription/gb1.ogg create mode 100644 .bruno/LocalAI Test Requests/transcription/transcribe.bru rename {examples/bruno => .bruno}/LocalAI Test Requests/tts/-tts.bru (100%) rename {examples/bruno => .bruno}/LocalAI Test Requests/tts/musicgen.bru (100%) delete mode 100644 
examples/README.md delete mode 100644 examples/autoGPT/.env.example delete mode 100644 examples/autoGPT/README.md delete mode 100644 examples/autoGPT/docker-compose.yaml delete mode 100644 examples/chainlit/Dockerfile delete mode 100644 examples/chainlit/README.md delete mode 100644 examples/chainlit/config.yaml delete mode 100644 examples/chainlit/main.py delete mode 100644 examples/chainlit/requirements.txt delete mode 100644 examples/chatbot-ui-manual/README.md delete mode 100644 examples/chatbot-ui-manual/docker-compose.yaml delete mode 120000 examples/chatbot-ui-manual/models delete mode 100644 examples/chatbot-ui/README.md delete mode 100644 examples/chatbot-ui/docker-compose.yaml delete mode 100644 examples/configurations/README.md delete mode 100644 examples/configurations/llava/chat-simple.tmpl delete mode 100644 examples/configurations/llava/llava.yaml delete mode 100644 examples/configurations/mistral/chatml-block.tmpl delete mode 100644 examples/configurations/mistral/chatml.tmpl delete mode 100644 examples/configurations/mistral/completion.tmpl delete mode 100644 examples/configurations/mistral/mistral.yaml delete mode 100644 examples/configurations/mixtral/mixtral delete mode 100644 examples/configurations/mixtral/mixtral-chat delete mode 100755 examples/configurations/mixtral/mixtral.yaml delete mode 100644 examples/configurations/phi-2.yaml delete mode 100644 examples/continue/README.md delete mode 100644 examples/continue/config.py delete mode 100644 examples/continue/docker-compose.yml delete mode 100755 examples/continue/img/screen.png delete mode 100644 examples/discord-bot/.env.example delete mode 100644 examples/discord-bot/README.md delete mode 100644 examples/discord-bot/docker-compose.yaml delete mode 120000 examples/discord-bot/models delete mode 100644 examples/e2e-fine-tuning/README.md delete mode 100644 examples/e2e-fine-tuning/axolotl.yaml delete mode 100644 examples/e2e-fine-tuning/notebook.ipynb delete mode 100644 
examples/flowise/README.md delete mode 100644 examples/flowise/docker-compose.yaml delete mode 100644 examples/functions/.env.example delete mode 100644 examples/functions/Dockerfile delete mode 100644 examples/functions/README.md delete mode 100644 examples/functions/docker-compose.yaml delete mode 100644 examples/functions/functions-openai.py delete mode 100644 examples/functions/requirements.txt delete mode 100644 examples/github-actions/workflow.yml delete mode 100644 examples/insomnia/Insomnia_LocalAI.json delete mode 100644 examples/insomnia/README.md delete mode 100644 examples/k8sgpt/README.md delete mode 100644 examples/k8sgpt/broken-pod.yaml delete mode 100644 examples/k8sgpt/values.yaml delete mode 100644 examples/kubernetes/deployment-intel-arc.yaml delete mode 100644 examples/kubernetes/deployment-nvidia.yaml delete mode 100644 examples/kubernetes/deployment.yaml delete mode 100644 examples/langchain-chroma/.env.example delete mode 100644 examples/langchain-chroma/.gitignore delete mode 100644 examples/langchain-chroma/README.md delete mode 100644 examples/langchain-chroma/docker-compose.yml delete mode 120000 examples/langchain-chroma/models delete mode 100644 examples/langchain-chroma/query.py delete mode 100644 examples/langchain-chroma/requirements.txt delete mode 100755 examples/langchain-chroma/store.py delete mode 100644 examples/langchain-huggingface/README.md delete mode 100644 examples/langchain-huggingface/docker-compose.yml delete mode 120000 examples/langchain-huggingface/models delete mode 100644 examples/langchain-python/README.md delete mode 100644 examples/langchain-python/agent.py delete mode 100644 examples/langchain-python/docker-compose.yaml delete mode 100644 examples/langchain-python/test.py delete mode 100644 examples/langchain/.gitignore delete mode 100644 examples/langchain/JS.Dockerfile delete mode 100644 examples/langchain/PY.Dockerfile delete mode 100644 examples/langchain/README.md delete mode 100644 
examples/langchain/docker-compose.yaml delete mode 100644 examples/langchain/langchainjs-localai-example/.gitignore delete mode 100644 examples/langchain/langchainjs-localai-example/.vscode/launch.json delete mode 100644 examples/langchain/langchainjs-localai-example/package-lock.json delete mode 100644 examples/langchain/langchainjs-localai-example/package.json delete mode 100644 examples/langchain/langchainjs-localai-example/src/index.mts delete mode 100644 examples/langchain/langchainjs-localai-example/tsconfig.json delete mode 100644 examples/langchain/langchainpy-localai-example/.vscode/launch.json delete mode 100644 examples/langchain/langchainpy-localai-example/.vscode/settings.json delete mode 100644 examples/langchain/langchainpy-localai-example/full_demo.py delete mode 100644 examples/langchain/langchainpy-localai-example/requirements.txt delete mode 100644 examples/langchain/langchainpy-localai-example/simple_demo.py delete mode 120000 examples/langchain/models delete mode 100644 examples/llamaindex/README.md delete mode 100644 examples/llamaindex/main.py delete mode 100644 examples/localai-webui/README.md delete mode 100644 examples/localai-webui/docker-compose.yml delete mode 100644 examples/models/.gitignore delete mode 100644 examples/models/completion.tmpl delete mode 100644 examples/models/embeddings.yaml delete mode 100644 examples/models/gpt-3.5-turbo.yaml delete mode 100644 examples/models/gpt4all.tmpl delete mode 100644 examples/privateGPT/README.md delete mode 100644 examples/query_data/.gitignore delete mode 100644 examples/query_data/README.md delete mode 100644 examples/query_data/data/.keep delete mode 100644 examples/query_data/docker-compose.yml delete mode 120000 examples/query_data/models delete mode 100644 examples/query_data/query.py delete mode 100644 examples/query_data/store.py delete mode 100644 examples/query_data/update.py delete mode 100644 examples/rwkv/.gitignore delete mode 100644 examples/rwkv/Dockerfile.build delete mode 
100644 examples/rwkv/README.md delete mode 100644 examples/rwkv/docker-compose.yaml delete mode 100644 examples/rwkv/models/gpt-3.5-turbo.yaml delete mode 100644 examples/rwkv/models/rwkv_chat.tmpl delete mode 100644 examples/rwkv/models/rwkv_completion.tmpl delete mode 100755 examples/rwkv/scripts/build.sh delete mode 100644 examples/semantic-todo/README.md delete mode 100644 examples/semantic-todo/go.mod delete mode 100644 examples/semantic-todo/go.sum delete mode 100644 examples/semantic-todo/main.go delete mode 100644 examples/slack-bot/.env.example delete mode 100644 examples/slack-bot/README.md delete mode 100644 examples/slack-bot/docker-compose.yaml delete mode 120000 examples/slack-bot/models delete mode 100644 examples/slack-qa-bot/.env.example delete mode 100644 examples/slack-qa-bot/README.md delete mode 100644 examples/slack-qa-bot/deployment.yaml delete mode 100644 examples/slack-qa-bot/docker-compose.yml delete mode 100644 examples/streamlit-bot/.gitignore delete mode 100644 examples/streamlit-bot/LICENSE delete mode 100644 examples/streamlit-bot/Main.py delete mode 100644 examples/streamlit-bot/README.md delete mode 100644 examples/streamlit-bot/cmd_windows.bat delete mode 100644 examples/streamlit-bot/install_requirements.bat delete mode 100644 examples/streamlit-bot/requirements.txt delete mode 100644 examples/streamlit-bot/start_windows.bat delete mode 100644 examples/streamlit-bot/streamlit-bot.png delete mode 100644 examples/telegram-bot/README.md delete mode 100644 examples/telegram-bot/docker-compose.yml diff --git a/examples/bruno/LocalAI Test Requests/Sound Generation/musicgen.bru b/.bruno/LocalAI Test Requests/Sound Generation/musicgen.bru similarity index 100% rename from examples/bruno/LocalAI Test Requests/Sound Generation/musicgen.bru rename to .bruno/LocalAI Test Requests/Sound Generation/musicgen.bru diff --git a/examples/bruno/LocalAI Test Requests/backend monitor/backend monitor.bru b/.bruno/LocalAI Test Requests/backend 
monitor/backend monitor.bru similarity index 100% rename from examples/bruno/LocalAI Test Requests/backend monitor/backend monitor.bru rename to .bruno/LocalAI Test Requests/backend monitor/backend monitor.bru diff --git a/examples/bruno/LocalAI Test Requests/backend monitor/backend-shutdown.bru b/.bruno/LocalAI Test Requests/backend monitor/backend-shutdown.bru similarity index 100% rename from examples/bruno/LocalAI Test Requests/backend monitor/backend-shutdown.bru rename to .bruno/LocalAI Test Requests/backend monitor/backend-shutdown.bru diff --git a/examples/bruno/LocalAI Test Requests/bruno.json b/.bruno/LocalAI Test Requests/bruno.json similarity index 100% rename from examples/bruno/LocalAI Test Requests/bruno.json rename to .bruno/LocalAI Test Requests/bruno.json diff --git a/examples/bruno/LocalAI Test Requests/environments/localhost.bru b/.bruno/LocalAI Test Requests/environments/localhost.bru similarity index 100% rename from examples/bruno/LocalAI Test Requests/environments/localhost.bru rename to .bruno/LocalAI Test Requests/environments/localhost.bru diff --git a/examples/bruno/LocalAI Test Requests/get models list.bru b/.bruno/LocalAI Test Requests/get models list.bru similarity index 100% rename from examples/bruno/LocalAI Test Requests/get models list.bru rename to .bruno/LocalAI Test Requests/get models list.bru diff --git a/examples/bruno/LocalAI Test Requests/image generation/Generate image.bru b/.bruno/LocalAI Test Requests/image generation/Generate image.bru similarity index 100% rename from examples/bruno/LocalAI Test Requests/image generation/Generate image.bru rename to .bruno/LocalAI Test Requests/image generation/Generate image.bru diff --git a/examples/bruno/LocalAI Test Requests/llm text/-completions.bru b/.bruno/LocalAI Test Requests/llm text/-completions.bru similarity index 100% rename from examples/bruno/LocalAI Test Requests/llm text/-completions.bru rename to .bruno/LocalAI Test Requests/llm text/-completions.bru diff --git 
a/examples/bruno/LocalAI Test Requests/llm text/-edits.bru b/.bruno/LocalAI Test Requests/llm text/-edits.bru similarity index 100% rename from examples/bruno/LocalAI Test Requests/llm text/-edits.bru rename to .bruno/LocalAI Test Requests/llm text/-edits.bru diff --git a/examples/bruno/LocalAI Test Requests/llm text/-embeddings.bru b/.bruno/LocalAI Test Requests/llm text/-embeddings.bru similarity index 100% rename from examples/bruno/LocalAI Test Requests/llm text/-embeddings.bru rename to .bruno/LocalAI Test Requests/llm text/-embeddings.bru diff --git a/examples/bruno/LocalAI Test Requests/llm text/chat/chat completion -simple- 1 message-.bru b/.bruno/LocalAI Test Requests/llm text/chat/chat completion -simple- 1 message-.bru similarity index 100% rename from examples/bruno/LocalAI Test Requests/llm text/chat/chat completion -simple- 1 message-.bru rename to .bruno/LocalAI Test Requests/llm text/chat/chat completion -simple- 1 message-.bru diff --git a/examples/bruno/LocalAI Test Requests/llm text/chat/chat-completions -long-.bru b/.bruno/LocalAI Test Requests/llm text/chat/chat-completions -long-.bru similarity index 100% rename from examples/bruno/LocalAI Test Requests/llm text/chat/chat-completions -long-.bru rename to .bruno/LocalAI Test Requests/llm text/chat/chat-completions -long-.bru diff --git a/examples/bruno/LocalAI Test Requests/llm text/chat/chat-completions -stream-.bru b/.bruno/LocalAI Test Requests/llm text/chat/chat-completions -stream-.bru similarity index 100% rename from examples/bruno/LocalAI Test Requests/llm text/chat/chat-completions -stream-.bru rename to .bruno/LocalAI Test Requests/llm text/chat/chat-completions -stream-.bru diff --git a/examples/bruno/LocalAI Test Requests/model gallery/add model gallery.bru b/.bruno/LocalAI Test Requests/model gallery/add model gallery.bru similarity index 100% rename from examples/bruno/LocalAI Test Requests/model gallery/add model gallery.bru rename to .bruno/LocalAI Test Requests/model 
gallery/add model gallery.bru diff --git a/examples/bruno/LocalAI Test Requests/model gallery/delete model gallery.bru b/.bruno/LocalAI Test Requests/model gallery/delete model gallery.bru similarity index 100% rename from examples/bruno/LocalAI Test Requests/model gallery/delete model gallery.bru rename to .bruno/LocalAI Test Requests/model gallery/delete model gallery.bru diff --git a/examples/bruno/LocalAI Test Requests/model gallery/list MODELS in galleries.bru b/.bruno/LocalAI Test Requests/model gallery/list MODELS in galleries.bru similarity index 100% rename from examples/bruno/LocalAI Test Requests/model gallery/list MODELS in galleries.bru rename to .bruno/LocalAI Test Requests/model gallery/list MODELS in galleries.bru diff --git a/examples/bruno/LocalAI Test Requests/model gallery/list model GALLERIES.bru b/.bruno/LocalAI Test Requests/model gallery/list model GALLERIES.bru similarity index 100% rename from examples/bruno/LocalAI Test Requests/model gallery/list model GALLERIES.bru rename to .bruno/LocalAI Test Requests/model gallery/list model GALLERIES.bru diff --git a/.bruno/LocalAI Test Requests/model gallery/model delete.bru b/.bruno/LocalAI Test Requests/model gallery/model delete.bru new file mode 100644 index 00000000..b320dae3 --- /dev/null +++ b/.bruno/LocalAI Test Requests/model gallery/model delete.bru @@ -0,0 +1,11 @@ +meta { + name: model delete + type: http + seq: 7 +} + +post { + url: {{PROTOCOL}}{{HOST}}:{{PORT}}/models/galleries + body: none + auth: none +} diff --git a/examples/bruno/LocalAI Test Requests/model gallery/model gallery apply -gist-.bru b/.bruno/LocalAI Test Requests/model gallery/model gallery apply -gist-.bru similarity index 100% rename from examples/bruno/LocalAI Test Requests/model gallery/model gallery apply -gist-.bru rename to .bruno/LocalAI Test Requests/model gallery/model gallery apply -gist-.bru diff --git a/examples/bruno/LocalAI Test Requests/model gallery/model gallery apply.bru b/.bruno/LocalAI Test 
Requests/model gallery/model gallery apply.bru similarity index 100% rename from examples/bruno/LocalAI Test Requests/model gallery/model gallery apply.bru rename to .bruno/LocalAI Test Requests/model gallery/model gallery apply.bru diff --git a/.bruno/LocalAI Test Requests/transcription/gb1.ogg b/.bruno/LocalAI Test Requests/transcription/gb1.ogg new file mode 100644 index 0000000000000000000000000000000000000000..df22d6363731c9867e4817a45f85a89436844159 GIT binary patch literal 1667662 zcma&Nd0bN2`#65x(Uefpz_b7n#ibD>BdtaO6%b8vNv#}3T+>QZ+vn{b7#ac^keUnP zl9p>_%Z$0;lA3#FHcf)*gFDkwwvehbPNx*AL6xu~}GBci-Ix%rgA%rRjQu*RS;-{Y6U zaCljIq#ChjIC%*LLeEjL!^yfhr6{G4@D~=Ef?uP{gnQ*D*CIejVGcrdYfuv5 znkZ#-I|#i%onfb3-nxcG#6nZntw~X>$$PyfS+Mom)L(R8iN98YHSu4egM^o+M=q~P z0h{K3okNkhCV&tCDk^RApV>fRX03X_006+;lHp!OaPJoLyW>~`h(V~Q9X(G$ z&#wt7jEOJDov5;rSJ_@_jUQ^YzuWry#+I+_bO7-4hxZ+Z>wM+o0sth!Ccm0oxELa; z2`$&j$!k;xG5|mmq|^6mf&09}-h02}-Y@^}R69ouB4UzWvJtFaU^zlX;o_SO_|Y1GlCnD^Vcy z92+updEmdFFq03q-Cty}1V)>y?7_5@82L_6+fkyKQTS&lJtaBxpJ)>Hf#0?>;8|G#&lZ z$LNRC(fdoHA9p0}pV`0ve%k-@_}cXhO(3Ii5!5<(&0u<0S3ghkgFeX#snZ{%Zt~ zWUcfc*#RDdpejH>yn``G^7&sLYl>9|KRfSQS6E&su#3t!&(<;iiF2d`>@UaoU5`=vV^oiBeW z!|gL^(A%5maG=~;?zg|rHgRLC!QT=A;NCBRAa+mgYf8z^djUapwt*Wi>xXLp7oYL+ z_+=2=Kq*1rEPq+QC2&K7UTB`3#((i$#vXg~R@Sh>=-u$>jUaEH9kt}I2niBB;a53O zzhVb2TjyUj_=2Ky_*R@cY#C3_^8*98G`>_Ym28Pl6n@aZ==(m~!hiuK|UD zk2uIp3JNKCdA!_44xY5NzxxZ#AMO(n6l4NYf({3syu1#i^oM&)1_s^FReq&xxN-Tl z`WiEMDsBINJN!TYU>9|uV1)7QsygP1M}LG!UWBVG&{^A~q9m5CB$I zS+FqM`e=68fu;@?Goh&?gn@4=o+0A#S7*X-Rvj~8b^x#f_G_h!VngCu0YV+X4ORt@ z5^VRv2jzp&v|3`@Cp|Li&nyOGEd*b~1fyw=gsobOt4w?p7+g^`Q)^+ikj<c^Lqzg{d-w&<0(K-P&vLZ&tbFkVJT1w9^Vm+UR-M?w1ngn zAT{l#Q7!N&7&G2MRmFU%P)vmhJZeT6jQT#rSpu#GY<8GN1v2Y95C~vr}Mb42^0e+*b!+X68T_G%>{DsUpt4c}!RgxNQQXwSc`AK6Cu= zpgy6LIyk6L9W-|>i)F)zDo}y$5MtJnfY1zu zQ9m6Fde@Ac?cFr9=4Ue{Y$oVfYmnJqu=)zkgf(tesROJ{=0Jr}Vrit+a^qpr^{;w3 z__rQjfZbhlq^ULMTI&^feF(l-06JXiUpb1+wD^jU7%_Eg6jLssCxVE|95$1IPy5ON 
zx_k}j{jb(IY|tP=*9tU0#45*+X;Ud-GZXMNAV&>|0O?9NU~fT0jUgl`h}l&06#@}8HB)RRh-8BlflMYSk8KAwnYLEN zC2a7z;5BL_5;nsDr1Nf4fcLzH^#&i8u&*4pV7(@5iv1OV2Y{uo_T(|wwM+`^&UG2) z>Kpr2qV3c&0zvQpJ86H--2ba;?C-JA-{YgNs{UV8z5snso<_unoPR!F5b(K%Wfc;X z*tL7M2pDD9)a0ynFaUJ2itMi{@dYjOwL_8#_y1}fw1>rCngpS;Y`-;?fW%f3*xyi1 z4)`Sgr3VlJTIQ=LHgg&D!2go`|3H5K5P<*t4*tul1)x010BQ@u-5MEW8d=nyhC+WE|7PcRQ~IhK=7r zR3BLN0Iiq>#&NV7pbgQB0W$8Wbq{2^c>$T!jX(s75~H3?i90OR1=hn1*=lZ_tg6(? zbrc?Izh>x;!%uq)tPheasT;vzDp(-EDFDF2-Do7IBPOOB5J=!NUlZupw%UJRGaSESfwk9+ zHDTK?-Jjg3W-YE4@c%Vf-(25tskybSt+BePrM{`Db|4p!pZ_fv0&IFu{1K+sr2#~! zyTP1^XH)&Nyu^MqSVSg!}#*V#qI`Hn# zx7?ZcZ~x_VpzqHgXTtvT=Yg59H-EOy4E_0~aqC)((=_dSW@*c(KdsqW#j!C>%`+); zJ89=XUu5vZV~+#r*1xm=W!}|Ow&eXK8CVaux@BZogvDsJ7mN3d#LWd4$e=z*CukWL z(GWOVlV5~ryw#6zT}^aBunLxkrK%~?!WeorE+K*Gx)o|cpR6?2IDP6-Qcd{S&gu&v z{XJjwt?ZxoUU@v^y>h$%c7N)j!IR@45Ou@rz3-=k-+a^ZX_Mz3bm82?+dI-fE#B^* zzxnd<^qW)P)V9?g{pOm$p?`AF|6{3kz4~Ga_fnS12DRUYV%9-#ChDC)Ot@2@dwdbN+-w zas+~{1WF{DT9;6Z7E$x@&Pv>Y+T>CTQF5?CW>C7@$b6Eg7FRpDJgYs?32j(-@ngqr z4JQBJnjcJmXui1Wa4PcKhZ{_Ux-!~RGK$@mqLu^WQP7^;y{#5LJUn_+9^DP0>gh_h z#%kvmsj9TIB{@PH-E^D+I_d>?xhhzQFt076Q=!=s70gX((f@!&w3GK`jxD1kvC(&%ioaR zi19|VMuufE5rnjn5{gtVa~kO~MPEHfgj7;wc$Rv*K?VJpB62=5B;%M6aZXd36K9VW zp%_I=2y?jk$}vry7GBmEtykZy$flz_p3Y5>`^pU48mUH$ME4df>we-(k5^C0fo@ zMvqt+kNY1lYnjAV=GN_q?+>+Ai7KO8qrc#B|31+zTXEM0Vwu1uUf3-Y7oX(anRx9g z(?hww0gnPA@||F~{PV1x@pXYl8P}_dx(Yo_G|`b;^sF8rCaSoSUZEEu!?>TsDIVu0 z7H}K#;-uKsz-v~yr}L@1PwqT_{9nO;Mz<=PHc`#JR}J(&n$YO*N(Jjg=>^y1ila82 z2QF&SF8W+>hq-NQuQZh?nC#S!K)9-PX@7t0fyo=j1>P^I1;6)hzqO+GN#WCb{D$Z0XH$z-0m@R_ zWOX;fkZPa3b3r;gB;?7dtj1GjzjV^CqS|{{{mBD}c=_N2y;-0E+BG|RNUZ6Rt89dB)8F&dCjewkmyPJOGTyXl3uVLUP3tFT}o z%sRqn)ujKql}Grss^a;bKO{ZwzR{ZX1Xia%nNo5z1}K+4=xt4xO9_`(58dt^#+7tT z_6m}d{`GnU^#R||U|VxkItM(9tjnQSbw$$c%eggzbr|IHCMnt9ma~b%a#yl_Tx9~P z4qPWXv^X_7x8>X?O^V;+eyK^##&VqI`=!f|cDXI@>Y}z}JP!DG;4$}a#5$tro2X85ffn$`(dEQq zCbz-CCi(uC{VscJjARMxn2*xz5cm*SxNU8(z~~5I)J+$ntUNK1B(ogDvyMFLUY;h- 
zk832Gu8fLJs7tjeVz_Pt%R_wTw9ki)!=wdfU)xbU~dNXD?;f@80RW+aK; zt5%59tBtqS8;mJihx9XB7j;;m&OU0M7ff6pBQSK3sS|wKr(pZZA(4}EI+o@y+;S=w{m{mCA9A-f*alHkvVbC3w$)yL_g<`VPNAid zF{WH!Cgi*KS5DvYi(=_7%wpPzh=LwJk6HJDmM3q5s2cGy-vjHbDP&f(e^7sIiF^Whq- z{oy|i(_Q!^qj*t{IuKF|{BXvs>&iO!?Im`t%7;{gC--wJ%7sPd)!)ns`WD(%10vwdG*@4y#3J*)paJ!Xwl)zuy0f@ZyG+&P(r7TYu%J= zulZ!#+;WzDoTOqoIosQDEgG+;FrR5uuhU%$(6^5GMhzJ2SD*KqJMgV-sb0vk?`QhS zRj=FtHM_^l`rH`oTdRP;#BiPP@a(hKSGneSxHgaC^A>PJ zyqgoTUY40Uu#{=jlVEFMg|OqeT%lt}eaFAqasOM_^AGJde2_rZwg>j&*vsRDYO>kk ztIjc3N$1oa*D=`v`13V3lQ&Xblt^XIN`;uPvOZ@_1Ea^<^`EB;POzg0ms`7WK+BkVr!~rwF|9yy(p0zWyh;HtPd)NV=pv* z-q$!XRB$SVayZ<-T+h%Ffc6WQTCpjpBseuLAY*2{j@i~z{ zNLhv{4DBo`)FU^rz9teO;1Bk??8=wWI{S3;c{qb1{dl!vJ6hX!?iYVJVu%T-5SbE+ zW2&&*Lg!lJqM2Kha0zlx#6lybwzDIIjJ0=oHxg(oD~k6U`+XSWj5`Q!F^~B;a96K4&gyEZh|>x zqh`K}McDy2EENJm1KBBx`#kKI#O2#5NbSD;}+h6%LZ9POk0K+UB&oXa99=`Ol-9f6h`0ll6dIA#?Gy<=2z9gx*2u zc)PAn8E+f4vO`1}c$qy9WLR|12=R)#w%kH)00o>YH;IvO+lDL;#1Jj4QxUa324eb| zajqk$$}lQ+0*Z9__#fjF`L`aoeR)Evg=EIbBh3V~v(m!0RBXvkJ+HTSzYoAIOWlpD ztna*f(RimvM6Jm#PWQOsBK#?8{XrCDv(5gtbng>kWsZp4Cyuu^49^80zHbU-P{HZE z`K8V)kB~v@FX^w(i>rS5LrQGm&PL)6L{-NL_8~=}6>b(Kb`ZgFp@%c`;b|@TDbKJM z?_n3;vn7N5Bu`DuBHe^fV*xc$#NimZVXO5OpG`leKfc=d@+>rWPlE5IQ=S>|nP$G) z`|;^f{(*fcPgOjv#>ehPI%lPKE^X0vfw}SX*pa(J%`=>%iO(_LjJNF8u|VHzLJt4_ z`uVrwWW{0u%%<2_Tl7HFQtkWe$VBtY-etk`@2*3)Wf%0M)lcdzue;w$uy4LF?_VD- zYY5z!C*|nAa}*(lT0#PKllL0-W%ttSj$t`IP9rpfV!|l%{j1c3uxzA8K+hscEc0Y+ zH}HAMfVJH{_@$>hF&EVx?sD2G4JFw7vnbup z1K(M=i#<5C%h&C#xT?B0HdQ0>Pa7VI+2wPPJ>k}mv<1H3v#j*JU$xIx>-hZ}H<{bg zZkRr2;|Ffe_zbWHFEQ~!$a(-~X;eUiGa!6JEKV4qN2j1=w_5qhF1|5&3{QGWC?nG* zi!c#7SxC(zQ2X!f??2s2zVl85S?tnKCiHEx{~R{xKOOPB@b{`~St(P8K7D!^&x!c4 z@XIc2!t=DY(G0BNx>wih!%832ml|R)CksENIYsXK4{+~6p9D(4t2fwbq zsQTeV@F_XIedYJ`BmUp*wA(c@71nd9;X@@0fH2&DK!{FVMX9Pf6UP&02zvvy9*_(? 
z^qexH8nFIbHgK3Mv3W~ZA66@+onD4R<2|)HNf-FXt3<3VLC%XtR(xY0C*$m8A=JW2 z)RN1ROO#07a)y4ey@xoA+&~V-x_=NIlnBqB#+|N1Rr#4Wl1{!1Qvg=FqWP+}Hw6xU zEd3LwPwfcCDyj0Z3d3ay&1JTOaKeCObHVh5Te31I`?LnI0w4MK7%yLaxWP8ZO}$io zCToArQ?1@OFx}1)RDgRNpQktm3mA46B69rUd@G!iu(`TPNbgO&>Ucp_2jlFzcrVsM zFP|$)-1Qx49T6qct)D^pv<*GC8R&&n2%;W7NEg-n8^GXMt;cKO*Xd?yVxi zYRHJdos%$((EDqFG$RarEd$Hi%*EQmfz?yf=VrM>tP2o>WJ>RDpy4mrxnPkhMp`Ms%7LBe z9_fz#MkAxLr+tBzMuZNK1yr16HfwlMOcL*=>Sc^`#|FtH)gVlb5{cVfYIC5LF>GR+ z!;-Y>c8)@oU^Kh{H7K*7T6@Y|jIn(P>hJ=5GhlWs)N4JZ4kEDqxwUXpj-Mf7nizT2 zr)Q|X0Rw;ip?-`ukU*gHXdd|r^*#*tIh7M)OMr1mL3UO9TTra2KemtIu~2Dpmp$B zMX+cgaM8K!0Xn_=aQtqG@& z^&v3sSquKwn(t80j=|355rpa<#??hAqrqNqScbe{=425oOqNt)C!xr8?6i`b8hY(< z*im4lJg?+Yk(u;X9oZN&ns-nZP%*Y@I-V-!>We);PBS0hKKb0^E!udmjX~OsdMhqcEmxpcaO)*@q{z2GRzIZqL)%-SZirH;41WB)iR`rD4(Q zOsY(ui#?Ib;0KffH^>#+=}QfH5!p6+JX>EZYLlgC&`>XhU&cKPH>e+2afB{N1bMnX z2~vT$FexwD5k8qWaY;CtU{AK_+2l*iZ1q?woTjy16B9j~lW%@HcN72Udi6(Z;L0vCZUObGK%of^>P8(xA@ z-0kUXt8{QJnjyaziPtvXxZns|z+#nK&pseTNgHre74_Z16GQ(gJ?oDtbsTRf9AoL6 zlvUte3UjFT2lDrcf7gc=W{z$w;dLj;o%nOHG0P0fTzr9&EmhF+7UnKm<#l{(=_06u z%5Xq;;vcmhm=7O@0O2}d<)&J=^=s*bfEN-L*Xlb%s6iUGw2yJYkqt8_2$PsCyf-VG zN_QSv)Qj51Xl@M^Wns^AnOgPY2`xP0G6m<%tstRLri z=sK>m0KYc>5vT03wm7l{0W4v`(d>on^-PX%c<%1V9UUtIJtpW1TefHGD{=&cGPTV&1xD3_SnN|_x&5W znF)jY*FksU7yo^~{0Yhzfw_9=?(HeKlsnI4NTV_7o-iFAvj zX*|V_`dgC+U)LbDqvS}&d-V*4TF~{3^*_Xhb$PL-iGr%ir{(c#n+ZX89rN?@wtniB zN<0flST~$4o)RVuh{Orm*p8?oik^`XBeVB6oBlZO@x@=wca|Bhl7~l0GG{TNj{i7h z$CmYWmtG%_zU|5wzkAc`kZ)|)AClRl9%n9(toGduzBm!i{V~556O2l=6%HwqGwvs| zBObTa&DI87Ap72!W>iK^M%u2}V&0}6W?iwIcYr*OG$Q8cS88=>pmelLj)q>JWKT^; zR)^oZb+@~$r0OzsUxxn#pvO zDlwI?c=#}Vz>jnhJAn2?+9el}Oslgp_Epe4NcGrI3e+e@K!~xP_ZBnQLB8) zvuNB$hcGA6E?6HTKUq{CIS`jv>zRN^ok%bBjZuw`f-ANwaud?#`K@+wyY{uH&-Oh~ z1M@M@s34Cg4B1hDu0G|g+e;D+8bnA1NyNe)Zt2!2xs2tsOhU5N>pif^;aX@PD-KGe z`D}&kgountPp3~Lin3v)VXcX3Y=PcTKUM?i6#7)VQESDFhtQsO<0ztgfn2UXUaxVy zR_{R_2~JxZK&&9V<&Lf@5sr1y1T)}pfX4Z6uyr*I1LVwpwtR<6ihNo1*B!3MET7I3{{hHfaDW3-5ZQ^Sj}n!C{| 
z9G?)s3sBTIVa+VXdlE8D7UB@xbeA%jepFg1sEXguJh)#jqHOqP_x$01Nl&lh@u~0L zFB)7^B!%KXcr=6;oEUv)=A1#NBWS$bcOfusFq0NMrE^F;t(H2YY?<#Fa9)Nm%BqRF zJd2TOx(cO2ANu=Fmb*WL+k~p&JN*_R$zug4yZE>&h^a*Ri$X-SC<=Mc|CuC=z2WxF z4Tey;exx%nGtOqUVaNvy8}j3g%xK4cd^vSAJ|){sE+-WLkb-#ee}dxMGw-D8KOR5q zm^QG*5u7rRAxySPn3FQNlvO(d0Vl$chu?d?{=vtp|4K9Q$zs9G=?l!lpGZrGJbfG3 zqJ{`ZZS!;1SWT&;yE_FT@xwBz-xKEI(>cx)kwk=*i|f)@p+y5#%Uvs8v$;ybtSpS4 z-+kia>HZsGM;ZNrySg(;G1_m$f4{6ikZxHy$TwFOA0n7Ya&R|{~a z#t~}lPGo&FuzNM-6OIZ$t>0=?L`yEwsT5MVyH|C>PFy~u^Z4{@74z3We!nIU{vhmC zFyhIVrgiq)y%_JyJVG6aTL0#vHT%w;SvR9zZsGdUV8TNapfva=y0Z2LkW-bawks~o z=_L$3QnRI#ovqw@1Zp_3@0G}Is}=uewpINHTSl`Qm|698O_(jmO4U7T;c3o6m6qkm znn{8$8!z_p7BRje3~ymclkCgO#Hh*bkGBicG|Z8Q+jQq z?DUU~w|O5oDQ!c^t1(DhlSe=-8-I%lCY440}4T*ZT9}9%#(N zk>@s>FY0Fu7sl(&?+KG!)3MUDdnNIB^bh!!n`R_VLrMAUp(dk7CZ^rl9oeTfj`abiUw(o9A849Q8 z%Wyp920!Y}NSOF;n41|cQyDfqbm&hVffH40Q(f34L8k3kXs-GHK>IAu(_T&cZ!9t> zUCdXGUPt9%w6qqTz;$nrGH*(tXfQoy2`r0jH>U}GlJ5pl43k8?4azZXq{;t+T;y~_H4O! z+d2Nqg)wgp{4LiTb6neE=#!a&;az@@_P!&E@hEYNhuX~fxv1Ulz9LMU=d$-|OD)Qp zQ50I;=z}?b7%~S-pYp4&OehsP`%$o_Doo^3PDqwfk6&aUzeV#5zs2SqeVA~T=Ap=m z+hO#AYLnlV%eIj)d=As~Ot`UG``{ueRj3p$s_ww5(MX(eR16N;-;yva>_Yh2ydsa_ zwU(Sq#ABvKXyfgGu&*n;LC0M*79S%2}W`lqN?36 zGd&$5gJOIPTAknQW~=-xFjRNe1yV&}WAm_by{b~`L(y0?dk}e~V5NBrXIKIpGtsRo z^w0-Va0}SM7yQm{M3FINSn2O;ZgNB)X&_TLWhawHOdW<_ic7^dnRNqPZX;CtDJ*O7 zrsIgGD_xIi^fEaTTPBq%09JL_PdGBI~6V(?d_v&D7d2+@>_9(^gcMh< z&JKmFl5lxbA%on~3RNIWn}7_V#jeu_y5PVL^bh4Pbl?3-GM2e*L}`4aXPngqXhJFM zT2_XmT#5*y>=1Z@&xvOB7zz+SiG!VE8b#7+nN>+deNm(vu9HJ_kCzuPtD7;tqAc~I zv>9iDOGnoGacw_D-qi)N%y(6~m_vCLLUIuFc@APUv5?60e1V0_5VsVYgya&73PyBvUE!R0( zh?C9UO!(xD&$diHNRNvVw2A8paPSqmp?G$@#7na0-suhhfR7eMYqk2hEA&)3&b5wy zXFttHKIPv9Q0AD{J5cCz8_nc?OWnd=IKP)Ooq3mxBg;Nz)XtY}Vo^?w@UE^j@#>(GZJ2-`2m#_;L%fjs1y z>s=9cru-*y6doqh+|JV+UIS}NF0DsT`Vp3GCLMiM=}Y7#q4S;8DuKedp}|<}9OrL! 
zqT;>)6NZR5nuthS8XI8I7BW!zlzIU@y!nbcjfY#z=A*~;SrNJ9bpWKIP*Z=BhMOLS zQfr+&#mT{)pSKtP_I{ty{fO=@2@Tbi`qgra2IX=unae3%6{<_r{*ZEVe9BHN#Y8;D zpTm33PSu||s6A6gWMuhSdD#Y>A*7)=);2Ny&-m#Sgw2SX2-i2nkn=aGWGynN97?@F zaN(!MIlTb)xrTcmJ{8ztwIijj=nXE9GV zFoUsU<@-;s#1{W-+}1V*EY0yjHN^T?2*@*%~dUv zXe%c%L8MZMee;G;jCkwpZAX6$KH7Y9qiFeJD4+h?{@%&sv2PkT~x&B}$&G10j z`s94k!J4S;JnDrudF{p<7+MFveIxPKr?j+}COk$1_9rBs#}85!fKs7@ZDm6o;Gt&=QLG+i~jphlH; zo7?Pfv0Gc4u^07U+ne2xN5*8Ox)hE?{FZjHdZK<{tf*yXOLq!Z5i-PW8F0Z??z37R zu3v99`|`$Ty0G70=ubez;vD)rX&DCMuap)J_t6 z@ywUD7V!eiQ_@q)wSsl&tAztycZRgQkcm1=TMejZ;z_XMTLR)Kv+JD1^Bs*|8!AvH zJ%+EfHxr$_jM06%4BLFJmPpF(k$O&L*lvEQx9b%pc+}+WPcDoZ~D1m z{^aNxQ9o=$mLr4%6;?tXG*Wlfa-2E?<)g~gIO13&tcNCwjH@j}BCG^uG*?*tXBg*b z(XI2)zM>NvLMU5wTZqA%W8-i=EJyu{ySw@hf;)k@i5oLi-QP=dxT0k#p|mLD@<}y? zC_aB?cvguY%-C@!y)r&*fu%jingEJuM+I~dsr69Re$~PskFSNFxx+>1$YkTb@aI6C z#-@ke0~QCLeyNB2@gyuFYA>R*$KlquCU-XPTel+ezSw0Rso8b1`1oZdc|v2Bpam{t z0E>QV1Z&%8bWZO(+Bzyp5CV13a#u|1kC0i`1uPxIN-<%7_0m)F`HBgCVZ7yWOYgh~ z(I;}mV}{n7Uu~^#uKlGdYo4!l9-%w6iXK)4wFHhZWzfB8(pQeY1=*wx01(a;NhO;j=_Iz{ zM(3@jT6MSf_xtbk$X?xl#aLXkJ`G_>uTLq8@CC;jI_N_8-Jo zI)NJ^vyQ>`z>dV>jqDdtIW%ku>R|~hYn5H$TKYki8Nnv7>f;%CXv4-^Z#12X^u?&| zgs5CmWN^HtCWq~&&4y75BiB32chNa*Jotw@c^y5cl(Z=n{wQua~P5zqIU-kqe_}h95m=g z{&)05n&LPkeGQh^UB6@H*l_F%L=pVXkS+ni{VO? 
zloAckz={C!aZ})r`U%m{wWI%8pF4l*_NVU~+7<6tE&ildZu{=-?Q;?b6|MpFPb>v7y#hKX-CIBc}79caGSqXnoc zZR&N&TU`mrha@gyuV1D3!?9sx;F&B)JU~CG({w*`B3VctaHtWR0^j3X={D=Hy?>@H zex3APzP-^wUX&k{_HC;X*s#~4s znBY22P=wfo7<1q)5}juq--Hq;VvX=}EU{qkfJgUfn0Hmx98@J6gvk7R-thwQ#i{T4 z2?swW4GPf$nKl*Ic|cT&7OJ_}U4WxUs<9I0Ovi37f`w^dy#m;&e6l+$_T-a&-$B!z0hoz&#~VFv}%#;IT_ zP`0K6M&-ehJ_FM75{$oj**($%W-Ut06QrDE4W0>1k#~2{Pki4GTSpzwH~=M2qi{ zqYU5ccG){o( zS%xUy&GR<}B7k{FRgBJs<;Oo{B$bt2H>8a~ohNUeR(t5;`mRWl*~rrLFHZJ!Vadda z0Mwmsq&7H2&%vIzJ%4ur)oREQ>P^7aiAX^ZFIiDI<3bACFcbcf|L4DzUN=m}@n8P@ zs8=y@RXn8!pLI5iGRL+FH)LeK?quC{bdJxSAKduoA2%*3!%imrjQ&uR^S-^&`oOLG zt>AvyLBA${oV(*aGZFJbdmDi9#AEjUHi{Fs1LcTU=VJCRiC$kexzr>6XX8jtBi%I6 znS!O34s7qe6=klk9c5UxNFRI`m zQ6_!G6TbN}KPTS!aPl)ih#7Vyyy(<8u|XGTlxpZjNQYShmnO5?0HRP`@BGlcdyht5 zCbhMp<7Xb=`^Ms3OWGUIKytpDh!K{z@8$75ynWI13yfl%s%2q?d;q|HZ@6bh{d=h` z)9c59i^o2#Zwh|m7$hnDzUbR`Z#IWDb>(9CjhaPyTx}HL#ke}^uA{ND8!;2MV$+A< z=fQm1{V1A>x+a@$8jNh*{GKT6aklUxYDVRvAX|pR40Tt`7o7)qfZK5CI9G($TgqFw zl@Ke2U6+&3kKK=(KhXR;A^Q^li0$ez|X#JWtd+~#B;k(U`)*CU6Y%|s2)JjvP6;H5g7z)!6eZ~<9>l?xl zRJz1Ag>jwQim&WVcVp#pyBLau(;4WPyk*CUCIoohRZOS+M5RRJxg(KYa2(tWc}X5=XzNBia!O+y!hVz z%TxQXBd{5wfn&I>aZ*O(HjX>d1hr{BqrIlil9qLBN=?r)hhGF384I)OqFvP>lJ2h7t6J?NiIen)SH+&%6AV)gVJvXXsCg!=X3u9r>fA3z2i3~%x`%%VzqSD= zq3VNy(|MS60sIz3k1%50nJ5}7ctT1cl1sGnjvSu4!!Ctq%|GzB#%OER^4inyE&cr+ zNycTcqhMLvjRAgP4WwmS|A1`1aX+{?mp31gcV_ZI#JQs4lAro}db)nR@rN#zRkz{C z&*{DeSML7u&qf=gvMm$B_0JiWi(MiMit-kBXnSbrs`oFb+_UV>H8uMXHrfxiYw|T2 zFzj}Q@ETUE_MglnRlqW&%_w z6Q!02H(-y2mo(v0b$smt*M+rr!EvtJX8Y_GTU9JfWv5U1SL^s5WXe(OXXHArWy~1>7Nq9`J19WzR097`6h?mL8+A@MGG@HbRxD2*eh5`}yPXZyRd~u2r>9pLwOm>=<6^hf9 z1h}J5hVwH{nCt zi8)xaO(gR(bp~JKdgvrM>P(TaHwOjDaui@yCaQ(;kjswb3vE_sx>-1V>X~F!Vr)NJ zW?^jILfHv-Pm+$liD@Rmw9u%-OK$Ev zjIy$iX~wuRT)5_{?@#ug>MLg)OaoqLa&ND$(igaL$mkE@Ay0y;14lY>NyTTf49V->!>Z z4|T^4I^tE%J*b|`&sXSHW$8^fmu@ASJJBv_Pl^T?^HE;SRMEvUeQ~5H(mc=qObHP> zt_85HG&*EV?Suu7SwW7MI>KodU0GddbQW*wlGV*KY=v{&34M!tzhuU_UbOZRVI3rs 
zJO;(jH(tb#KkSTS>c%p5H@Y>ObIhUwi$bQ5(Lq8bS}DVfvK19MQbogb{fu8y{D%kF zy13eWLX?56b20m2L}G&lIGL%KTux0_=xPDrhY^wOY{L>DuPnPKtP8%hp8V=qbuzfW zhZbtc$?O?>he)<27lA91=qg@TFG^4(tm7p;?WMyyvS{i$wp}zk9N~h(e{lZDPBkhv z876C=txzBst&=DKhv{H%hZKOYWt7VnMan2>>{2GGNA}`)ENaE+FaLTH=seeShPytYn309$`&~ zWW=~6`+8yu44c6@f?aAf2k$%nciD%%JZ(mabeVpZI#p{nvyLvXf$_z|thsb^c`_o` zO@8h{uPg*+WYwm6C_md&MyFrwjX_H^+K;gMLcbRx?~R25>WK z^?<150W8AnkI&$32fz7fSjEw!<=B(wd;;mlPwEiaS>{oK*%X+!pFM#NG1<+NEtRtO z!rC%J6cs3%S8KkZRiunN4%~8I|#28)-h#)+;z7heMN?%tX^95 z9-B@4r!;UXB%fRq(I1!1(J$ee&~yY;s7)9tKu}yDQ-2@A4-a=64!5>M$V> ztpL#omr+3lLsDGQvPJ<>%$zi`eZ~b96%7FkNXrFr$riVEXUttwb7^r&r_@TzYFehI z>1pQ6=Q)1Q?|J^YkBk5AgX21``#R6_{eHdmF?@n98?#HY(@}SlWGJFP!ml6(o~Dg! zNd)AZwt9xQ_#w9n9#5>K0->uK_?qoL%4RCUd5i&)+3JL*KRC!L4Yl^KDwunSEHJmX zxb1=mGThkCwSdAL6L>WHX(LKVPaHs+gfgTrAnkN4rgV!bolzU)qfuub| zP8-$I`i|4*tWOL`>rNO(iJrW6U}yAglj@mWmc^7i>zrWsrbMZ9{QEz9A5LKAhNCh- zgGPle_1(nVw`Iad73VpdXM@O_^s24u66a3%`e(iDcoz@YZdY#gZN@-{G;j)XFJlWl z0}FWc7fhmxa8N1WHR+x|8>)E^L>OL3JQYl%&=!ub%W}@qDdR06M~XFX77ZHw|LP#_y^hjJ(94z4`+=0I!qMEjU?WR=n#&nN&5 z)lUa6*y`9oi2@2Pu&}%)^(nLTFCHnBV4t!NSwi4?<5tr? 
z24(%GAb8N8>DiN$#4yT0Ku*HFV8rKQTU^<)z`kKwQM4L6UCz!WD0AZj0&qHeV3jL` zGm!E?49T!G`_{2o_4X_EF`GAZ2Bqfobs?6$Yv%n&_@)!Jc$#y zZHi>qQba}pZsH)Y71*BILoz9PFGQTzIw1=U#*G{!s)Me_}ZP$=6ti%{)tQ_5H zOeyFHvr=*i7VXosf)+;Zd|{_Wc}Lm(+6ICLc1%5;L7QL0<}suK`LLvXt-}(f?a*-K zCfTkQmH?mYrwawX|9L!L;~@@ZcT=S@lj?z^+MCOhR-B7b?3oPE-9$HpZTWJMWzA8y za%izoC&CegVF!Wr90zLs{pGMtvF~)|ft1b_82&{WxlC?DG+pJJ|A#GVc0At~Gpu2x`Og=iw=MQeJNxLH{WuM1=-05}n0G{?YuD48Df@+^PNiLGx0o$(D%_`*|C z-sZKHA%lB( zKIq)I%_jcq^v_xI5->EJG(TCM|IX|Y)C)2E(;y%gQR9kphuvkA{q;womWQ@ zXKG~y-l>*njG4vy^3ZF)$^LvB&Pwe=V}44+=^nH`AWhVD_qTl6lE7;4u*6**UhT)h4M zIj(}oOH10_EXT7BPnaA!Gr!|L;Ag1oX1B2iA-96w#IC}APi#4Pl^D5s&8Cx6lYH_} z{U~Jn$L4+82QSX~o5bYWmy*y(t=>kISGm>e>{ih+A~%a5jgzfStV&d#0ulPKexahM zf589hQ+eud)V{HUCx=$@0o6)oZ~Dr1c6WwG;+VOrOKH$^E}D=99SqfQ?N^C`tDdCK zBYL+l7U@SA9edZ2SlK}lfG$Nb__0>-&(KPW$905vg~4{94g znr5HCT6Mr8jjPn2;^_bVM(FtK#*m|kQc8E_{#>oO=>11|DD&Q>y=4#TN-NU1frn7$TPqTc;Tr>dXt;>N#8LW|A7fjlbPb*t z*VDiSvt@OR)HH8#FQ`}TFUFh9_HCzhv|nqGG0jXlsYKf(u>QUMS7ObcCA>3qrqVu? zeMRqH(N;;;->P=}*P?IL%}c}IUau`1qk3-YvzjzORM;x6AVZ;PDy!KNXQ!u zj`yp5cu#(F_mpoMsSA9mtM>#OJD9%r@y&Ie#bhnNY1g9%$;H2x?u+}zNWAyQi0W^~ z9{*FC#+l+<)}olA5D-tOtqN)bCT&<*)E*3HG9HVF-&TAm;u&=zJ~9vN+dhYgCO z9nFQJ;sRo!*sAuARm;Jaw|Wc~*KM7$0qb>bANf)oCU10{5qI(PRsA0FBdd@trNeJ> zxSMa|8lF^uTVVIIl0^{k$f@e96?+f+PvwPU=S??LI@r`2>Kbfri5akMVr#;u(4Fgz zvnd1B|D&T=`tO#*fA<{zgO;Lba{O&CH#gKbH8h-6UO1&VTi z6Lr_g$GUufhn`i_oj7xRMlf$Y%h&CXYrH?PsJo%h*a zYF=*a8 z*hLN#zMY4SRw5P7P@NZ~Z9A`jKEKDIqpk)AGe{qyOMdo z{X6ye7MQjVOzY5{rs`@bh2W-*Rk#Ty$-nJy#HXOB zXLZ&vUrCFfv8R3X)`uxrfd7_r8<4v|A8KfB()-D6P4tr8ttpH!8ZW7mF#wxxC3xZn zsKdApK1U%KmE4tl;k>(Fe(p1&#OmOL-Y4*a{wIE>&KvYxB4co*7HISaHDGy@ zsgK`(8hw_u_eQj^zj^QAw1>=>B@#<}MKFrKF6i{&a%*gLcPlkW9>Q&>PZAYwrVO&T zw-<#$A$w%mjuo$%%cqj8}{10y4LL+WAX06!Ak}7q>-q+9Ke=q-F(Cz#jwo|TaQp~ zavSnONhsT*zdD6=QH08-(2TS7u=g)lelC1Pu!K$^AHqK_-!QiHtto2qc=0B4n(yeC z|G>{=hvwtei*=5W^iNdkhuC6GM6Bne-kPlEOg!ZI!>;SA@OzmPuIqa@pGB}V95F07 zuT^W{i>fvO975RwNRfc^Lve?8?BaTMF@YSX%tueAtmDG2SoO|CN-(F!A9&DhL$i^( 
z8BYFzZnj~f!KIt4Dc0OWun5o;X&7C_%VPE71S0u~&=L=n5Q^wKX?*o+mz<^&!3kQ=nRv)UvRh)b-|A1;_7gg%t{(b*10}Hj+#pQV&W`2sUrHOh3 zJWr*epq9BR0l`~+Jj%5Dw)V200a(=gSp;iY5PN<1nZUg!_W>WJ2cP?SQ;Op7!-G*S zFlR?Iz`>m9_}#=}wzQ8cwZBPYL8nd*QJaD}yn!q&{PB0!mb3Gb*K3CNyXJJ@VW=vL&m9+!_~J`^0?IF6 z1a!-DcYoRQwCq3qTbVLLAMnpM`i=R*-nx7BAO69Z9Qk(esNiPQqMXrZv-6d*oMoU3 zuw$2jN(z`_gI4{LTFDF3mV}M9jf^2MN{xN<07oUrBZNw37-r~wIXGw6`0J!AAEjOI z%gZPMQhii$B2;PNtaf>xb^VVU8zX2@&f(W~CYVU7r$fu`O%PUo|M#Iou=B-_kB2(p zncd_+p}XZ&>4KsE7el~5UjVNxYkG*Hzbs%6e%EUZ5t>dTmb|&)ozNH2o>5vNs#UR& zZTdl}P!YKmk$xBLkG+)NA5gfIP&-TslR5(G*r;Ic?WUIoXsy>`7*_^5Jhtso{;A8E zN~l5j?17xcZ9$Ht9eJn^021r$KEN8ZXLTG&o1f~Njfyg?=9lKL#ll>x8+L3*gSu9? zpth|tQ767k4kYOJ;$UFF&&DQ0J^F`_rq&N0`roC8$-o&zGzNZO%bfzVO^{^+>wPzR zgt}&I%(=c;#$;vCE8txe1Oo;365)AxSg=6Bw5^(|8747gnxL5sm$tPU>$OWTVKOng~fXjuekTp_wkh}fIGgIKkjiUkn+!6Y&>-dxfxhbT3j#( zNSgQ?06!n0lk8BhquQ^ej~+QIY)7^+J-@vgh%X7%`eC}v0hGj{g6fk}y4CY|$=Tt4 zG0hCZ20OBb%f^S1qPrxng!h0yG#TA?`<0^OcW>&1cZ%=G1a}%Nu8Il`&-{bhFd5iTtO)=lvV%U3!qc!{^}$$hj@1EY3^#rL=O6vf`&8ms&vJ5iA;YAeAgZs3W(OGUH@W4f$Nq_+8RQjj&&-0I^p0sup!B=kVbz~HZGeq)+hh45)l4~qT> zrPs%}Zp5ZK>NY^v;@*X$o6juMs7JSL=(`zmEqlAZ#Gq@> zU{3D7Ad0iVJubt1U;e^-J+jYliMYO@wg><*VP?HeH54%l6LAc?x>mk7ymOj27hzS$ z5HqABUquFeFu*3Lb5pG@_nKe!S!RM-U2H0u{Tr~4+}YUN9J+h}0QlO|!tug7YjE5T z5jP>pmBThGvPOxeb5VUqH(HnMRgsZK;F6;|7#HZT&z!EBFuQKQC&gl{B1+0^;G3)! 
z9j?4c>jH-wZ@yPNRN>fUIo_=%TTlCD=CTSZA&G%D4Y9x9xU0jzE)vW59G8i;6mbSx z*qmTIo)q8=%R$7vS2Vgs?Z0d{9qMRp|lt%B9=l3m2Wg0c5~0oV~M(Vo*Wa{U5I-| ze}?{I|4WAYw#Gf=@`|4PsCwwP;X7R?Zoe?3hrjCYXmgA?CLolFl{Q7K91NU%fHqwe z7c}3~YHSX?oc%2UpD zb*3$8ZeqM0VG9uT4oYc7hG0f5N{7|cNc^jE%R@26-?fE%q5XCfOc;Tt8-GgG$v8Uw zc~duAfJtt^`#q`P5HTbBr;p=NUkmKg{3CChW2aN!H zovZI^?gvwGb@uD8bQjuO53U>^*6Y3n__@#}zy@&Hs^6%O z5E->1YH>(#k$zDtP2wD^j8~2I_f@x0YEK4=LEcqB&VJo;AYoiTDt9GIATvLkOI-2`KP zGt>Fm>$_f8IsRib#nh#L%63UXeb5IeIDmbMIfpT#dBi3y@#jrBPgQ_DZM<_BI(#v) zx%m~&&7VkZ?T&d606L9+Q9Fc&xX5fsceO*zOZaj$Ty|0{H7V{}CD9G1vfL>9bB18p z9O}A8&!Orfl_hs8RS`pDf7$wFXRGSWbA9B9sLS=4W6nMjIDc=r`g#)p)CD+xiL6y9V)vLGamjU5N5uc{$^qCQ zj*5mE`zLOZVYUEQ0`VhBnQ`Ha4+dY^lu;X!)iO>{NhAnpR#pQk{pOHU2?y%ci*}`! zaUnJfPVYG55RYV#yM2i6-{I5W0?*WHo1wIJ1Hh4={!ZMcTZF>as4dxl&EfK7L?_P1&jw&DB>&$fqU(P8<58pI$ zK5Iks2Nk+ZZPFi@)j{=7+&kRSvK((}X7o%Cq~3jJBfIl*)0;#N27qk8 z32VTw+d_r6A-1iYoD+4_8#`(&y=`K(owQ9Qqpp1*p2%cyQt91t9t?wa_yDr?FH%4v4Lm7@h#Py;pY!+R|9!FpBu zyF@WmJtNB#y~>`*FIA&d^ zu=%o2Yem#E02J%|dgQhEPv_=i6^$zfC*D1Ry^f6f=n^q(mQ|WC5=DP_Hyb(3bLg+F zg?T%ka|brDoag~qugwr2rkK4-^1!6pqEZA<0Z|K(%M#HCY|!wLpZ?LJke-*WpWyzE zPU=`MCcx8Mf3iKZZqbk0_()m1Bg521M}GN8#QE>4FJl`QH?t>v!>XLop8g4cr0;7J zF1~-jC3wKu_FB3EKe^xf7oGM0X<$J98_4m{e~s&(_PVl?5o7W}`f>utM?NXY7L0$&=?lFMfI6Wjvf=vp8+! 
zw7g)g|7Oe3v~R!o#5X@^@Xm}c$v+(WNy7`7+XG<9nh$MQrCZ+Wd~E*OV11KHw6I;y zsXpA$OB&Uf79R$N=q>79wLxzM{2KD=>YK~1xRm#sntHF@zT92iADMvF5@T#V4Vx13 z!l@?9MVS&~6CHF=v7z;VFfCk!s>kiG*D*4K5yX+6z-SHGykETvU+b>He!#4Qe>>PD{J$cILAcl9Zq$E>PpmwHw_+QFx<^r|*;R{BY(Kk=yJt)dlqljBS;w zQ*!0!LW1+5(@m$peA-u(K_DE^@gHm@jD_#j^S>w+PwVb8a?P(ALFfLtb%-46ydH(I zZ22YNT4!t!OcosdFv86ros*3Vh;XFl>X+}JjF-*!-E0X|4ULmtu&-vIxwutzJH?!^ z>Cl4{6CYd4_uW2XG#j>X>R?O!a9V0@2%UWE^C$kLFCXCzOu`P)fI5Ew#IL|IY3lC> zpOi~=8}bDm>8Z0PCXcCIvh%_vXYE?@MDiXT34@5cjSx68!Gr$U%Rf)vK8OYUvn%z+ zWniPhj;}etv3d_D`MbxCY+wux8DBYua~wunrGe-);{(n#V~1-0r{R4{^tsbt`$o9- zVcuf=gA@WxrSdx94I5x90*wpPTbbuM5m}bECk4|6mh;*5SSXd>72iXgGYqqPBZvYY zIKIawh4U_}sBiuJ!&;x3l^-S#{}owB>-fHC_BK8_iM{dmiOCRf@q+AT&l5BMcXru+ zfN&hs6mWiZZklxt+NJwFu+3`kiQTDIb-Sv6q$`#$cQp}&$?qLm-f;x-$O9`Vn5$wN7G1~xsGoDm^=~~Ke$cQWnC}~V|$wO5& zXEf%Zos7Bo^(Khqt_jZWd`RfsnaeSysT=U8<+-hUFWEla#J;r8yotbUX6q2}k&>{g zXydQdy2*MY_2aw=w3&L z2@;x3mQQTGxoK&%SZE;*G&xbEFPv5~Dj4DbU|d~bTqe~dc%%gIwDC7Kuess+hEIm% zPfXM6d@NbFH<$7eJfF4AgX}ne`?3i3QpsC$0hH_c(w+N5A3MjgIHIRB2j}2-jm(Di z^eA;!N{c15z4YAmBTtNmoc9{8z_pd6_7;^fb(*;2$hcr~A+hq4!`M`n$6kRIs&w0^ z#5NjbjiXsQ2HRr_T7@YJ=ujIcpqag%c9kq+%-`jvDunEZR5DQ>a{r7Z#KX~r6}`WG z+OHZ3N$(2@G=YYl3+w4>Km1ElPVrPbvH3A5XzHf@l;KT8ZL-33gHhFw?@(;_b$Yr zKt8UE8(taCCOpc?uA7Fe0)#;|+m;+fm7fDQPQCicGgYeQU( z!?TIgo@XL3AC}yXI6v)U-8$jCafp8Usg{F%=ZRPu_WCk@9B!ib#d|MhJ=_ql&AMoz zohXdW*u5#ape%sACQ#8aHKu@lDQh==!Iy{bon)K3!@q3Bu$o+Vk3wu<$mnQW4IvGx zKFS6;Kmuynt$oD&@U+myo9M`>^JR=$r6r}K3J7-!F4h7pD$zL)`cfWDq1R%v?5PWV zcafb}WcEkW=&6rQfp3AE#g1n{i=S7aqX9RqA^$-K;Le zzR)f^i%lxVo~b|O*vhw^5>(PO@PQi36!0*K83IB8&L5$xqjO)ik0BvD7ahxEOq%a| z6^8qGie07;(tKaeWJ61a$vc^NE+^u-)%KOPuW3WxGqZ^_O}nO@<556ZQ^ThsmfRpq zjhhMt3Pn^%u}0;WZo*8j*%-R{%lVmiTWWX7oN4z>*7P1daLXZQ9{B;Tbs|#-xl;LL zcxgwZfy3HwKP=wpjX%VvPvn3_xE6;bMgzrIo!!|`(V1B#%pluh_=9Bw9n1Rat{(v@ zVcPv*TVGgxEB41C!+*R>rzqijTdq*`b|wm)3!Ec^F?R?$xhXy(A$njPM?ZhkXH@0tpK5nVsRxls7@MnU> zhpYx~W#p^x(v(+!j?KRyo-irseCQ+lp=x{n>a`oVm;p3MmSHaeDsycoY%A!c12u`@ 
z1I`Fuierq6yzPg4k@1$~eUGW@tC=S=6M5to?k1}V|CfR04gF321&L&vt$@WsKHk-(Ah0_N#w;a z-@s6I)*Pl5-1~NB?NIu)H~$XL?yr=0Y;+0=F<$@Na`>Ow`)vm+-xL{Z-`X?O$y3vX zaToy=cU?Yw+i{5$JxJY}WH2H^;4C2+vMF8+1*|jbdP)3H6+Txin}ztL@Taa6g9_p{v9ZY)#B4Dt1PeoOzH_VmSk5hX_ z6?Be|KiNkgWqOt`y?@d5jM&+nXX#RfGJOm)9;^=WpqrZ)CA@9h(2LU$_~aK8YnHc` z-+mBm%a}M`*dqn3zV*8_y_?Salm&Ob#5`b-P4A2lIAan#N(w{V3b+xoVY^*^@AJ3SYVT|2|O(}SfDtx#d%r=NzM|2TS0Ve7DXf6{vYFKz)?=vv+F-gj|75x0F; z*Qx74e_U?Z=jRXkSMP)`PhZOih21#q9C2jVZyRE^{h#&q6-0p1J7u$rM{}il9-T%L z7nnLs9tDNbXvP5%JqY~e6kqQ-yBlgc^I9mFZJVVn_5S=ZRXDE2$EVrlov|E5)|4YV0PEhR2=l>9zK5e~x zyv6wSohJ~QRd6V)fn{v5@~GE4iN~0X_C`F^0!EK$l_V*$o9O|2he54L>Bpg zi1M1GQk((j#h5HGi%)*rX){uN=H~BC z&u)xfT6j0~{on;|j8?C+{&iv5+yxnFY5+1#2s5wBBCBaq*1C2|*??c%d#$&%dd>VT zlXK^GS|KN3+Ae;FZqJS;Ms$V3+&^Pp-vk9VJJ3`;5kdEg-tuD+zyI8xuKU#cBxY(Pi2!po2mE@3ekpSQT~60o zfBKuftk(~A{}KDnF`Q*38tWoND4XR21xP7qCjyV$qFZR`*oZL5oE68T4>veS&YHA5fwY1v{Po0PUupAtN^ zh!OXU+XlZte?or<#@$Q55NmqjZz_1`<7C1wsS7+~yYD{7H&!d2S^$`jstz>1O0Z`u z7;|`l!oW}QO89KdEk5FYVQyX%6S_FsWBunzY;4Ic$7oHn%M612qheP(vfsr(2}49n#UeHFF8<*kqu5BEBC0KijYmdiQ<~#-LW{@1 z(~7aeKrLS9$t0;rtc0-$%mSqZ2LnE2inyaGTH;iY+9~d^!j`)Te2iJ(EMGuJac~SboL@UEN(NSd9D1aQ^2N<*B#oS9=#0 zPgU1BT1Zzjf)Q;-3InZWSZ5Q~R&*o)bfC)GsMv66FNnG)x!+T4`{ziMr^37v(&t}L z%O4GKbT)|Fc+Pn!84j-hIx$KpYgc%ApkReX#L2{ohx5LIn@YiGV_m8#xSnWnW+nI_ zW&)Y+qV(z(M(&&J`r%SFOK}&Wu_c)}sRfbV8&6hmL%L56XbJC|iN1pC@LR>_o1R$Z zjhaX&ZY4i8#6BPXv;~xxnwMw`kcs)S&%fa8wMmS?VeMS@tgT2pF1}vB(db{6UhBEZ z7uP;5C2#^hh-Hil=SVkyXgMmUil(4JPlL@k()((BDc>@=1ddrlYYQ-bG;rKtIU5p) z5%)r)6++6c*w~Slj+bYG?t?%5eCTnarT}VJR4xYGe1GVh`!9bpevgq1MBO91e$jQ6 zidaX^WK%C^%1x@j5JoaS?CIStpxuNUhz< z2*BcX{#1y;U-iYm*_qz_Xh4R__3Xc@b^1Q~z82L@zYTO5d_M75%X}_-X*# zM@uwdSX#=Kgt?9Av+l;ysnyZZb#7W=M;_m!!xKJ~ouwjTIJO01N^*0mFlF=ln}OrG z6DL9x3w|v6>xaXYiB=iAg&WBAkiKHrf&@P|ON8WGfsz+bW+t&rvbV3{m^e*uqic`)>ZWI?SpG3jH!$JvzjIhar#Y8+Z=ZZ=3U*_d6Mo6A5YF%Du` zjS6?#$6~Bd*HRj#1g+QBD4-ShS4^kTcMVsn(HS-k_>CE~mUu`=1t;k6?rQ$IL@A0` z9NiuFmn1;4_;A6=!1>5;XQz8^o$dPPKKYVQpt@G3j=>qu{K;BHHlXWl3(GhSoYFVE 
z#~qK`E`OW9_7PE-plf~8(KmcBlzljl;h|8XdndF~6TpMc>Ruc?Y7b!D4Or0YZI<`! z;FZ7#wGf>6HeI|iklK?av4@)m;(E3`nzXbxBTQPH@sCuB5?$zp_@SG*k`+`we#~Wz zZh9A0iZ>s*i5Q;c4m)8uNmEd>w#+E05D_Ttv=lv6BZSZoBbd1D_*&zeCo$%6LCNe| z#9SFCOofCM;NA$OeG4%7*V{Pgv1_MjSsTX^3bo|Go#6?% zL^su%76>({9UTi4cS%~}%OtH~01qA>ozX^-j6Sb`*l4v{j2sVqGW}*xylo|B7JZIX zkhUcn(FZc%d(nqw0eD^{k>NCLf_Ip%$(1RJ^qrU=p2injS+T)+t%tm?H(zB-4cS9R z$-*fmUt8*z^6Y~{-#x_w2-vy7;|H6qCZ>VKpIbLozS93hoCbyb=pGcUEObAgTb81qcr%(exB)6nlV4PFHLlF6+O0bB+fNbU(Tfrf#qfa0dYwspy@OO58ZCX zvMNy0D|ddB%;@h>letr=$ssVK=cXZuX)tp=6sxcn{yEpVIZ$cZF0UnZ{_&yhZ;Q;A zM=!N^m3*H#eviegi12t7RTv!RS&EF-yZEO?k;QaGVes@Q23?9-hTKyZ&A1-7h^c}~ zGk9V=e6R)uW5}_;`&;hL&C6hEi%Qk8k8}7r&yO!UIS-QSS!f*zwxAmAp9w_Ta%)im zvv*kAttvspGTAT|V=|d%X;}6m=FQ_-)|Tt14S<0wFWNyFXK0e70)t#r?^TTllR>w0l5{D>;rl1C5d?iM8XRcU6T)y#x_b7 zE%ujk8O&=BwRuppYdBl*8TwS$eH;*9HqC=#D4|lm2eW|;oDy@qqr90;bSYgP_7UGH zKOK~j=rtfAU$`FGPq`dUt0(4O{g680-zf%!A=kJ_YZKkhFZHA zBk#ub0YODEB79#pLXY-Ue2K_OQ!4H!(MxO~hovJdzdA|AmS*E3Z^nEERE9U_Lsfha z%@~m^s8#YRc~LY9FjIsU-tiu1B$1~x6k{`VuLu=4NL9j8K{WL9#$Uy3-MCTaz%0yT zdUiO?!D2*<$+1k6afSrsj`ntdkNdwp`8)6Zb};dln}e%qgfaH-!E(_JlkS{VP@ou3 z770a=4Lvj>5L^K&kuME7+-7kJBf{)R?{gdIPz{+8Tn7PYeMuMtmT@9fvn;60vOhc| zuLschD)Q#`ew98WFkGHlCLwJ!s|u?qwIqx~Aj5H!v3Lqk>l)Z|zUSfF5r(5HkWblZdBy#kifVz~p}5Lw(xj-b_<(b~U)*kD<0 zR=C`xPf?pTT7qt18_!rY#J-mAR%TS{&Kz3))ixILdfUOjhzz*6dlq2T58OLWR{!g;6(23W=03gChSd~f``2(Do@ElCC)s8tZ-_^jECfEi4!os-NE$6 z3l%(~+cU1k##f4s+KsH0{{e9J&tHNd;jw+oiD)ER8)UP?-*^(=rjTjPrRdLSj*MUb(83fZ*Dr|=)nfHE z&X=adzVOT;HAP-eJgh3Ev$Pg`C*&~p!c#?XF64yHK(5$;Ig zfpU-;U8J9wOUXB+qNNgl5Jumm3WgzR)?gzr=VV9K491TAil|{G$&5eb(6S}2L;ijJ z8ot-E#G$!==vimUblOHYFHpZK7K^4Pg)f3U;uxM5D>f_#&;%(+!Bg7FnSA z%_nJcb8IVPp9No+R#j5$psVP*i}^$pz>h4V&FM`i_jI@~1+EMK%FNy_L)!~NCzDer^x zv`l;l{3b(c9ifez8$Q{G00%q;nc%_f4luhYCQ$p$C@LtAC2lGigd-7e~7>{TQy5zhSn;v^ISGW$M3`;2ZH>2Nd5&W)F4#!ZeA$yy!Lh^K&;DGOrh%>g6Bf z1_9tvcbaXNb#zfht$jzMBAg1Sx!seZ<@bHU?y?~3VEa1C!6>o6-%H8Pp?(D>F5fMm zZz@@_Zi8)K`M{4t=*!HJW+$|g(AzU8emNRzg`TFH0T9a*Sy`!RgE52UuwHB~op2Xg 
zKS`(9g+(4PoJNQ3pHO*q^bk!xmzQnY>%6llX>4i6Jv5vlT^_FAup2d=skJ_O^+uf8 zrB%8#j>qh~B+P}1muGlGDcVcGI2EdruYhubj3Mo(@4^}V>zso|k-YZa?kuC_qqkYI7+g&ysRhjQKM2!rt#V{!Qc>%e^8x}UZNjP-84ER ziDPSl$Placd>DPC9?Jr;f@YC+8T-cd^e-8_JZ1QwM;6KfQW2+*gO|f#{+)? zPR1hp8vRAkpb#TY{t$^7S}5*a*ScF`p$36l`V=O8qw7(LyetARyO^#`=!;^-`d_O~ z$wsS&8m*!^=0Hh6Z`u^vW$a@4`24jZ=|Tgq-lvZ z_T6}ECCS4=K=$ykEljniEl$Gl%mQ(vdSL1dC6opnEs4yUX!0~veUN;>2 z!5W+%^gI~6>c8IJ^*<_zz5m-&&~ARf(26#grt-W(C0@MU_1kz?|L@$>^7P8m`OcZV z{JX%;V+Kp%=XN1(y>`~BjD>AC*h(j_fEjCDqdk_c`uP$BxF=S!ntfCr>t@0)U|U5H z?PQfxJtyVVN6L6Z;I($k@X?p<26V76J^&-E&W1}Fy7VO9jRl_M%2Ao40 z4>qDV0>N)$mq}7BibO`|LNR1!7;i~LwdH8jS}@p^_Sv301QSwc?tu_+PWx9ML$v4R zM_M+|jotH1{cfz=UcbK+v*TJA;aG8@K1+YL903d?ibLq)eqAd`BMnqk$a?S|8UR95 z!R!DxuwhO(<9(wzw?vqRzu9-w-=#)!cmRLl@B?m^{V;A+ygb^7S!T%AMoH8GEg^v1 z5uD1%jI*~sgC{Eq6|)xQ6kGYWGJk(Un3&q^q|O`j}lKyBeQ+Rtb!6r}-Ct=L-sh#UCy63Clui1Na?gt~=_YIBwXe1gX#au5>x zTp9XsC_=_9VP#Jx+77K(`%6!k(WN^-@V%Tt0lrB4J37g{W!5tN6jglgWES$fkSyU(XX=`(k&S`k98dQAG zGy;8GOL`TVCdJw7ZUXruQ7-AI1TcV8un~f~I}mU;!g3F(4`g2paPDh_9x&*yO@tmH zg3&WMwHcP?RiKROZh(UokUW6PbgM?#xCfR$?ZI9c2kPi-I>r{ADPmfu$=NU0At9Nt zhP;gb_+S}JS}3melVLSct+myL>rIzw_rYB*Pe0!MXZg$2_jNwA;)ULsw*ZT3+}yPl z44??U6%@@5j4ww7wBkZR!4;r_qH*gStq}(@YEb#D4#w!ZhHd#f+&**|A(} zd9c{~QkEgyRQ1=A5oY0SzdYf|!E?EN8-Kv9Wo^~yFBi;1e1w4!(g;4vL4+*^qV~@p z^l!v(7x{u|o?a3LPKw4M4O>69_oV*r)U#!8MXx$FhZzzJot9z?i%C04(%3j$a!sq)?AEf z)9Uvef~h~O0oAWFSRF6Q)icRXd0O4At^KGUNEd$6&;0YlCyTQ9z@gu6eSNLQR!@$( z7A`b2r$n5msmUt7Dl1S*r0XpHHDy&eXa4PFqx+5f+e)|kW;8Bb{iSR!_uI*jAQ$IQ zZjzp<2vtPu#6>UbEbPtZx&W}K9W#9*UHyJg`9WM630Gt97;e8_YieOye76sZA_eF_ zr_hSF0{{nYo3(0xNkEW5*0+Z3FP=G)DwfD+Ed1#PRr(Tg{{+x4eP=XyK)>dH#I&A0Nk^W?XC>*e=JG}#@%zL{LU z-@4b&?cWXOrk>=xD_-=bFK?UwsoH()l%)#<6u};*;+s=_a_>FVn(lkrtnJG=L>rO- ziXI}_jEr*+H4(UaBtvbDtTh@HzL*Jg^-;nH#6X7v-Ra(aH!VItTt9t|(i`ZRiG}Zf zT;{SH-~a2X_26J}-YEdvw$#fUo~)V7bTh9end)j_g;26KIpW2k$!q2j#p{RGbR%N4 z2NqCrHc*%E>Hzn!RpG*TPmb2Q-b-{k776(IFYu|~-SduK4`_T5)BF3g9iL8J2QDob z)~NQF|55^kulv6>A2voW?x5&C#U`TD3r#Haf~-)%#g2xF{%HIXv4VyLy9Y?iR|6o4 
zJgxVc7aRI^Z)(rlPv$RHscgKVjg6sj}!!O&P6>tQgKHfyDF zBD;{Kgwjz|l^i~8_bMK|QLLxrNn7UP)(AgX=>fl7x~iWwRvYT1Gpiq)o7+H9q3B;Z%wBVnid%MuLW(U|BmFi7p(hv`8m1~xz+sf3 zv4fxaAeUD5_b(~pOhV8dmQcYd8b%TKx96-wwvFpW4+LQQ@_OU-NBl+pnRRp zrRAUuWKKs&ZKJ9Q<4iM(nWX_N4ifEQwi$yqutBCH(>1jyhKy!9ETPeJQEaHFUiKiW zS%tz)iN3itND7Q%!W~czOnD5}w zebQC>(9Zx694m^est%K6f%_m(^4(+Z@rk0|%8i8<88Y;z0|$Q6TR`jI9dQf8-ff6X zye&WM(HSe%s826jq}_U!0k?T@^-9qG z4ys*{n+I~8EC^w_%dF|;o zHi;2m#s?nhe^0-Y&)Ub*RcIVIKi2cUegei%d zGM|~rAn^#Dp#vTR!Rp9-%p4RB=_6Gaa1MDctIv;Z_)A6z}&n6YUDW#Y=0|zdIAO~vnmD8 z1_y`=hIJ$j>Ka27KMDs-%Nd70YCq3IG&f{cOe7ElA34uFtOoCs@#eTVm#$D82eXEi z>!=UqUtKs|(H#o=%J4`46#5ohR5-@XFUYn%S!4R&rhL8={U74BcnyK5scVhpa?z; zHXTSz(-=Uj+3iG?ORBjeI>QrMu^6Cm#fZ#m;yB5ZjkT?_!X=XGBSq%co-RR)G|&+X z_q5Pp5KJrFn?e#3V=gjg1BlSXAkWdKum~~d<9uXTq?;IPjJrFER|ZWwTEy!Qg;*a(&MQu zTqFA}#ey~Xk)^SP@M$O6P5tA#=f1z{_C)udKLNdhU-d#XJVpG?=c4?%M5#o5E~^X*dkO7B{&T${-tFNaJ<| z1dr{^DVO6@&ZwRMBUo=J>K@MCG830Yd$8F*y!6yDp~}$1`apvapIcAF z>q(TFpXRq7Fmtm(OVu(~;AHf$6aiwvj36^O4^SzAe-S5iGzLX&yo=T_)h{xu&*f-R zr`TU5F9O5|3ed$w_VF@uu~~Hii)vk7FdN1$603AFJ%S}nRh6MV|E}j?CddhzM_i=p z$Y!bQvFQf~clI8bKe|ZJG4x{8mPtPcwtRL8Q?Ohfxleu@YVfXcPpmThQ`u9wtx9pC zgsSPT0g`(KxGex_cjXg0C)D%#7aR0*J*&OF00Xv;MQc=TOj8(?6Y6NKT=J%5g69-Q z^R^5NGZ4v->^7Urh1h2WTZ|Sq6SHF=bqiIrc*{PuL-lg_h6f{2D$OFvunlTpG*U;K zuwVT|nfv$k?TS`A&a3D@d$v;I0iizNW*aJZUHbdd7um3iSDkixC$;+gv+E(xJ@t*P z^&1*R{%&oFU2#XtPW1aX#fLmrOb&chbMsCYBQj5zQZkClrK<@!vG_65j*o(V#^|dj zA;&%wJmlwj%DQ!+B9~PEnYp?K?(V z*ds!@50y$q*j!q$7OS@ve7d~-@WyPM#~FfDmm~SRJbuW|;7Z{zPDLE#0<&348WH}L z^z)TWal_)f%MSJ$;~N5pKfFtIB1R&0JN2_U>W=qZM()5GEt+!VIGkwxyD?1z35jK6 z;&4fV9TZ?0SN8w=cYP3=F$uKO_-H4<2x)9IHYknq6pz_d<{KI}VK8PM4q15ol!`v9 z$&E^g#q$W;CO}93p#ZLx5M30b?zy_~uV*#fPCvgs1Ic zA#1ln+QV{BFxV{Nl%uU62dak#(XO%uv76(t@8G((;rTJv%RDqC(I>2mr;{efBNXqO z9i;KaV_}-T#%QJ72G90=eK zZ?!*v##L$1{ibbZ?TzD$*Sct%U6mi#KP+|RYyWxutG27t-&wShIT zMP!ye3)1gWGKN{x4Bg48!keR_{4G=QqXml+V~rApkczRvfUL#EIK8^1_CBnbI~>)3 zb<^$s+@loxGBI8$P-%~AMQv+o(XG94mdal~zms^3l1&`8CGR~xtlhLy!s6l8B0IOP 
zAGL_ManFWq0iAF^n)_7%6wX7+#G7rFm&Um!vEMUuNY1KWPB zt^L9;*M)iKe!F$8{ZdEAWm)&-){dqprgoCaTa(6CS&tv}E}oI5FHSt#%?KUsU9LP+ zXUWh)AW1CiEbVevbI&6H(cBI&WWiNR(5~KSgr!=3t%m?ZQ7}R-!-7oXS zw;n2gOxr>Yhtb6ORPtJWIFY}kbHjc-Q@8?!X6+GXMD?O(L4a8dGU)oo`2_Y3N1gU- z0nz*3-f_F#z)Uuq74|;&Oo`u1#4t1Qu9(q_-X6r}R;NSQ7gR1J4U?uS_W5#Lc}gOTNq#3j(NK$`&9Yk2hSc;*M}g>8Y4|n1OW=+O8d+i_uci$ zU5(WRQIlzXG=Y>UWfBV6x2r9Zp4gG;bu3pe`W-s0@njEnN=Vf?qh zzuEA0S!Vhda_HTe4oyZPdcNAqpx&V6J{ctj>K0xXIqjy_jO@YE@E)W1(R zpm&YE)=@KgaCFzEY9_Kb&U0wGPV?pRU4uOW^J84Mrq%5?Y4>;#payQ#2p%0K9`)PO zz(Sjqa;06z@7};o4t{EB#e*lg31}b{jLbZRpUFYq!G6Uzlv`Ho+_mK?)xH0%th?EvLWfU;?MdWRlM;e=t|Oy+d?^M33AIZRsZ&cKRg$bzIWC| zY?zxNrLHZ%bACP9WpUmEcK4>bL$!)wrk<)s!$z>xqj?gi7=-<78bCXux6vTO8(s3r ze?opG^2fKyFXq?klpf>VDLIJj(4||O|5n1a&uu->(&{7+2+^<{f{q(X+7XwAWbHg( z8$-vqQ7WgS))l?ppDm z68DaUO5f4X!x6K6nQxV|r93($U<^n6lI+zC(&?&^e3(GH{r7%_C|~ ztGp3JNP)t8m~(Lex5{ki)?B+rUwdH;YthPA%H9OyJLV+!i7xs|Yq)Ie$Hg$5T|!LL z1Ik1~18RVjA>UgNqmy8TFGdNi3w?%j{ya;KWIC%j2s` zfrvnQYrXxum|fe7Vjr{f-IhefgQBV}4K4D008F3q5NQ{7dB2jv`Ov6G9tIN1Z z&jy3pn&uYqu+;iE?%e68m%1;{jFV8sW3`iE6k&y!kIfnz%+@n{yO7=>9y>(g&^S)l z4r!B=peSm)-Wo~cE`6AtC6Ys`Z-xV=Afc5?)EP&L9jh`T@mYEbuo?F5RG|h}Dpkjq z`>u1rl)qn0^u4po%j5jXrRt%4R1VWfI6lplj*$`)38AKjjtbRAu@%F2EWb)Usmq-|Zs|M|dI(ZDao$98hVL!(uq|-gPduhfH z>XzVaHB%O;on@hrQz)#5%jXp7BZYW%Wep<`=a&1rXWf)&rm;1Eipl;y(Ns}v^pJ!rMB!Wj$Fhjs3A4i0(O$s)HF00 zqJGH+iRKK@6>?4&Rj2~VaHY(qJhKzm$X8P??$p1-c)AhdvV27J#^8cm-p)uli)QNv z)hojnT9v^EzyPImgoF%tVYZ4O? z*BSARDU^~ym-f1}P`t0JeoAC5BHPcQLg(mShGix9Q=iQBR)*Vwc6XYu`qNI%etR8? 
z8?(OU6-i}|v){DdC`xdq{aD$+#m9v<9FoPZ;l|qi9>ru6Wqldv1f}337oD6)RNZ|E zhtKt%U;Q((3jVZ#^lV{kRq3CjUQJBpuy7<`us{h~&yK2`FR)78wwifXay9A=n>sd~ z375@Y@b=Vz61z0gua`>DZV6l-r~T_`cQS#T6SV}gNkK}Mwxb&tAFiA$($KaGPsECt ziuFEuW0F^g-#(!-?xm*6L2WJ-uYV5mKluq;RQ}4q%|4+hPJJ6Y!qhi#YBEq!Xkl|d zWzYFc1O18cckQ40?X;ZKfccbb^B@!b``8|MAm?a`{xy@N^{`sy$Z{vCHk==0G}rIj z;!>Z|05Omht+>i;_no`&+us*JNA8?T^N2Rv?*PpW-&}Bigrg3SI4Ff}bzTpw5}KnoG7LU?U-2BI zZ$D#az6=5mEaZ1x!s*ndxAJ8P6(p`kk$K5?j6w6QC}Q0gN|N7?-t~as_?}v_PL`p* zb`zrR!Xx~%Kdyd0c_rp$T&h*Be>M?|FINdw*3bTk1switfBbYu&Byoi`*l3%DtA^! z;=0OH5o?o6pA_b%ZD&P^v_`|ZSDIa^Ts0gze^{(E&Q1Nh%tFym^J;N7WE z9KYl3!n?NythQDLOk&p77R#2q(=%qRb4tj)c(gC~|7y7~sQm9?k1&+Fh-5xmStJZBJqT z{`5r=Hi9s|FdY2EKSTINYN$y71mRUGlQG%QHWQTyr*8#MP=cy_dLcKJFJxeT_Kb-W(k39vxm-Bx(U^uV4Z+-h=;B5JD_~*SI<+Myn zp$A!RDAg9WR#aSQh<|b%N`sdngtS;xK|!v<2$tm1l}0ieqZ>(110hc0ZEP4GM@d>s zLqElq05@^984y&3OP&+e-oDW{$u(wXM>3IdNL`4ru*{vawLt;YjxbMq?VPmt3A$qG;XfTa z$lkDr=RTahWoXrX}T;KaG)a;l}jTTi9(FtH@9zliuJad z9NTWGzIH#Uu9g^Eq^xz7B}11?rt%e#IW$Ay&2p4_u;-tjk1lB!#u1}(b+;cRpC7!> z%N!&kHqVUo91oW#y7wIhrx!=mzL9H*cw2v7>XdsFhT)09& zeVM4D9iv)f<{YM|zm8fy1;)J9MH%%IRXaL{DX&OVUEbLzEKLr_L`S~+DK;-;tMQ~m zfJ}wh3)D7k6o%5=Lka*8LZ~qdsiSRauA6IdQhXef=N^~@RKeC+;P%ZXxAv*v4>KbwY*q zYS@w;>%gvidmyuXJ1ae$JTmqen0H&3wtaw6TJ~vKN5S5E`jBt)Y|*a=2aGcM3HYr} zAlZd;1c(gilgr{uInqWqLZt*GEibG%P^n3Mjphi1(-`y$<~+bcDK1F zOf0k)x*sHI7soXm88E!DsVQe&?S8OfzZ1A0J^SUNuiiRx99TmoE;p&rK73e$<+7^$ zlaFX9`^C>+NHEoau$$&Kg#1xSK^DF8aZm#>E&@PLM>y*Mpb=D#g8Vy`t#t4G@SykM zo$u8DDtFr0pyYCO_FpNXe%J{(mPpVv33|{2?Zn+(2*V8L8@d6$P%pev~bAqH?dy!AnpWU!2NMm0-bFd|46uBO0B32hSy$jii466CJ8v($I5nSARG_M_%;0dPQUIABxl9?@OOrwj&9$hkmNiH-8-bc++%5h0tKqzjTs zB}Q8g_T5KNi0Wb8(wBv>&#Hs-_ug&(W9R+dpmO4$)P-+ftCyvnp&P+22A;MA=GY@3 zwmFi89#nrmm;vL827R##v?{nt55_i>8`2!frIeB+LW(UdvEi22NznfAm6E%%Hn_T@ z&ph_rWAFcI*xK~fbX$(xp5E(sOA(pj5|!D5yL=Rq|41D*b9d9wTbF3%-%r3QXW<`k z^O;+9*f+$@b2&~&kY-{B4BXcu=c8(r7=ipJ+P7`SlZbxVpmTE zY(WkYEi?)pM+L8%OSt8{2^gTZCPc)ecy)0wOh%lmiq!MaY%tIug*dxT0c+`FxN7al 
z;_z=b-|qUJD@TuN+$^*aOb-+kxfjU)^)|Hs$J?L;dK-S8 zZ*IiiOBLp%Ec9Hzb?M68up-cB{dlJS+E77A_tIdSaKoeWY3!hpQGX&UPQES%lly{Ua$#D8%z3 zltq5QtQgacP}11fMApx~8oIEK77S|SS3|*yg>O6tNQ-1 zl41N>{usm1y|6TV%{Lec0?PreG5(JlC-$r5yWZ_Klu||;?k%XCoe%)Y*k+D%n^U^f*{t8ePtj9+1B{T+FJ{6~9$H1f8~M)PVN zgykvhSF_Bv%CYsEDHKs*X)N|Ex;OydB53fnOeZPiKjdggg4rAxjh$zHAx6i(lr6o3 z-8=(NsMu$WeWZX$zV2eGas_D*ef9mXb=!bOjqC%N`j9`)oc*+0jmI9RQ0f^@v>^&b zp3F`MGvoAJs^TFuQAV&?n!bK0ehH$;Psc}ky9?qM(5TC#;ujs6 znS))SgyQg}I;L3|LzKjw2yY~%((S|HH@>t-jQyH)HhLo12_1C|>b{ zcxcR3Of<|bB5J?f@uka-kXzB^sJWIHhlaAYBQ!t1#m3cXPz5E#E`QlLH5w_HgEk5Z zW;hkWRn-(KRVA~4TrP(Gb6w}#4Tp?3@(g>4^Q+b9_1E9fSGU051RJz$DKx8xYid_| zPz&A{q|;uqkhC^<22M}kC{BvU4@(D)V90u^yr*ae9?B7$8jbm-uVFjoQ_+I!rq>?c zbUI;GKxH=P4>~mbQFI4ZFMgFEWx6T~`z%LaN3u;UkGy*@3;qx9IrO z34?a~zcwQOlC&JNwso)zsc!wtbDMg48Sb9`{t@HoU{By_#wnVmp+QDrWb-NFGnTQK zjn42SG&JJ|1@b)By9WE64L(o$htQ#2>t$J?oMDMOehxktI>NoY4(WDjdmClM4$-+W z`<_&RB3rBu-roe+;WDb73YDXn()}mVE%?}jO^YM;j`xt<}E8dw?>f_ zR!+Uhb8bM}Kyc9}M4XoTu)2vl6fP~qvD*-Ey`>pnlrfwcNg=Cfk8@A34_ARUqS@2l zo?bqdY8`o@R#&jL=gmUXWfB-2hXK=LW4CY15{t!oNEhb3S1~xB59c6?qTCL80KGT+ zaiNj+?w)3Gga(+Wh1T`eFt&ZxMh16eu^$U64up%U5YQE%&xWKgeS97?3!2qRgC4&x zc=DI+jUPJJ(%C%wCo`3Moxl8MauJv56GC{^zCCL;YAH&bUnKQ3QkvhVamD3IMlb$@Kf*zQbxgHF#$^pAh0$e4CzWCOqx!*}WUrH7Rb>OLKul zWA4^k)3xnvg-HJ+GJ?L2w()S}(n$;;cnAyHI+N; zUFzNi@-6V*3F<9NOjU_I@To$s4h8IFn>7(~@GpH^exbg+MR&2a=HP8dT>h@IeR*VO z2Ana(aG{x*+^VN&R$_oOp{D=)NQM{-%bCG3#9^tS!lH* z*_K_$Go$9((>H8H+U3frr&)f|4nK>7WjwOPb7y>(hpZ!$PL$VnZS#r>LEoB^i@= zhnR2kKO$FJskGAS1OZs(g)>V@*N9Y#zx}&#Z7Pl@qCs-$Xbv0^nh_jAIEdHsl7!7g zW|ox4MQVG87kN`JK_Xr4-R-#z-Fa&cZn5KX0{*TcOFsu=P>K~*O~QwQA(4D`lm32) z^3lYHpdHJq|6AdFymaPlx~pyDZ|58le`nr&xhGzbiqJ@BqX~A;`gYD_3<}srg={OG zXVVCXiwJLCaxsHu2G0{~jN7^8YdkL!;dLDq&B6nTYtn)OvX*R7j0on=kDH9kX7K%m z0cc?@j)A{Wi^uoid9UtA&GQV0yCV4&Ih;?^h~cZuA{c$nEHnXx3@q{PL-8$_RMt4r z7vz#Q6ybfmyhXle$mdZv)K!i8qO`ucG%!AI3&0oXtLLljgCEI!k3*>&#jys7yh0pY zPgon-6+4I+9?xJGa_OQ)D^oKbn7lzZa;^jwD8!{1+ya^?ifwm9A#2no!EPdK-G&>g 
z7h+Y8iLKaIPp!jg#O>S~C{dTLovCm@_gcS7h?#WPmMQ#V2Bz~|>UNLb?SHuX{(2DZ zygc5yrYwrT!p4rfqw3?FjoSsMtl9>!{Ks^Z32T*)RSlhq+%FxMzQ<6}{4S{#& z#8nLMcF*;Y;8j+pBj;NDOk^-HXNu1@P3KE6qb>mKiYh&@HvM6C?e=LC#k<=_8N9|J z$5IwgfsvUZ9F)^I^ElXakU6Bs6XS}}LSaAI*9m<@*q zPj6tEsgce6W9U4bzu}$8^=bY*_N&;u^7yDlc)yKU!;r|Lw+TDyJ9`B6Zy2`uDmG)o zEVBai5oAWl&dObIT|SziQ5+Y2(I>ttQBwg({?qR49phJJ2)(E;zZ+x>JbbTVz`Mp> zVc*RDg41)9p<5V!bsi<%I)yFcVK&MT;Y=zvo$1}>bx|^gF_Q^RBGeDLNdvaWiIN_1 z=tcOPj>4ExbSK1vFlufYTO{!n%ixWEs0P;MxAaZ-S@)5RX?OWgi++@91;v;8UXK>m z3VtK&^9s*@(4#%EbC;4@67ouXTRzV@7a6&HR!@HIJcBOhipbn11bJ!^9xLTy0CjFp3HLPkP!Q1el-9LWn{Km-nS5L}HK73f% zN8lJm>Kt2s`EzG-ue-7v&>~w(C@SAbz-HfCGF{wyfWpmR!q0^8%L6dM3_hN2q*B(% zxL*{s#lI;_D#kzseqb{Xg+v?8VA(+-o0A%Ym;S8pfAAbFLx!R1mezcN;hzYssy zKW4}6-m@@fX)^D9hInmz?n6ae{V4apHs|hwE5k zs1tee65)`i)uh(`B#tML$M4k9y5MEGP^IF9x|-uV15+JXh>I~TCWoHTvqogOkHl?a zEBrXxMNq*U$zGzuv{%SEVtho#9QJ_N@%8LH-voT}!SLM7y~Gy1k1n!nLjBDxd(^;q z*eo@DRs}z?)~GNF+4u9!3(V~H;y8&)wM#Zihcm}XHTKGmzwK=IqVL5EufE-Yv}ASl zy?q{J)BF(#^zQHe(fs!8`*1~tdmcY;4fuZU)!Qw{5`%w7S?7>vwBE6u8>PJ=nz$Ei ztSLOsN(Yq6$z|{K!o8fs6SvBdvm_c48n+VLv`T7Lm@}NTad!$Ge@|1aHY7}v3R;|= zydq~>ep~tT(6t!m2?>ELYw5lAH}md$xNFub{abV2M1R0z>nlT$y3N%w8xh&F>gwX3pRv~8)CzuBX@+WYjgNwJ1;9QD~-Hu(hVDTcKC3A zg1^X^qV2>{99yQIN>&{aq(JJWvx3R=hwxQUDWX^!5El2uif=VhHelN9dbACq+URBYVA;2;Cv zxcTUQ@xWV)Ul_qdei!7KDmSm3MtYF~KLq8s)(;;~g+SFqT~PJzT)3R$Gjf)AAE$~( zAB`yDjZ93527=8uU)AJO)~0zU)aTZx>8lUijw(J;a&SD)|E2c#_W1i_k?_Xnto=2? 
z60aw4=fr~$=D~ZeMP-~hc=gBYS5wB31rJAQ3{OACNj>{QaZ5p0vD2KDeCM;iIG;^2 z&bkx=2&T9cM`L{K?SG+3K?ifQ%~pN>dgZuPmUP6-Kqfl){=yawC*=N8N`tyK1jiL+ z^DJigyJH!6gN}II`Fqc)F0(5wae9<$VO^NSJv&MQ-@wne3y*(kS#XtNa1l2~U@pvc z!!qh!h}jqJjs7~8S$@*jz314-_wi+W&l^#XTtGyDyNf3MPbIN!^AoRP(sZ9q7iSQe zwogYMZJwXj{(1R!Ukb2h;^(C-#_N|hQ01PSZsF}i5+ z>Gxgd&-+Zr)vitNo(Zh}^JToo-xsKL9<_=WOlQaMzB!{B1;s9m1d4h;#jN96x0M7n zTJ`?cw4|%Pt_5H7sWlY_CtfP&1G88bM?V_$n3s1xC|N(g-`>zgKIlb^waK>tkjde> zjRPOCpt7rFEFM;7 zHfZBCdtBdW4j<}yNfVe9njCPslJfb6%lpYt;8se^=hY2Zmx4^2!fkrGoR3|n27X`O zyp>{9kfvAJAS1ZgC9ZTlS>_bRKXeI}fTAx0bQcm^!FKK0MOqZF9vRpNfnElFjj%oV z&POkQDk^{cjo;M#O-<+e#JRhXUScKOx6&4jXVa2}9cY+;FhO{_{M7C-D z*k;)M`_uGnl>wUFm{?`fMl@5$G2P(hcrsK8Lp5IMb-jDF&U=pOy_vPZ`)p3S{&2N8 zoJhsCwoGId*Pqi%t=QINfaqoHLWUDm7>Ue+fnt?gP9u~YQIsTyxu?gCbBCmz1tf4F zayL$@a#&wW9yZBOyP)tpP&5HV^_|edYyY=UuW6I-;xvS?{Cd(J~U86 z)Or}^dY8^pH*D_+7;q?{wjlR!072KN&7Fju;4nUfqyQwYx_|L#9(7jN$47dwfDmnN zmMZ@pB5*3dp<3FB4tM1P!x9Lz8$a@cSDbQef zw5nY}Ia$6fOQ)0e15)F>6#4VXPR|QP`&ttWqU!%%o^-k7;?OetAjMOL-(SiZD#U{N zaaD6xKQ+fpG$J#M94XpEkvCTp?5eBpbcK3#+)QEnlTqncOXDb@f`VsJ$n}QX#ge3S z{4e-7Z&O-5*j9&g^_dDtE#zpz2hY_wl_{P6}f`7Bzk$5tteP2#D z`)10O?s@;a5vBvLY%r;w1%E#Dr@Ht4X37;B^|(F$_+!iR{lk}@FEbOo>{z=%Drtm& z^SStJFm1MwTN731Fs?skRqssE7#wHnv^-_Mh@GHe!GJ+?F8-*AdYt;d@icTcb3&HL z*lx~3_pet!ps&S=dF6#xjQnEq5f|R3-KC9PC(P6e?4vzPqhOl0K3s44h9ktgU6$|3 z0~Mk=ZPWa)S~e)h;qr)XXDt*j?x~NpbGT&d(EIaJ_4z4By_bpo-Wu9QrT7zQP$6;~ zu(pu$E>JN!S#uASar7F@+AKt;=Dgo2O^p(po}3-GZcW^|AgLW3b36T1zmCP?v3a*l z_j88H3fl}v4HsAp6q#kJLbhWPRMn969w>KBDXpGMamsEL%_dY_4f9wusfNvFNz9lC z6YdpjuZI_ZepFZ{r*~LCLv%10lWDu(7mPV?4MRlq)X%;B^Zmr=m&Xz{8<)aGn2B39 zbf8(|pH~YfRC|0WfL*>UnLS6I17*=6xO8+9BJwE?X5dxR)N_ftd~2N|uAP<{?K^%j z;zrWUAsoHVZ3sVk`FBd(tJn!@Rv%hP4tkL!1u>owO1agln1jJ64bV(4#j%~T%qkLz zYSPsSn+F{qrm0@-TL`JQo75|eszu%va>f81pFdw&O$pb_z}Q4)6sKWE?vNSbU7oLu zEdGc_5gh5m+JCMzh8Sa7<&9!nlvK*5`=%TbY zr?UigOB^SXb_LnWYg#kbleWLn4ndyOrcrQ!8Y#t+rYKgaW^W`Ybis6L+xjPd-?!1F zfl#Yx<8BuPR%eabOg7*(@pdv9y8$FqNzF(wk~;)QfC4p5wrcj~DbHG)QrR^xSewn8 
zITvaajI8yx&fXQ7>N3zYvRg=PTjch`wqqECd;yy%^YkYju{&7b+8qma-H###&M zY}0b{FN7UNo^y1Xa(tDNXV(UI4rP`A?E*gYYWr-;^U#OG&tfi@7LS*|uC-J=S@GQcx(PYLH0H^M-*x)t%h;}C5RbQ2Q=$jmzI zOAJ9(sWwG$j#)Hy@ImbfHT3!ExS=;{m^Bet4Fe=xX8;gLR={Pai?Ba<5Qn?K&5tT9L>B1oETU^(`>M2HWAtY&^L-$Q-&)(Z=-D&0)1iOMxIa; ziGsl&Tagu9i{mbEp`bo&)#e)wA(uREb>mJI3{FZ(hYq$IK`%7c0P#g=OW z;i5@=37TPL_zMdFASlN};V8Q`DS+Fm=aoKHpaCcV^p-Qc-(w-;=#!$YRZux)8=PFd zd)g{^&{Glh9_Rxld%hAc{q^C+`o-p!o|~sje9k5vPw=^aUyn^=#4V5r(9K{J07omB z5d|Tp0J1AYqW>sS&lAE7AJz`WMnD1N0?@CV4buRSxNARdoOJB0z*7~Xd1lmS;3YRD<-p8o)uFuVc9ukUVx zwaxoOR)7KS`=22xawIMoWvwDG0|Zo%L>RG{V^!5+Y4zK$xA&hct7Z?Bk25~L`F^mw z&tI~^y6j>=Q_H{r=ptp7TpHL1acohxI~RqlSWJ*;W^xIJkU>-rz@W40Z_Zu@AwF|v zs!M}w4?yt{SN|mF?@B^wdHH?XlDypx0B!>G6pKoJ^T6$^HX zIX}SWY_@I7MO5*JfMwhH9q&k{rKNn&nhW+L-=0IXA(=>R7D=Yi2l_JsAQLAJL7_3& zgenv=q3SB7g=X3bSgnRNWBvJ}VH*9Rxa1q=Eh zHjrS=>+%3tbwsjb|Nh6OKlZ))`{)?8SGr>Uw)ltXA7`&|Nnx#&J^)T*-$4U#5|Skb zM;2TT7+nl_h9}zqm|R#Y1eL*%s`W#IZ{sEmfF5XU*{B8$F5tul18Wk7Ku&`BHTyf^ z%O0`E*}%oyI=z8C|No_Je~7CTBsL0QXsi(c-07&^0Vdz|L1_`($pXk~n1V4ddY3E= zmJ_IN-XTI206`$Qy_kaJK!8R8BKUu!56r+02>w99?w|~QdhVjv;iIAs1qS(s9q{(? 
z^Wu9Sba#nND)vql2_H=a<>%x|3KmmSrhk3-?IL+RZ_#D5mw)TG2Pv2C>D1qnTpE0$ zJN!K`@KCKS4OjpW>UeA#1SmEJS<4J zSi6|fq^&5L1J-RWp^D?eK|BT>`1SnP8Rf=8?f>57gu`d`vq6o!76YH${mX7^3S#e-*~-oB?}`Aw0GP@ExCI2@hf(nWAPw>M z1;p~jRAOB=7BHF5@3^gmZPRLVJz6!zcwB{*Asy} zfBnhbn}2y;8+lC4bPag-hu|#300js%(Ciqj&=;sB7O?gKz#v|suZLK>+RzLSOI5X%+wZ_}-}TH61#Hggs7?$Fsu#k*5mlDdxc z1qLf*hXDZl2-_wL0s~f9Pa!K?nhV3B*l3blUPq1avcbgDet^cckby4=1IPnypo@y@ zW*5E%c!o{|YDRi~GWBm$joY4-RQ3IYRQ@~qWzjVy!Gw!a)Eg=r0K9+C;`f0|3r-CH z>iWQC!fYMJnha9KEkQ%vp^^}QM1%-JGr`Fo?7We@9qQ_9f&V0Dc-{K4*1N@^@~OXQ-_zjEjhY?P@M_%vyQRakTyLh~Glj)p;i zIRr3l*r)-_@f+AKPk=!y2mlX07V-lMD6~>dv~|I1x2jLRt{=({?o6uqZa8zx!T&UT z=i$KnbZ}VVaQ_q9!{LMLP!HPy3yArNNB|n2>R{eRIx@}Q3BV%*nQ}l1JkY56M5}&8 zb{*wSL(Rz^-44t{=PgBh3n$P2_r*hbpJ#etV+HhRa3lk?3=jybuLM+W1Mg*d+(X$# z)+mTd1w0M_!F#u%J%UH!b8!@%i1J~Y6sU3jhBbQoht~UU>W*`t$gIb&w>>)?5#)G* z1-28&zwZxL2p~P_ge@Qa5aU>tlT0FMBO?4{{Y*Q*^M_<`4+F1GGHQIlCc= z5TFGIiVqD~qH8q(F4z|WbN2vJLBb0UEI(3 z$P}zGco=-JHZO#IHm`75#^67ap`ab41dJs#tlCgPGeZ8a3}A;`G+obsyIiafcLcL4 zSq!5He8AJ0Y7uB-WG6p-yWg4!9Z9#pe>3q)D_zaTz%m90SqFt2K`R)X3#IKa7=%zE zmSE+t;49?u#kMXAfNwbOjr21vZCjUV@|Jmy-L^)gHrp z9}hb?n6)^L9A`R4kiaU_T0#Io^B@<~(Xn0Vs0aFJ6@Z3YfGZh1`7Wo?wFEiwz!&}4 zq(a!bv9e_m#6#~KTG6HX8BMjXs}oXl;H$|O)l(^JC-~Vv1wB9ic<;)a!iSmqXZvmc zZEl9yRR~(ne6E0?4EzcBlc-cB)XWBni@^a^$UetSd2yg1)B+;7bypr3?E-tkS9X2x zsoftG^nbkIIm|}xFgSOMK62ZA46F=*=8Rwk6*$&U005hMp)4pMaj6OdM&+%vLjt+& z7?>d9-*xQsogPfEW=(dFlKf0@Y)A6#v4O{#Y3m#tzuw<>M!lgd(dRv#_s{V6x;{7ukGkC@wdvGBdomuet36vNexpK`&80^Lk< zEpiRD`uB?nX4Axgfr@^(9Lb;@v_CvV;N+kLjCbx{@m3H_JsS8s8bl)9#7Z8T;mGdH z8o&Km922x#2!IE*6XX%e33dWy*OJ2b>E3T)oJ~4BkW?A*xr<(HzIOiF&c_NK=4shr zpr8hn@&^jRM#Ax6_sF#c2O`7Mb1lQlvdSETbEsH^D=suMxY@QBPd=Lo!gzqOt@Z3JPE93oL2+=(m0VQ{Es9gl zk&SAVE;4wuvTvT~UPh)*n2?)IcUB`Pt}RuK*V}e=7qQc=eWjs=Z&TP_J2@eQc^VqD zpuhhnKWM{?|N8ZNeRO=0QYY!mJ`DDzM-qId#Y&_zh&3SK#(@~a`tI5j`tDS&U7A~f zyi*&qWpwxZ&-hiHx0NZNwu9K3sJxD(Rifh?OpflB>HbK=om}t7Cd@)Q5t78QO?^ns2`|ufFlVa4eE}Bf}OJWLK-4984v*a`l z+Uif6o{?*|+Y!5@9{1NSQXb+`Ifp$JNgKnE&}PhuPFvXCbw*$H-l^IE 
zV~xO=dxqF%hc}in>`GhrW`eQMtW8|4p6F;~6}Hh*iOh`?-40 zMq?(RB6bSr?;Cym56(#ilepB5q2_bcjhBUj6?5t4*E~V$bgj zbFQ4ceY3hvti2mUzTWTejc1q5toY%}k2`}mzL`ISEDfhLzM+lg_tNkjamOPYOIScB zXt{AXyYrivg{w0&`#{n{q9tV6axNW@(f1mP7e{n}PA!wZzj{e$)Zzh^iM5*gjs1}p z=g!+bSCNlzkCA_QHFW#DShwZFcsTaXnKS`MMzi>=E{*x9RULN!4!oe3Tutuw=I>8D zQ@F5?Al?W~A8~MshHqun@q*R)bZRE00}&L}dF1&dRiA7H3AfS~*QaIfX#pzr*RTY2 zIwVFXh14tQqx!ZF!Wy19J#(0(%lxpg;>3#;D<-orcqcWUq%an2FglJ?Z)e8&+Ih=3 zTPpaw3fS&Vjl1QXmO63QJZwB&;|*GFojS2G*yp#o@?f+jY0pg@8)m_vWe-P5@ZCYm<9Nxxnc{ zay8|}f9`rZ(=wsW$k#Vz<-BHU#wuE76aR@F77>df>V<@u3Qb(Kz0WmFHrJ?)&o$c0 zIj~8gI{aM7Ki8wUuud^qt>v|76`u zsf8Q7>$0 z^$;ecMEmotAQh#Ee5y1dRqLZyggBH0t19 zrpw?KP10p2^)o1yK9K6vq2ZSlyW4pifd=7yj15t$sb+R}3jo2OzadR>EkN&EXCr!~ z2i8Ql6@U7l!yiWI3nX7*HfHadc4L5dlr5Z9Ou}N@NktOAox)&SnFaL4;lmm`B#>ly zu~SCD#iZR$mFgz-`AU*$zjGnF=SXRi9Q8nM8<*i z`KSRJDZ;At*WCBtePxD9>Eph|fyYi3k2|H**HNo8=>c_eX5TnyEZi)mcS zqG+REHF_=}^RR1k7<0#6s}A;Fapx45@HecQBnHA;&gh!8u)Po{?|_>y+`XlV5iph& zFws!H|FWBmqfA>&1+mUucuvXbngyo0dv<<>&2tK|QuKjEGA}kceMl04Tmdxb`c)Id zm`;l$)47<;3?CPPbFq3K8z#&#*9-x=QDFd@;vgn>(gV3I7Zj{3irde)l8J)O;|Neq z>ad{LmQlZQr_GQWX6~@xUvbL0dDtF>xlTfX%!@Nq)!y&?_TG5$D*NNUy`z%d0z@Tso5cusIAWtPY55AeB-Mp^753 zwZca6{pp&Uz}{{*1-0n_8`E*5q-vxffh)@ zQ+!e|?|YP~I$q>{-@44oT%b;Z-lO;a?{Wxe+kd8T|5XkFnsh)=@bxq39n!+Wdp~sd z{CHz3x$^YcGf#(%V+-n&D%GZjO-*)RMDOKs*9wEH)_Y+ z=x_|OZg|O`UNQN&p;aEq^aec#9YDPRWrQJ1GBB3NI6NC8v2?U5!mEXY`^vPYtb$qx zrpExll&6hK@{b{P>wjB6`r6U~Q;!QW2gfc}(P;jS3PDMYTGLejb zxF(aL&}wz_%QP;csd$pQ^oKnL0FYTAo{?XMC}ev?STX>dY03uSs5xj#RRSj z!w7^UrdYs71skVH6}fbLm>OmQ%e2-m8b(NREh?@A52t{H_s3es>=^!FukKU`(&O4c zj6unppT=425Q3tIWKkJX%*5GE=UOR5v1t+ec0h4tK@=N#g?9ST#8l9pH(jl2<*e}a zx(BAJW9HY7pKO*~3k|Bk7gCO>58>(AwmkaewLtZ7l4ka{rA0mb;a*C)fZ}RQ&jR$5 zJhPr1d`g?{wui3JSCM@t8>(#>Q?DIdZVz zim1K~*6~fdHh*{(WMjd`(&*5zK@H#>nh1s4Ifr#%Dp+Oda=1-Vqrcy^-VV*vRPH`m z71q=c&Gq+zXmJxWje~rzHbMH?Z(M!mNdVV2MU!^cJ1CLi*1VWWr+6Fn(nlG@ly53? 
zRk8l1MaWT2hX9&i-mvo7tphy^=a+cFiUAf~7-+})*6%2k%kai#l&VUkk@!G&`9}qo zGoJU@iiuI{>PAo$G_)5C1DzrEx6KD+AKlw^U7hI^MmuyoGiqD8%Qxob#=!GY*`?_| z393&D3K-+E%ApwUE1539S4t2o=R=9^s{BlZ*fju{;&CawC~^7^pS*R=Zqmi1bb+;5%AW|gg$HV{55M)xOCa_AxVWpAhNm2$!-3iqmVv&!W2)NKWEUq_0)2O%}+f%^_|<|752WZ?9ZA5g8X6`TE|jr$6ys zj?0t_I{5~#`xM7m#86l@n_>dGKA)Uc!H1WB`vJWz^xw~KtoOt!XI_6DaHjarJDt~Gmd#|Vn~3uoUDX^c6~E< zt@z@mlEJw;%tCl_-N+RZ56b8MsYEP1&SPZ67LhclI2zCDP!K z2{;oO>VS6o01na_$MK+AA7w@ z@A{z?>X>4=3-sd;jIiCM;x7l{C*S-i{FK<*dVW0T=kmQh zk2k*I!_sY$5$F|V*K0W*AW^PWI$Ugbs?o8sHS1{>UtWlTMf)K+Yl=!Lk|_;)TbIW5 zMrvE;v9Di+QHF3fdDZ^88};hC#KnY&OFu&<+_W@L_cER_eLS~%Q5R(anrFe#V#iOT zQ7bL$x&!U9-w2g@uG+zHTuiOevGE~W^Sr~pdqPpD(;|6qvJyQqzf$F`Dpcsvz60lB zMGc>q%HK zVvdDyw;uo~x*qIl*?U1PnjYX_;HpwDPQ$BG$flQWe3dquZJ^Jl+(&Dg{9-oq@c8(5?{@FzL7^-#ZGnkS9jtf)%`)y}rN7Zm?k|7bdiZA`K z^agpwF2sPuNx)|{k%y6FVj!8|Kj8MhC`!ijnv%it>YEC;^tj(wIZ`8Q&X0r&hFn!!)$Nn;{D@DKmDrZR1 zIX`7U#n1A@ymP2JJI6Ljr!KvKqNzfHke4O~&cxjeUwMo-(UG*6lco#Wg}@p54>RDR zjwBE(Lu%kF9Pun%Xi}?ODtc;&EU9`CU^`>wgRCy7t;7QgngAUb)FQ~U>JI6spmHjo zYSzgOLUEr!wU+?!-ra^eJk%!IxqR=~l!)+jMx*V-=Y4L6G+ffyHD-(V?NU6OeB!*a z9-qDHP-yyw(Fppe2K=9v0L}G6`(0~Ex!{_IkKB>8*nMLM9$p{^EE^6(VlF#*@JKY* z^4r*4gqvZ0!f<#Ko1I^uw28}xH3q^0+;@;+>WRV33PuPSCjy7WEL71=3n%npjLV#+ zUQ*X2K^~VfPjjB{Qm3QjsJgmD8}hG??R1p&V%! 
zgGX>|qW})&aD~Wns40|B1lckp21LCqe)=Yf6-TwswX-dQa0pA*%>d|XZ+_iL1RDXUgrB8<2#IpEo||FMX@+D@>sS(tuS zrc-fL#Ecf}rj8g56fMSEKDZV9hV~|(kf4s|MAW^RPR|-jUmTwa@wTyo7rE*p0>^)}Plf&?B8ceEhk&WH^c3g_*iq$ki zL{r1Sb>|a4m@{y^C6oceDBV}g;SuFwO#~zVLpOC4y*mMQZ-%WOSSGnry7tTUP53zR zFbPb=WLRAMYU8om3WYn}ovzE+`!U3Nv)W-FxQ7?MWVv}RZE*H=bZ@c-Xu)a@3vdTeRuOS$ zebTxH{@giVq5NORIY=ZnVVKQe-AUs%sv{1AfllLwG?oNA&+RDKqBe=;lhg#YNh2(a z0bmFPvkm6_`<3qt53E62%-*l>J5>Ed{kjYvr#xM$H_o3gLa7V_>Jdq7Jf>rl zJ4+2w@z;Z%vW*?8TpxK!|Is^FVk4IHYVdb9aVuq~UL8jJ|Ni;e+BtXbW@1YF{O;@1 zFU!7{#2F;f(9^-ORlW`{t$L-Aau%Y9z3Bz!MJHLDI^3X%>?@+i;Vn*4EPYbc9O|fs zIc&XRh&U=3#=tVdC?{dnz{{MPMp~8vAtH#CXQv^bPucGkTk!WUd3%n1v-QOC@7$I+ z`o%3ihvW}684b^=E%<=6pN_0|-(w``)k|rpGT^_&ndk#0@iM&tRd>?_s*VSoKP*0j zLLd|^Pvu}8gKp`Ocg2P~N*`D3qZx>4bY_-W$p%w8!NR!j85&{gG9c43LTs zd@LMW?CCxz2k3ZP+gMmX)ty#`7r`7sLFIk^#LDyVb(8%uv4jWFXd@uf(MI{C3ccGS zfZFn+;?}wL57e)Qx;IDE{0g6Hu=d`ZqQO?$_|B6YGP25+#@Cd+Z9C)~uz?vMXX=G? zc|fZ}jQu6y05}bsGXX<}jhVqA z$H%shbMUJf{wWU0z5--`74Da(wulz>^Xa!pk^#OG?mUk#sYZ_imSwn#D#aT8g#Kyv zb(wsG#=U=svLm13)5JHcY;LLt`H)sbTRYMvx+)AdUac0z6u7+ z6Tl!Y`8&6u!9Q{ZMN73vtZ!nx3xlx{krcuUE5_G3;{b0ffnW*u32i88l7g2dIe3>t z45|wUmQnB&u0$rl@!=i{L{ttN1hqg6V5)yufoQ}>i0D5DK;2SdGy!tmlu74h`3^xldXbGoMw3&Q6fRQ2vrd1&bu*fpF2(N7a6WgEu~VH zMVZX#3(f~gwyltftE}aQa^I=ulhERbQSE_JY1Y4S9C6xW1O9B!l7-#~`qyUyF zF15P)jlf4;e8uXvmv&L}%_R5#dpQda{ZA!gCGb3>S}u;SQa=6h=B?{DuRT<19~q|q zrPL}OcVFxOQ4sjhF?qDWfO4xTJX2Zf=c~5G1`fPsgdzK7tU1G+!U>PJOx$D}8e2Hm zpBUW*0S6)aq>gr8hAOz72YHpVY#q^6W)h=B)i?}|Pl_y|%%sXg2MICBwN{10ua&oi zkT99HFN?y(S*V37qroShi@y^oULxs?il~-~_ywqw?F^SnjvzPz+ib2Vk8g*T+rI+0 zy04l22mXIXzruaON{5xs#d2kgkP{wL2RZd=iyJgz?{qKDwG89v4vN?+17sFCX+bXK zj^v7@o71VTjm0bWIp#TD(Wb>E;n(4~^%z2dtKX_~sM#*`;<7;F;BfSN`kM^c0N$D8 zV|8r(<_QO@k*+<1x1$2k4;vioyM$4}O9C@FqfD{13ax2Y(Juuxy&8LV_ z50jnH;x=BC{ZB8ionS6L)zqD`28AtPBOpzy-3OwzrJ)3r7hT`(L@|<9gZ|z)FmY9X z=^ab{<4+HMZ23OGZD;nMuGI8#yGUlC-el0Hu1buOKPcF9(8I|24<`5Twz0 z^nJz9g?zA*Y_~)lpw8xwXnV*n4?gPLjadMj%mubzu9g!rn8L=PTCKyRz^93`)o$~m 
zrBkyF!QAj>++wlPOBN$~QrLKE3g7O9JiwDJZgz5!pLzD=T2Dbb59rfiRT{K%wW0($ z0s8KGOzoPsC$Cl*U*2*WdlsI1RCn~;deqrZJlY-IegA#%C&Ov6K?!5F^&u0M62>Nk z)R?g$4*=nv(ZCOimPq@9^-VyzmzuGoPGBxT31w)QY*2> zdGCOp?XkD#|B0W^cMFE0<#odizvEa&?qocN9wCK*)(mqfBT?UUXS3eWj}@=_Kci1Y zB79_;ZND#iNkcNzKzmz-vS~kkqk~%mMp71vvJORtnP;RkZ2(^$APFW|TU3v5jZ*>= zOvve{A97k+*pk@8u@h_q2wJ;R=ETR|_)^cG<^dEy6cgHX+@;Wj!k{lRePD2m>O zo4}28sgbNh8&X#8!>y}#x@W9-KT-pE3I`L!GbW9EQeHiiOA*k~aUl5L*U?^^=KiSS z&z1cQ{v|hnci8aGchIo%CyOVZ0WogSu&RDI>wt!?xRQn?)5JvE2}e@V!r?_sB%eA# zoHWU?14>jipy5b1b`>fSEgThXZ8^rzF9u2vL1ZmKI_NO+!;kV^c|f8HxxgoI3C zxB%c5tDcFR2#R*A8ZM_~1MNuPB{iNZ{o(a7+i#CPMa-kFE6A?pAwN$Q4tbe?yYNe$2o-JP}{^)@Ocr!11iPN;)&JK zVQLv;u@El?Vr&kR9pn}GVr(Esn0EP_?u|RwY9CiRxDZztX6ZJKrQvSha}sq;btQPB zf&(dCjSxWlSAieEi|y{uRP0gNDDzj1R9hA1aDVod!|Hf)6c;REIyfd~Mf#-28Y)MI z?wy$^)$0(1Fs|u%>KpX8&HM(xm>6qH;!iVHPTWYh58k@SOi~h==wnE0>}Yx?!nkte zfX}07jvUq$iVw}j!qb9I!SRIfN))^t3CQg<^fFE~n=(O8^1dAucg{#6- zxG=i{=M_mWPX6S-fJvYX`R3iKIeqG6DyRF-&1x0HrS0|g!YypXF?G5mw>pL}3)*yn z%s85oA&Sp7$7&q|__ZB?IVmoxUa})$MP|c;cP>3;Qv$ z&SfCKmbXGr`eh2`n{hH=fJ>O=#e1?=Amer@?*OR{{G|?yuT>AGi#G1CSq7#RR?B^{ zZ*WEP34X44BH7isL8noA;-p3E zp%v$w?Arc3FM&{Q>l{ly@No0fOGGQs(N!xpIM{}m7V>y3GsB-0orIL6k!)SEXczy zzj$$BGkMR!g-G(R6Cm)n*L=lRDBNII@0*>$w4>%71|T`>mA!H+%#Hd|{w4Ly`OSXh zoblR#w+G7~hmq4a1v$_qZK5Rji6OoLmrm`PFN$p$tfrnGr8g-Tno6L2!fKBg8K65= zE9SdO{vBiSr3A=Ak7^<Ln5k?Iq@1eM7qTW}1Bc_Ievw zPS%3Yt<_~cB1n_k|$?VgSe6CWIw<2vX_Yl}gJ!A3irGFZVaL zQMz^G$kzZ#G8Bmpu7(+N?+eFpl}B#GNGlD;M_d`WL5`hzyLk+uf(^j$@mdB#hOo!L zpuf&@`0wvl;|Vte(-y03j^*fRBn!pRe6AP4IAA$AS0NIS0FX2SxdO+F=F0HaGEt}! 
zg$GQpVxD?m%**9PRHu+g9NxW<<<^<3wriivE$|Toba^0B9;r$t#1v#|UM+WJJY-qj zMtZX)Gso{k0S0n2hXN}pjuXmT-q<;Ph`QRGTOT&lpQkMLlkS97GXzX3>1 z;coIqi(DK64~yC~GG?=2OH4U69GD2AOI)wHQ68t%&FpMs5gBax zIaTG7`N1)0fh`+FdgrGTblsBq=&5mFl7;}awQ{*`W}u6f=@a+1vys%tQ<8`K#vKL$ z@yFF4>Pbg&=UdNc3EoX1&K-)pO$#r8DE2c)hudxGkwz2oUXx)VrTrYuyOp`}UO-*V z#-)%l2diEx-ii(e2n+e2#xA&Mcz}eyWil1?bPd2A_Bj{5)HSGE%0OS}O-wG0Y*|N$ zT6-t!p3eMI5jo!q14mbT`?MB9dfWu=ZZ$PFji%#Sx~62_j_C{Vb;-C zO*)mm>h_qtA7_>sNj$wUI#4h0iEmoEvvyULuiq;8A<#&OVdkf0!G62pd{lT*$7GhP z=HCq<(3T`h(9N#v_m=vfym(^nQ<NtYhnZs`mm4#VxU|3r>N^r2dIK5RRY(eXWCEO3Qp&r0>ltOY4_&qh!+6bkI zwH+&e`Cy68j`i{Ti;zF8yhfOh7H6$C=z2!|FaK-L-BGifRdITL+1qP&oM$^9_dJ4R zQl8z9GD3EcK7A97w6;MHJ<_?`cdBDpe1l7qj|+SaQbNL5fO%wSl+r47D06EI4n8g;NqPOwLKMvZH< zlZWdco00oPkG@@PUZhrutIXulena0yfB9)}^cr*H!`t`7fk)r2`)fAt?fta@96C_; zNAKAa$0VLlig56f-j3o0$2h1k#U z_H6S>p6&&&ndCs~LOqIPokvxe8_?IeEwfu^DWOjs+A9 z!;FnX6iOSsVm4sMIgY)hR`6>@tx5PMT`ONj%q+3Q)Zc3EBwsjx?W@&EH}a}G8&($G zdOyD9&56z9UrZ0zPagAr8u&x%>wd;o-#;-`efj1QeF<4i)=sf!L-j!R^uM7IXa1+a z0Hm`)bH3mHj=uWCm6mgcow5qmi(5b58r>y6dAf38;8uI*lk2h**ufito%8P=o!&R^ z<1a@3a(_fIbnstSR_#^|vck{?#%Swpr6Nz&24W?%W06oJi&3dWVO>^o*z!DS+zC`^ zXEZ=noqlTDSD+8k-VCWSIb8k?RM@%mR@^R&dQgm;$sCZEV(XV4>u$WSb5f^Qlk03_ z=i$u|(Dmy-1;fWYpsOZ~}_DRbC0S8awl;$)eFgM;@V zBU}=9+X-|=BD08_`>??8IwfGt~s!@AWc`S z0`(w%H=6eMICc+5ZgxO(1EheKJp=b%X$M43K!0n@QSHj|zL2kuex=V>`v@{f<0qfo znXSBxy|-yN(Y++$3cNSQ7i1NxVLxTVeO{0xLzkWSmPe4a)(j+MPfkuzT?)-v0hcJc zQSQ&=&nW_kfn?A1ys)?bkGaIJ`$ppB=xSH95EqJsG`{DYsSRtaUnH&$IW$7Gv!ea* zaQS}g1x?x_5>$WExMf}G6YblIp0Za`n~Rn2!`Y~m(+yqo`EoLKU}T(Q1(M!^fW8yv zu|9NqhQnQ>ZAU9W{P#3vXxkzao31WEP#rYbA~iCDErY79JR+RV|-}{Mz#*f7fvC zFNY!saV59}>gLgR6%g;V_FCrg%c^If<;@Nm6j!fwl>ObaYphqzf%Cj{c~yu*P;Cs& zZH-lLpUT1$^n`8qi0wB-aPO`_T(3w{MA7rs?P_;zQpFL-qJ4z5wV5ZZf$$$g!Jvdq zaw9R=G-xe@L=?%%TA)iP%YI%^YsqvX%l98Z3~#mTdARsv$AXtVqwK9~^9NS@gwwQh zXr=SLEw-P~*#d50+H9rZK^-IzPkMv0hHq}ZSR0ah1J1#bOKucyE#Wl@jiLy};2*?~ z3+Dd(g{R|j2Yl8JHE>a%aypN#bXnwd1yF0cOobMkr3Kh=R+jVTXE?wrm>G 
z#RsD7SRUz{T=}IPq&0*Pl#~UuxerfRdgwIHxwu#=7_Z`o;c1vvYCVtg*{#j?3dy~E zUL)lNyJrq{BzR<&VebXcb|+zlTA1B_G2XOK)f!(R(m0SSkRFX})vI+0z^4rCF$(KG zST64gZ1TNM|Krl{FV0^ObdO&A0y5aWN;=Zc_O=rppo?~M&py9!FZ)7nEe}w3z2*f8=o+fy<5)j&QKWnNnDGInT}DSO1a z*&^-`mC;D5kt6o`!KZ^N{bGctjWh?R@1kCpvrsTWedayp$jbgXF+f<66Z;LlftmKbO&EZnaDoD@vA> zWOl^MgS-GvJG{dAj8S!F%hrB=UEP~kS9*tqv3NKJYaaw!;kgjQeWNkWV58J--qPqu z%vq?#U)O&m{sY_|Ha;wL2q_xjc{XBjAnBE-_VejDJCS3VLIe(NZ0a~%Pm-WQLW|w( z8AwM0mSM|E-f+d-g>ueh^0Z2mZbqO{7-8kGNf?Zx9mS{U=kX?PixfaC+aGDp{#YG- zrr`9`j>$|nuX%2bB7I70Rxr!jq@buFh}tg5Y+3BwoS8WwsP+a`dwE!WOjx}RV(xbu zRaZd%Y%g~K3!h2-kGUx)O}a4{`NSvVF&A-1G~6KIPqea7+mH$`L!I(z5`56kPGL>U z-?XN@G&ZRo_Zj~$)XpU8`aKM&)$Ki3`U?3Qq>0sPi7+MPSFSPHot{t9@EC1boDfy+ z`E>f?czFJ{*GUhKzMkr7@akfbxexRC#(k?j+-ku^46pfdjnra}dhqE_H^94xwzMir zxH~M|y(Xo|VP$Tn?OLKokFz<3&b)H}`}zB~o7>0htqbG)Gw7)BNr!oS9cnWEAtrL$ z-N|_8GFQ-+>&55CV*dDwbB_JMJvB7(CGoGbx&+;8|6oEFigCEjytT?00Ccv*W7Dbp zpaZ5NXAu}khh>qp>3nmp($xlde`$6J8C*G{bD%4uc-NCQ8&1l3o1U|F;L%### zw%JvniKl1QR>xa2DBAfaeEPNb)=RtfHX-G|ahH#sdQY{? 
zD5IduMgXFdjhJQ5S&pjT4*EMQFV%MY=b8N<{1<;XLJjg4o%?I~+L2WsA94p4eX$Am zY&^Kd=v}!pz z+$TR`*yLNY?zU``Q+62VjrajYxpgV`xT-g5#Zc5QvT7h}NN!7hv5 zfp#&D+foV^6m}mgt3|C6uXVwBLvD^$s<33DkHg>z%hm9hfGvPA{;n0MPlD1(z$g)9XILtLLh8okU6Uls{pK9TQb_)G60bdFeVk#&z@kc5|NrkzWJBlUfAOTcO zwg2nb-k-XR8#GECQ}lLsTv0QV)-LUBrH`J?o8CvXh%a(}(jF2kE;tyb8J z{EkbK#$;e(k&@@0PuW0+EsmeCaM(MrdT*t+Xp!pa9Wt~;L*ufbf1Np^9{EMP(Nr5v zu98I-TB1tpF&4-sWK?fB8K9xUEBe`Rx+tB(HFDYmM7h+!4f8B}uNIUlYW~~)U0)}@ zMN)S<=H-d3?!WDKBwS05TkS*A5;AZW6ka$8i56d7C4&vjuGp317NqO6-B;$|QPgkA zu~Q5tC=IwUbJi6aP!lM1C`Q>uvMSujuAZR<^2kck%H%}!QecB%nbHvmx;-QLwGftsDD)0 z$xbw~U3?rt!8K3IRap3iLP10o+<-w|>&3n<=ANR8xc&>ngqckeORMw;fCF-Ti6+dZ zM+ImFm1ASq;;=0QqT0KDdY>dry)RNFqKe_oJbg(fm({#Z+%-ZWj(R#Sz)RF3wm5S{ zuz}9YV7Hh?0FKa1H6e@DRG(36X`B z4-DHd&+b%w^Yr-yjZa-Z9j1+^?;q+CjAwae{FYp=%m)G$B= z`-}TnY?fZ#;J!FsoLP(Bo0Y{>?l?Gi7_yZ9T%S08e??)d3+D@PQV-B`@`=q%BZccZ z4_Hv`Vtc-R%VA#1^j>s~19g1kOtrG34^y`n7LC+CF)x_cPvpP7xVmH8F^_9GH?u|8 zoaU7iZ;La3j?^;==o%pCoAUL~j=xk+WJdgHZn*J(Mh{8wK2+SL_6DC2l(@*Fh*^w5 zh>A=vJ~Uti6rK#pS)r|S*Vn;9>`TT3Zfb>eX&mLz9+l}Oiw|({`C?wvJ>*x~^ko+> zXv|$Kee~!AX7g0T(K8LUtV>HtZQL?yl}yFAyp<3299VDwX;9BJO|L6gWHZhuLH3+U zy%oUbhHzN{y!^`iES>k|mA{^GZt42wZqY7gJUHhm@qVuVu3@xn$AB^KgMV)D&pZCl z;6#k)Zm!G$OB(LhWfAZ9g)!JCX4@G4oSoEgD_>6|WVb{%VLfY7k~7emDMvey<~@`K zqrSv|f<3$f^ zH+kZ*H_boo1Eg@vj_%=83KS7V|L(+cPxy9D?uQyN5S3;uZ ze>?ICllAKl$2w!7)ba;_9#8_t-~7aFe@6?#+u14?P*QS4F^PRS;~k5MI$0)tH{I0+ z(9kQ=m2Dn(c~@!-G!w~jKYzW@Ina(nz>LB#v?hJM-y@(UH4NtQII`l| OG!R)Oc ztUCv<(9X|8drs&{=%xXIBoPS9-w((GMW<^h`$g2VDr3Ed<6PrfVo;OZ-9S?l#*lNV>|DT=1ne0X zRXYf{hfu}_6G@MbqwK%pP!7cf8kCEb#eruo>tZe*D)2Ky%CA%qKIZ+F-)!sI>pc;- z9JllB%W9b}&^^lQ4L&N~I)%G!&A)9xbzq;i6_jKM*2iE-$%}uqW^v}j_8MAn$$0W< z+N7scs_1+gZSmxKb+eh7bu2`v+(t zqN0Kqpv4E+fo_gc-yn_bs>%1F$1bik&PjN_rk_U6q{nFWF-)~&fp&;M0zC!2c(UX1 z#R~n6{2iu9PH6u6q2()kb;cRpL_-NVAYgw0Y`%3rMWdJ>&mZn0?P{%Fr?1{|Mx#jG z-=U8V+`$d}VH4TCdzL?Xr3Cz~X#0Ssy*&OSO3voWjRPVUUy3KNDm)7K^&}UOfG`O} zG`Q;Ac0S$6iWldiB22V=N>r}f{?PYH*k+XAES*j_vUMdhiurwNOOTw{fpfV3&(~i+ 
zt$61Tt;D%mgx)=qG-Q*gs9fG0kX;)S3ks>r4IdFbUu?hkEV{Pf)6va1tSfRS5gS4g znj6##!TpIxq^oED1==EXZPNz~^Uc3mJ@7hWrp89?Q4Z=}ueiAFeHVt|emB0IDfiuw zLygxcQPNT=dG4KodPpLSJs*Wv25PffI0Ozti}s4D$+AB>bR(N(Zg751Lf52g6ij*L z=Zb47CKAs${(Il9PnDM2KkbAT%^$kY`e;7baYt78Dgz?_vMCaOl8q0lm|G+@?GN?* zY&F)~(7oyFU1!FcBisKuQS&W%sUE$Do*yY$a9Zsvw%wa)q4Nj{aJTjV?`joV#YJw< zYu!tU8?Actpm<{X(Q@c>Xhrb!>DwpM|@b&@E z8)&DRY@@DL=Zi($aM(Q==sU>!hXg5j9U3hnB_$D9iN?3!FGYbe(ODq$WCMz zgSoNH0CEZmE0>Iv${iQTU>@Sa>>~tKm zS8}bg`q5sSy-1=D#R-Urfx^>4z78QFN0Ylx)$P~@;;gTOv~pV75;|W?c2)?@gcN1^ zV!oTj^^2e_hi7*O=aUDq37(|?b!a8{sWqe0F~E4+*%kV9rpNMLhbv4&C8Q(=rL$7DZ%{P#_us-Jmo1$W+ghClDS33P zsl7JRsIzx7AWs4qT@U;O%d=mCgE~RHc;P0oX50H#&2hpy*mZV8C5w{h$0ZVB>G$FDRIZqX+&xplR&6Vu z1lPebYC7z}3ia-I>pEEbcSpYGx_Umj)JX+*&~XmAlH%UNCn-mr7WMaT9s7S&oq1G} z`TzED-;fZOz@$JCaVtPH#Bvl-LD3YK)XY&p6x-BL+s9o{Gz2vvH5bG*t=uXrXEJwF zbI+xmCR?C?52R1 zz8;!XXjeu{r57e>b-R9X_-YTC{ zwh+wvbJvQW1z7V(_FpQ@ZgW)n(iDjc@*>mYeAIOv4xhC%v>}PA*2RpvR|pVak3V-e zD%1i|Dj|Tlf>1u(5KQyKR!tLZlH|l3w?%D_bv3feN{BG#9nh^a8Gy1~7pvZ>` z{xcTPaSgN(P{4goZZl64e^ibm^NyM~_M7;R z4{>tT>h)pIFc=T@6yI{y7`A;ewz=RQ zOM3u}o|AwjJ$-kjAQ0Ec-NGIEY4+0BPrQed&bDQnj~*@_T0hdoqaDiO|Ej&?OG!THgz^Y{%{8nX7b*OsIDNV(0l)R5y`vOXy!Nv zHpQO{IT{g?I?70cJE_CKsNrM@9q9LgZ85ISC7Br)TGzw9m{x>K)}@C{2tc^N#|`X7 zgg;kbB3nivy*{@OY+SA(vgGYABTv8hXN2TQR2Cog%CY+NzS%v$(rOEL*MmCxI>zHQ z?`68`eNXiZ>oGd(r;Y=DeMd7)O5c>C>=3h3fnvroHES)J9B2Zd`4~#;poLfadzYVH zmD2rdTOVFK)!mmX47=Ns+ETTJ>jI`d%d&)+1c)hUw4@`jkdca$i1?T=^G|fD1h3%X zC?wAP%3y)ry1OQ2RI{|{?QfyCVd}%hD1BE%(+1-tC_#_%$oA&Pir(Prz@hb8)5RmT ze*`jgb$`<|p2-&8|Nh2sqKi2UU%|Iw3Q4Nt6=l-w$v{R?EM-L#xB&GKl77$v;X&VT zMp><>v7YGt-L^22o*9~Gg%*bCb7RPg-JKHK+%}_H{?8rD94_6gJXX@`x)G@ArXQ(GEum08Q3_0Aimnq zU-s%6!v|`3)U4|2?-H7<*W4b=k*}7Mo&6zU%KgnwptGAdk0KLH8oJ zgT4<_j%TzA2=>#bKL2+~(qcDCY7ou&4`LlMEr#m-pmBTZHFX$Rz0^2QKXYSy{x}nh z^c*%ywNzWNOd){*FnbP{*$}dPqinjuJt*S+`NN?w#tft6g@A>%C=?le#5|(f-p&=U z4B1?|j(Qt+^KQ@8J&RUM>)q>WDHZni`<-;}z7V*=Y%+g1S=^fN_3V&0GRq z|5Xcg&i4?rU+YjoPbn$ozsulWid`eIw9_ayS@H?3%40BRD<`(<+8&62e(ECx0On+{ 
zmrXN2KZF(fzT;t0FYNP|Q_tqw*U8P)m9eV^TjGq~aNo>FGD2~)Njj&0y|nTqOaf?a z@1H1h9Lxt-(dd`IKKS`H?^q_L!0t~9`APMt^%fau6gXMuF|Gj&!I zHf|kIDCWOgM11|r0h!?+(z$#|DPefydc5D+&uAkV( z$0XOwNj_fEsfZ(zhPu3zYb0~z#Zl#GsAcb~6Sq-i`d?+wEtP^1imVEXcy6?!BJ4IQWmLjjuYs+RsxZ zQwM$bMRYk`XyALz2Q~d2ti3t@B5^s$dCt~3m2aUM-*(X%l-ot$c}&i98CfEkq$tjd zsvIls?^z05=2@sVl}A?O=~4OZ#c+Quy+TcxQVA66H5z+=(vfi+HG=R&Z2l-o%j-}q zpqTB9JND)83r^(SX$}!cdlQC8nqS1@K%aOr^Xu^O=B#(}DrV>#ef3gCjuDa)KnXOL z1Ar~gR+`4oyUnksn0p>ZXnpn3a0#>$IyS`id&GoV+lF9`Sec0q=5}U!dT29psHfBp zqa`XLn=o?91L{-ZY?yjNd#e9dhG(!AC?eHHGb$kJj!yklLqa)T$6dSRYC}lKH%e-7 z&w5+j*L@wooc{Okguh45U9%5=8{lDo-gGQ8QV*UG7`Z2R8H zk7Xs_cSH~juG@R)gPtB%JNGs6#Daaw9?c_v#y&o4R`Z9S#TL&4?r`5+iBVnl__Hv1 zaCsFs7j16fHUzh@U?#Ij2P7fda*JjTc+B^(1!ov2I6fP1 z9**_#!h?yy9&btxJ$nA$;7rx>QR#jP4z(6b(QQb=KFd`rVsGZ?}OGSJ5^?Od90BD zPjjQ0%PpU)Ce*+XWE6&MG}w^KkYbc}Z0kJD_y(^6K~-%|9FIQyI)Y=!IaGN=q#z=L z8vH`0uY^Pf-voK@C_1_eY2;)meL{gPlP&yqn+5h9IrX@yRD02|k@0|l`-6Mau=T*{ zUy^*8WpVe^N8J8wxPZt-1Ue-#&MojdUK){Bo+O%K7or|mF!sFpl`JS$) zPwiqndXW#XZ?Lo>Gvb>g3y$@tDWAW@<*z;ItQmZ#iTwPlkBiw^;dZ~tkIS-JQ|O(d zIZfe9B-}e0<$i$3nM*1_6D@O66brFThIBH#yUcY&L-OHUZKAAt z@B!V0k@EJSu$QH}E;t-)6SHH+f}z5bFZiMzld*?he444O+*GdB_D?0MkZ~3!RJ#|t zRI`QB2NKpL?EVW~O)p|AKDK^UolHFL;Q#)W1Dyf|j_~*&K1n+e z-Iy6QmTgLV?QEqphbh{UKCyZ@<+0A41Ulb_p%A8Qd8!72EF9h51IpwpIiMJNBz5M!#(}QKxMQ0tiz|E5Q^iOKUkynGcx>xdj?e;Kafp|dVdzjCvq>gbj~&;#)$@nO z&o=U&YDLhYyw1I6PfO)1&3fJ83au zk31b4&;fnVP!ylH4keRP&GFyeCWeM#b{6KYhVWexIhwyAt-=`7b#N~#TTF*r+1OiJ5O?miT|tE%GPc6P-t7B&S-@yJ{I*Z+)L-KpyV3r@c} zoiZbUdmA8_!=UT?K^A&4op!a#ZqRodH=`y1CU3%u>iW*Lc{Znu&7t}gEex0+$!#Hq zafl#wTK0*u6OvdUHr*h)PYAAH)2;aCJf0!HN#lsTOU~q84gzK)2sa*ljEl{;)2KTM zjRMWhMO$a3!1#XDw)J9u4lHMau#fVcefKETC``Q^?lSKfkx>-yt}Rd z-=$Hk!m`^2)K&w0j5keY2}Z$idujOc zW>+lm`+hJm+B&&18=~EtUOrj|H_m71LOm`tgNKA=69mk9*D<}=)ZSwsG1t-=k4+A0 zHSKjEBQMyuN=A&wL*3v`LXYRV%^D)iVK+zJp;*x2%nRx@Hnx2-Z^;tcYI+Ytuf`#J z9D)!9`b4-Z^_p7}0pewL#2&NmdLk;JV?gqpXs3TUlew}%u0}NFaFf^1?PtMy`C^WC zXgAQ9BCQ+p(_hx#YyO{O<<{(7&Aox3`=2@dSS2a7IFs{yn@1d_Ye3S=#iv7p|%bc66M 
z=6JH1B3jXIj+@;MwWwQZ_xsEm_z=@^O!evZMB1(H)F0gC6&%-N`d!F;UZu*z*Gs(C zDu1^H3CdhF0ALabN$R%!vVdyo;tH7FQ+#Dq706)Tmz<=(q{l0h4!h`DrWB;C4MbEW zLSa;FslhBpC6}mNxF35GtA++>o7;(n-T8!xQ0-U}zre5Y`Yv$yqf58G-rV-{J&H+? zf{*q(wKmZ-f((iJkAuzo~k5zwF%sdZ7n82s?!C01%yL zpEexQti(=`OD(IJzz`pMME(12H}fm<(UVWwN@PEEHnc0`Rga#$CP$u~1UvaS6ZPTr zXt4}jG6*F0r%8vJ7$bQ>l6~=&V3l!*7iD05%Ih1c)0{p2oMm8?Wqd`DaVH~f@a9oz z-BwsH1j3HWEy?`W&b%i45~HM;_$I_Dx~BN~s{QY+v4UD>Y^W$VPu6iU?AvNnO;#_C zEUD1|2&ycPS@=X3(m@m|+%tVofAW+zYCppiWVo6k7d_O4T*|-eOKE&hG`?hH;cj^H zykM+!qzsYO!7>^Us_5;R>8g+f%yb+csoI$0!Q)5B>!hf)sYHsC zOL@DOinOWBzTvSzux;`G&AsjMgkpzz0~6GSVYJMas_VEM$3{-23HMp)48n$w|5{}z zIC|&iuBD*c&OPqy(C{kWLw5IuhP_ke>ieB^wOI(`BY+kFJ^j8r*#T6ebHMG0#${L~ zZ~NiRF2h>CSVebkT#KK1PbMfi>N4qq30967%H9(s;mV)}(IhR{z5 zE%uDB&44Y(vq=Qx>1tbAK!68mTpPu85$pv*~-Cb3 zO~lscA`e{#DJNyUl1X6_^WP*|Kll)B*Z%(T-N~XC84G9LbeMYoQMmNrr`r`>MD_*9 ztcCBLvHHbp%W02nZL{jyQtYCKevOgf*72%1d#vLGBkAg~G!MWn}zE zt&^pk@?PP^E#|!rU}4=(e4)wSAcM6lMM~;%Fo-cO&EyOIK-i(CMJf z{OyjrA5$}&PHIG`oqpSHUmQxki9N93nFl>JYIfUHIln$nEGIv`{qo^9YLLe`w^i$1 z-bA6*ann+aM4OXJpxrLly0=$ryO1v-BF0pLVNJHVl9}IO`-IR958LM#BVvH!+3x#8 zClfj+<3FB!zyL$EyhA$BBd##&f{Ui_Aerxr7zPt+*!yq|D>s~1vMof9QjIXy5zQAX zl1I)FpiM+Nwj5KnUh(AjFG)3*dZQv?wQmI-(9_S1Z?YfN?DzG0zgVA}<1|=$hiV3LzZ}{6BcYbX6XYlc|1#CEi}%@WZMLH& zriELjxCey=1d-^v*?8EH)d%_r258d5px?8Jot$!@za|l0Js(r?u02)Bu@Nx^_)swx z$G3QUzD&!>apw{;7Fp1^{$ekKFwe1z{z+SyZiRHh$RU+AihdffZPX-hu<``l3AV{ zD8?j&Swc@I)}`%_x}ACR_D=(BlY>1QX}cn0I-b>nWfu*qZ0olEu`pKn_@I0c^dDe$ zB6Jahtn1IG23K#uz%>&O=)Sf_wP zRn2IhA@lrEa!J4;dhsS1xm>DpPWUAi8$*(P-%xOH;Suraq483Wg4kb$nw-R@iCUy7 z*YTAtRBLMcdqqi!Fir4dXwC@Y`T=jRC5s>ut2WWLZfWo8?l4e&?)JqA*>(kiNt?yY zY6IdJ01f~F>DT3;CXGV0o@R&3+(sswu0Fr5eC%#n5~tEr?0p<>L!hL;5z4Y^w4lcBPk8wNCp}0Qk z)>%08`H~Oo50SnpR{>lCH*Gh$?L8{rTAg0a%6Y%nkhvjk$qZ6C#RI~XC_6MJWd7ut zn-K=deriC~c_injyd-k+Z{~qd6N>*aZTan9-XXFK(`5J_?<)^2_wi6SvfqvoCXxUK zEnGXx$(t=DU+P|4Trvl=WSeq=0`69_1|=GeGOuu`kFvWnpopfmd%ELSKYe`l{z+NSSe zY+GP$@Ri(ltQP8vMaD-HWsYjA^J;%e)Xh@ruICXq%MX0|!>vW*eaOZ2weS;xtayXy 
z=_@lIEN|w)m;R)$Wpev&ojzXN-R-8 zhs)*BuaMn37*Av-Vawof+YqVG?!fNhL!Y?$y_Peun0dx);(`d%xrVwBPLfx{{%rlNHz#CVmEP^5kWux&|wwqqYdGhrcbR!A?0gYn0fy z?H;;Iy`}-J3@;98@N*jH5v#`mM&klBTOC)fwsy3&wzOPr zYH4e0y{1%k_fBqyFCX{PsT01^CMoVs)oi2adhgj?3wWz$a6VyS^LSf*wL+h@674wk z(#)Xeorr9&tEfv40kVpOG4g!I1XRp`8zK|1ZLs)jz?SZaxH$7*w)=5fPLs?kf<*-NaCM+97^$e0(5&moxr7>aXN052~K# zmetcoEA57rupIgxwE2K(Ku|4+Ehk+R>7TrV-+_{=LW-BrqCi70qH>eV)N}Ex8U`ia zfVFTme%Wya}Vq?qad^B?=FmwiCP^9GtL%#`f{fO*732d^M_5>$N%oQwbR2kq6g&upGf0h zTIRqI=h=1b9}Yd+U#{q@Sel z4d~4-y$0|BL0AMk3f%A-e_0;t6W~!*LwBcmr z?Py)eyIkiCfEi>PB+%ur9_OjR8Ctk(v^y&?ZL8euGqppSgn=$N7hcUnneA= z=Jqm>J2^`7W5}s~*^zDF60_%z%2t2pu4sn@)?d%IV)7`}~7!^Qt+9bvVH(~>k7tY*voXvcQF zFsOBMYl;}awdS^_E}eN6T`%Vzwiz1{5Wxs9nG!KxhOToSUyzs~XD<^Q#Ga|S|AD5y zzeT@9hq1m*{PY*86O7wCwjHBt`D|u8-<%aQ-MKHq=pHVXJumc1@~%l&2XzT%{p?%z zzo}lvc1l`WdwX`~IecBB?(RO}Sn*q;+ME)!{RAU#)T+cyKmIg)l(K`((B=$^+0y5i|7`q-z zy20@oT~w4<<+V+H@*`EZxG1ocTH6r6+j(pMnRI;9ItX>obnR|JxPmOLpFc(Ud@ZXu z%|%41mG8#5rY$>~XUf3SUnYOdiYe5yWJx=<=b`eXLAc6`vJyv`4KTrvAwV^+B87Rd zoEre^W6wjXLm9klfOD!>07kaN!qD{mP%^Z5X;hp7 zZ!)c}v}N$b;x%RY=_jV^FDtgjzaL9#$n8axO4PZNu+w(kS97-)&s~~3SM8oYhTNZY zSdnhd6#UtUid$ZkEx&)wOu` zeY~IIfr_xd(i8DfYdp}&c~cVT>oYtSV}s}3X$=fC}1jmXCU%y zw7qoQ^h-a*U9FPlh4rqNz+Is{=j*#>*KXcBeI_>WbY}FfdR*gIsP?A^wrQ&|J+a=P zJLT!a&#ta38m~ZIV@lQUr3KB}p>Dcz14kwdb34BJ(FElnnp({(d1(mgdh>Pq{)tcH zBev#?p%Ka}3%c*EIV?Fh%d^MQW2db{QdvMbZwB*HN{ufoy}rb=>?c)&Wwm^5!nV5ZH8$$k{Pnrs-c%J;`o6SP34y;uq!{mI_0)44`2BaTqfiO!vCFy((f) zG8A5L!(t2eb(nrBt#>dpfT9^9l--nO*5|$uUxU~6R)k)9WV8yh& zIrWi6^W6GQFJ4D~Hm+Hwe8=~M8G@Y7UgxGFU*wc@-gr(0vGoIIPdfTdM#&$Gs-C_Mf=>$e`uKhhdGH%dXS9(7$6BjopHe*Bhb!(|6tGORyc3QRh(TVDga63TvJ@ z0W#4oMC+6vNb6qh5%CQ-U9kub*cj!^Nwf6d-GOAO|} ziwvrp%^OUuswV&bei2D;-Q$4g4}SQ$ks?%a%DTUJTCDr+%9I{Fp!*`yxlEQF;z!;1 zWBL3h=v(N1vBT%PX4N#cC{Ce583Gb~0=WZom`kPX*P<`x8EM+Khu%7|n0u#V)~-%= z54zdw(S;k@rArd=>P4f299;;#k_=t;Y-g zxzgI`i+FLnrl_ZX0mmvAx5N!`08;xZ17?saCo#$$vE*l#(An&4_!^18rw9^_WdSu%B?!EX01~@?noGl6q;f60*N%00|k{BRcf$*dknF3Y$$@khDj5TzjH2u 
zLIRct{7&gJx+XAFA8%*{t*f28U}t;?uH~HL*ncNf9?_4xj5v%~7!eV|SntD*#1aXV z9kch@>fW9Am*_h$GCF!Z6hr8>u6}Wyqt_6G?&Pf0c%bj-X^O3N6Y7V5AGJUy~SWARt;Z21>fF!5MB7vq1#r0$j+>h!8mlABn(juKjoGZl*}596X|rY*{3F%i-s2!DN7424!fEg6 zFcCfCcIoADA30Oa#9(O- zT^h%`43R+uLd3u#p?LcKm{lg6GgtIt@fB7BbLp|+^ziG0qwL*hk}?;(baxno8YDTS zgYI$PpZ)T7{?7DBRvn}FSZZCntco#x@;Ih{^v&g|=}{Z*KnHwz_4rTv#?9rig~~w* zEzWCQA9@_*s{z`syX=(VnO2=zy4Cc*OXuz^z;f5@L&Wy$KE)G&TcCq}X(CiMbJ)|| zOX1IwaSj)3;GIkxKbmk&tV${ZEVjvArae{DrnasjBkFZqUZ)4V;tmc(6*iEvhmW&H zL_L4*mYof%eO%7__(c@rkz^t;cckzev|nwJ^xiwkKv6P~Ddy}9hS zELLu409vFB!yqpBAGo?(BK@s3SuaE0B_t!rIa_vO5LhcIzUzqFnn z7O$+|p9TUhsbKwqy8Tmx3jrSq+#;U-C+}*c%}^jG{qV4gL39=kXtKc1JN&o^QlKG! zgq*i(AS!>fZS9AT%-ry9HK{@4WbXwUM7)#OW-S*RAQ=7;(w}i;sDEpw1 zni=orS5AYeVnNN;g!{E9ao+LL$|SdynY&Yl?I)^jX{_qWzee_)(y!7{f;>{4AWo(5 zTA*`p;pS~IpGABolx!S0I7G5Vq|injWT`9H4PJ9Sf>mP{YmIi$<7|ig>=RUyc`lY@ zKeL9$xZ_LPwqtooT~7Ybm5W0c#PSJ~37$S>(-nCZZe`XGW#{iKzD zP01{^P*n&Gr(ho4xosm}cXUe5NOEgREc7qQEv~fOXV{bxA)Ybl1 zoe2A{Isw=zKo1Zprzzp}xDly+4n<#hm`I%=q$b{(@6GY{7|OOt$loB3bC=`rra)0y7lP?VKpIoTvtW6kO#F#30zpTy6j*uXes)e@MHcQk z{GdcEMk>nURG`N=FzN|#|05DzhN0bqF%CiF?n=S>!o(sttq~urHjT3ENh~!PEgX=v z3y9&^(otIXe90PK0hYgBOyOA+@beiyj2S>Kf%QnbHCR~|v)NF#iwt036{O5~UL0k1 z5_rcM;lW%Pvx_Tc^^fD_DsL|d(t>@xwZxcSPZ@qqepeUHf?2Dih4#tinz#az6ZEpqXrCQq|FA+GlzFduz# zee_in<5OPVb18<`*?gQqRTSf{Lm2kX6qK1oc2(FABk&*|g7oiC{M!sIY#<1+E z_S)H{6mC`e2<~mPds>$aO5~4oZ^YBP;)oF029q)lL8{iqF{Y)JtWdJ-C3T3Q9Wo0P z5db9kXa((oJ_P*?&m+#zivt=Q`B*qF6^+po9fzDOb4FVqvyppX^lH~460tJes4mEM ztF^}84*rJcDjKz|)rN68Z0PPhrUbr4qy7WA6lG=%@TDT4#XhDfdurArFc-Tyz^5*v zTNaCoO%0Cw88|`YmVFQ){;71@<=Q`t$Y*dZIAXYTlsi+2Rpm4qq~P9uxb%m&ofRtg z3}Ad0h37sE8u-OBoy(D?y+Axt-z7KW{fmgo(g4;m}<9`uehi^oJ}I>4W5+1tUFXcCk2 zgPdSnDV%&A83ZQO!+~j0(r=oG0#A%N?Z}`ociO|#7xwbbe)ziTlg!JY&tb!@2(?g|Ds@xtVIUtlVMh1%L8q&s1?#wW#V z0#!c=aVXJ^bC^|!vS~fDPTw}>%$=p zc^aUEvARwU6y)o^yDw4fxmTI1oq)_38CZzAbD?YsL#N6clJ1&6lt5`#g&3M1TLeUN zKiulLq3<=xAAz!ofhrBaBk0lq*Obr0hvLXGNc@Ja|M}jUnkR9~SED0{)qD?;cnPui 
z&3(o9b(&CJmwCNV+0^R6-)J1!`;LK(Bf80F>Y!**ezRr9Nyn5KYfpPo%-befJL-{# zN)gB8W8x@Ezx5dG#Zz!^|csDo{ zz&NHGU&z~kYRBzwosdsza38+SAE7To(=cTKkk5vo&78+l5 z%zjq&1P#>FdMdejtC6+3_^EEZXX@KRR2Ah}D5@#RYE%G(*`+u+{Z*7oT<* zxR96mj|jH>tR=>-GdTdR6e*LMRaSq#pE2rSz#Xg&QcX<|89}Mn#zLK$u|i0?2{u2| z3m(u_o%yWLTm8CKmJlcQ(K9V>?3)}^PSFg8*^H{uttvDFY?;%CU(xfjQIsbyP$%tz z-;Ed-b&FAGqU^=f#XDo+x%48hQa_VczdMGIU@VPtb@fin)mP-K%1rk_>nz=|_L>7X z%dQ-^9_pTY(x0qNW4)Xiox0Jei9HgG1x?A+rkGA$Yw-Td)#D~nmDR`@Gj|Zp@w1{V z*q|31FS?=OTZ_`m+dM%6Ba0pYwPqd=YCS5;A->_Gl26igBa_!K7arz?s*7@6+g~>S zG~c@rVcb%F1Btd=+oDL!xyOA=DV}nS#kDv)AMxJ}Fu@(K?4*Z0Xfa@k+>;6U1~In4 zxCnYvuYbz(su07m7p@<6xmavBf`K(#dCF@^Z|VVtA2S)~I3$5{#K_@Xi6$PWC(lL= zlZMuVlqgy0i;cxmOuq!Oc31*m;RA@=LkoD)$P|u$&{Y}uXQifnJ6lqXnXx4~Gvuv7 zLYBhkqk@NJfIzferl&r$>b_FVd`(c<9qM@B$lvvi;-MuF2+X(~UIfg2(aEGAE3*${ zmhSrzBi;4`28sshbclt0hTG*SB9NfB(?bVv?2=XVQl@m5!#EZ#`qKgQjW6G0;%78- zlvOIoO`$NcYhBfe4aQb=n?R{yVv?0b7;Q4a_{wa_DDWN}d(PHZJa(6D-Unez%1~mYcV`-{^Z4nQm2wlGReuQe;}xYurJ%X z%*Uj1QVV z^UtstW^}<<#F=IKP)*a992QArB#uAb+BYzj2+ZW2Hfm=^@!5uqaZMavj^1cB11&Gk z1Wt`T@z0(ud|Z-XeqdQIs^>H#IjVH!!?@3(8FZE(E1zVe#f0Ua;=oUhM@e35>GtO) zV#;9mGZV9|2m^@)&~`43ByJc05cgS$Wx4p8%O~o~pf~F^_55^=O=wzD>p{!~O?7mb z`{E#`2mVp=fL6MJhal5{o;)5KFIcY(w*FR90fFk4-D#K>jFYPqy49L()f!>je$gPxvH`%QA8N@3Y-j71Q@#OpSbDEyW3Bl8FKYRAYA*dwpLA-E@1JF+G#T2IJseEP@2Idbh{>%DhC`jVo39AlM9;QXO9KIGp}w-ona2_n+GH z`XKmFtP2v006Gi$NQvaN;ZYz#-+JcL8di8LcRB+uh>^33g!qPkaXQB zu=#Q7OU>oFLQx4Dz6`cPI3XfapP?AOM+*9&a#Z=n$Li-U-ybh3>Du1ZwEgob(e@Vn zr+TZl;f-dU-kAELN%ef($rH|1h76zjUQMI4xhw|Qn3^ZjtI+T+yNrS{m`u8H5z1{e zQZf~nSV>X}ndYb;C6?a0AwFC9g2KegqZBnRbS_>xQO6;1L z9aSlDIqP)sG{HQsAj`tgs>H`K)uWXz5k^`e>f}JGmJRpM&x&J)CNy!J{8RtExUg68 z_^eccd2%v_%C1~~$WouaVTcyVy{?1C^&-6ne*N@Q`I@PVS<$}Vm%iRf?wMZ9`N(}0 z63LQi(ma+;dG`-~xqU>v{foZG$^d>Mv2vu#6rpPKA+6zSLr|krNh;8Holhgd4S)Vq z)cx(!yHCIVgg5IBtKWrYO~S_s~q&gnbMFq@!9cyCFj*Ir^6`$cYe&6 zIhd4k6gE)rH527nyg4W{VIm(aWefMM$VF zB{)}m^p3-hn^l(WzEHz{8Y4t(ML`rl`WM}3(st*{ok??+-<^&}0s0imL)*qbf0sp8 zzv>PzfDj3>#McqPBN|(qmmTLxT>>r7UCtSnq#Uo`lkYYk1Q`m 
zBKv78eAf*%kcmz4;=Ys_;Xfui&~zTy1*#{+L`)~bwT)o*2`u8y5tBDS9Is$W-05|3 z1FwfR?9l(T1h+uX^5|})BbxQ2Qz^trmoTxMT1b<5EB`E$XxcwDVX(2z=ERB(ayrja zJ}@^Cm;f-w<#2Xx`{ez+s(ed!w%AKEP7KnX<#R3A zTm1*n2{UtJL6R-Og)Efb05HZR@ESQfvk<_C(erwT8dF)_i5 z7(O#qw=vaR&#b{|8O;DT%Yw;CW^uks1L%oB^6Y=CZAD(^n8bpXhgq!ooX?{gzdilZ zUKaWX>g-1k@~sDfMFDrNr&=@3adkDHT53MUfC@xGGlJ_J-4Ns1C%v8!o_?3bA00PKud{VoAia`o4mdnxoTo|KK zq!B(+^JKL=CbOMVk{_2uaUnfD$a{a{{XSyR2$>G&D5OG>kSY3DR!>bI{vxiWqYJci zp;lLGIMrQ)mBPRsbnw`!liNO3b30DTF5~C>bO;PnUa#XXZiu@qZH7zrGXDt%tv(tZ z4?C-wMoR2Y_~qw7-qqJumm55{xqo}&5pJfh*A_4`KT<__Xc2b4Pm@s+Zc@=dpqmaY z&evBr;ic+XZaX5y(9O(u*xQ}Q?%%b`y3^il5}GUBNVLd@XppB<9cmkpBdqB&(o)Sr zU&518CioW)^8B_>ye}7S3*OA($41^}_Emx{)~9P$=I;l4UdTj+%&*FNeXrX>vT^UP zCO&%_RB+7~N-aQ0?{;?9h@2K!I6Zav4aD;1X*G-;JK^7&$P8c}d^+ z{sjZWMZ| z6C}niwtY;01bXflsl5JXioId*a$r;Sz!Ia`|Gk|);Kph}o^1~}lR>)> z&x_cf-7LS-kIsLKW(1>(>eTsEk~%r2H&{oE(Rdmg8Jr&*L|;c;x6l^puca7~Nzh!E zxJsQyA8xA4G{fnhu1wVBX3&roNGbCH<$-4WkL_SO{}zr%@Q3;qHGy+MTrY$RnjsL~ z#Ms?^xw#FWQBkqu%BS}CQm#JG0jT^T{UjoSf+@Y=Kk_`s4pi*X>ED)aGJz>q|GxD~ zrLEw@zq^e7d6BgFebA1lfAH^r{b@h-MBOQxyn)T#$-KOF!Z2IOB;dN%KEMa+Q)~qm zhXKUX|Do#5!;(zf|9^1DEkrahF+jvMW3v#eQ9uPnQ#4aEM*+9CNkiLbTo6Ig5VU}_ zaY0X2H&%Bv=L$d-oA^N5&)_zM&7Un*hhW_`(<)sH8teZ#W(UA(Gm1#cuAk#((Jq5$# zOah=LZj%7QUWBcq2R5)Pm;w5v9s$R-om`~J+JTDSksWbwm2ET7^(Aq;H=%3lKZGuR znmPWH^{1r?p<6$~C(>Z6;f|wZ_TQv+anpv8Q(qs3q}lC7g9k7pWBuZ6npxbKx94bN zwuWmEIxkwRMy*GHLIX)RJ)$zGF$USk(tDd9Dwc(z^;RaQ0sYA}0qxOL zdchgV6}rNP+t3=fYcH|3Wwh!YxY))l;4}bHG%82usRAh%S+MHT@x`j%$vJHg(f$rD zhb42Ya~po~0WgZbrik^#w$^Boe9^K?PF0e#u(zo(WKVEa+3GDrMRP@}S%D>^n7}sB zp$ag zVEN{ae`SDq01RGH1Jn|brEH#Ln1Hlq-;h>d8O0ueuoMh(B)-$Ql_8FV!PPE)ec_}O z=sS~eNf5709FFcVunpx1DNd0*pcHMJ>Sw#WI<`uXwG+c>mCa?<)q*OTw!Ci%7b){) zk<|MKGu^7Mk_39~bX&enh{0l{xd=C(!=?NzYNhzy8&g>)ucrSsjD79WFPvcemEF@# z<7YsWi6+Z-B<&92HO?6XZwH5Xq#9O4XHB?j{VX2mstnyVh*A^lhFe)NA=@yFAVn=d zRj3+I<)ufu1O-m(`!YH(3lzaTbA|ileWqD6&SgAQKzERi&J`1t0?hd;Zo?d|=)=Gu z4#(cKo^CcCFOs7M^ltdnhJFzMeF9C*S?5#MXK}?dF};Eu8b|B~f^R&@9ti!Oi=3+I 
zk(HG6oVGh9hTuf{`b_~WXl!Hv5Gl1csTHYC<;?_$0afYX5|uP2$m(JSsN~en4d&C7 zvv*!*!jpldCU9G1)o!@Sxf7fyUztdjQI(DebZF<2 zTz><@AggH{b3i0NTqhfbKe7r$fZ*h-n|F-OKCZaQhQN%?IGDazGXRCvPk3gQR}TjH zqiuI{5(_gP3lxY$o9rVquzr}0Rc;I$b{<8VA)!V!A$qNeZd4P{@L~ybi8^gPz$)!u z<%lv#L6A{}Jlny7SuVd#n-w8A!&LDWq)jEh*E;9zce4j2jpiN~QufS#dHLK!S+y)^ zi8>iFI76@|aR$^N{n|r^FMhRR?h)=f+y`Zx4{OQ;dRYy;yLmm@ZI?fQj-;L2!6|Xj zLhTw<#0eRW^f705t_N7nPw(cW*D1_yM+nkadZ}`P%be45I2xK z)qHh9HA>Z=KzFux>HoqF9^87(!>2g_Xt03U3e57Sw_i$y0ru1RRnvEPkR@CEQ0!o=`U+ z#hEQAf;EFIj2X(T0GmFqA4!qdyEtYhkJTY?2E<`%m@^Ub7gHe^hN{O?4VH#`sA?i= zvWzdA%$PC(IbJe$U3==(Z~4lSp*N;lQoSN%G{l4X87bN62wE_XgPDWliuHhs0ODBU zN|gjRG{K(P1WK_(^^q?>v^6Pa0;h6eu73eA-NGe~Q8Z^0e2~JzL+$D%qjvIP0-1e> zqp5`%5_MeG2b&>zsfrmeck5tJnI6&JhW012+mHiwgJu>BHIcbvoQU>*VZ@Svq8XV` z1HNF>y+4PX(%jkvd(*yp5_kOa=jKmMBYT~nZoBY)aFRcMFqi2hx)HG=5l^P4Cygz*}Cbuk#rtH%;v3#aQ-0U zVf!`-E>6bI8Xff&$gKRW424U2s$gt&0uTVxD4#dD+SU%>7{S7Z1#iuc@qy@yi=42a zUbhV8gG>!>Wd1wT^k5DNnR?l_@vnW+PzX_}4Vqn4!r8eaBUed8wp}te|H|<97XCV0 zEAZsK?09SA<(XsL`~4lD80y}lwyLEceh(S{`rY+|_pSN9lfVB`S@q?oclK>2aY69m zR=E3$k@KzJ2>3#Qhd)n+2on)tCdpf5qI|PiEL2+SUu!oA7Y9(h(-iWkAWUc-P6@w( z4jvXmqg>b;{RywD6K3CoYowbn3Tj7cXCEyNtWGCg9gIiU&nCzlVm?%qMe=94o^V*txz0O{&sh z7uwTMTh@kF&SiS=iEQl|uRt$DFf@au?5m>JDiij|4QKT~iVPyP=)Ygz0Mibay+EzC zRh-xps;JNZSWK8;`kwma3UX|>EAKu!7Q_6kGd4Je8HKZBnqs5lduD$ez?oRC5(rG` zu1JC8D*ZAb%C-g#hsf|^NU`TZ9!28ofY-BSgO%*!k(-3?DYH^@xCf9}LR2GCmD?{D zSeQST|QLf4@T#LT~PKdASa zfqMSMzs!h+x7ExhjBOpa0!kSdlKalq`EZW#6FKW~xjCaYq0n2v8xBXrIMC{-CQwW= zjyJSGJqM+>#8TjRjK+SE%yzhv<9!ESxtCV$QeCE)n?tYlfKX|ruwHI}Sr#RnTZoe$ zHO^zm{l#r+8PXK&?Zs-XsPXDM4IYv&h6#mUuwmce+7xgS2}#iPP8Oav^pzk6^MG;k z_~0yL=%3LR!o0k*~&y?V5Nh+NMAG9LYb$e z=)H)5!DCNvkaQtP?s^sQ@nt76%hV zSnEoOxd1qsSi0(|T?0QF5;AxGJ{RU0AS_6n0XNon(bt4c-xs)*h9SDICLGADTGb_8 za!ouMVuitH&{=>b!v(P`=yZMYQqmYzJt%oh+>i-Q@Q?NpeUR0EEia&7I*#+#xTE>r zAVA#R_vDXjVZPa)ehmK<@yH_i-QN#%RkUN@xgKkBs=ZDH6m~~{$oGYxQvu+ydIRf8 zYn{KI;?8RKX2KK`nY3yL0Raiw6?-%k5lq%0>yTN-f5TKN62<<()NaQps5aG{1nRy&ZJlg9|6jMDlMVIM1f 
z#0tClBKfi1Xid?%*yB<6+U#h4G2T6(M1;W$x4?&!`+oa&q$KLUFAj-9@+0OrDREUY zhaDM?&m1jdauL!jGb1>|i*=|lPYyM}jHCqC-8u-jw2h2HHSV{{5NI+_uk@@}Q&otH z4H^#v%Gf2kYC22qv3*Xi-35kLW+0f);I+3eKBc0rU{6&xt~*VDY(;z$gni8NmqkzU z=R<&du*;{HYTZwrf#WlO_iC&L?<|PZ=H+P0`f!X9)iOZ`zMKHw)Wke=EJ(#YWfhydV7fELyu1lPodmh~J^}6kyeN&kfp`%zx13C!W z1z_Zyg>^ZaJ>i^1`@HipfAgOnk)-RO(FA`!OSv+eX2* z2Kpo{b;DR7@E}A)Ez#PNKf}2rTono;R$o{h zioSZCGnwP4fKVI^jjI=zy?dJ6)G9%Yt@u<;?2fN7L9eF3-Mfs39&t)wdnyRWBZ3U# zBN3ZH+g?Gn8PbuCf^cyvX#+)>;UF=xu^(W&%Pbl}RZViA!WqfRgYZE}N&W7>~5 znN5B7cG6rp_OgP_?7!-;* zolAF}M+YzD05#+xSX}_F1D=VuX9xa*_K;3S8Wh`W+f<0^u>4e7*>pg*V?W}#%B=A( zN8)VS(L@v4sTCW>kV8?9!z0(+@TZf0)V(jh&aIQ@bIAC8>n|TGtGs_Un$NihI+38) zJSz0<+HM76wN@&wxZrA0ko8!oqKZAHVvF+fT^PgLWllL=%QnLkh5=s-k>As|Zo zY5XSOJ9YQ^bhJ?brU%NF(O86Hn8gBFIVb7>$il|#M2rg+&)#MNcsp|*z`u0{aUg!S z!P`TdZ{AmbMD%=2bVM)uTh1T&ax~`Anl99NIfI9UW$v~fx>mWbR4pL9FF~WHWWg@h_RBZFIz!6@6 zLJQpE_xo|gO>A{)3#%e|A~QZL@!~ne#N`9O$#Au-RC$D*(9u=Q;wjojgY9%|m?Vzf zrX~?hK+NVuNqzK2ed3L3BMaU|Xt>0*%pD`s7oeRmXYbmH6PzMqy0#eGG~_=)FYNn8 zGM#{yMiR@5G8hrnmJ0$3RKPl$xyqRu)W>qRSs^_Qk3RokY7cLxTn`c%#)&$S~u_So!S`(Qk9IL{k*w6U20OkBACGZ`83lkfru@%Mfn$cyQk zd&P4x)-9MfDOvP?j|i8!n3M&hq)b~oD@U<3*(+QEVE@a};ahB;U1gXv^uyM}CdS7y z@U&$r`N36^K?2cdcv%&RYBe}59tUs59>y=tFo`uMiP+BxJcF?*`&$o6g z^)O=AntxlztpxQ8J5SUkczF2(rO~*e!sb*Ro1t2`LlVxZpJe49L7;2G8cB^?KW- zi5QJoZZCjXTVpUq7H;_Rx^`c~5zwE|B~XYjNoY?CC+D`Z@}cZ-g(%Ail*R^xZ#uGj zU~0f(R0_bV_{y`0p{`!+p71N!>EvkCU~Ob%b`$r;sm4>*`NBgdg!tcN zmK^WU)F7h~kQ?R&42W7O*@8WfhAOpmRsuefLLRe~#UCcJdA`F(iB!{6N?6{Zt|Owg_tAUie9NE0gNZ zK$}Hu0ir)^V2&;w27OI+kTOsEl62a#T%BPRh9IF7%Mqka%K@*dQD323yE47+P!=Xe z2$B$ko{@6{8iSSG!n-p5=Z=1=rDeaK$&Jie>ko&zPC^%E*?#J3lX+b2kT>WI&{Ooo zA#^uf3$N@Y3|Yvf1<#LKdukhPeunC8jy-?hcA#lm|LMXr6gRk`#TC}Ww9Pw00eUG9*&cTG zwhEC-A5Cq1BFEYAC2DIk)BHte6J4OwnVYvF<( zP(5O;G0?yyTfJ?)iD$ZwJUtHh3C7^Q=5bil$>~iK+P?g_)z2>iv z>uJ{gAqkuh`}X{AKdYO!cQ<_Y4@`med}}WwPZXN>tr|{+RF)|tynQ9^@2u^G?WbD} zpJ%NHGWZuPa-B>W>$lc?bW~n<2T{F&n;W;?O4{e~gW;=^7gIa_*OiY;^QESi5SQSF 
zwn2Y6l+D6LFpVHhP5al1DKD62AqA+ZD!WxgFDjc$>!79n-gSdw+PJ_jx>#PpV_uT) z-i7fFhaoi11u(Pm6Y{c`J=o|!lGcZwoIHuF2!C{&X%7GJdGDb~!4V*~&e4pzmH8cA zrJof&ov6qn_bCl^G@#niZOZ{26aw zz1=!J@cvTHE|7HFjZ^QIaf%8(?tb&x_?}+V%HJ<;`EzaY+hOw}% zbv4IE{3^UEf9Af>IYmZvFNWEelt%(E7*#T&XOj7hnJP2Z;kkXCZ*;DN1gH<=A)40iaKOK@#J5Ho{buhHJL3o#tOZEzXbWP&s% zHDo^cSJ01Vman7is!v+=o&NFq8xrbTeO>#dfZ3@vzlpTpyBa#;f$Z}W5E^ICMzdHf z%l`B&HhfQ+IM)j5U+9}cKB}Mu`LY??h$iZE&NfguQEboyO1iBKpF$wWA?Im64}QFT zBtG#Tt_KGVXcoIE(jc@r6_e?0yBz>kD&(9;uJV-VpO3+9=K98Rf_6CB&6u$x{Q zoGPK3_ka>k&h#HWLMbqgC+@+K(&VsRpvcuqOglM zjHOf-aE6F#PJ?Z&Qf3l_8b+i1Y-$-<_SI&SF7=y)^!k6b=8Ha>1+tH4t#Mf$4=W~M zF6eU!!8=2li@Zp4aPd^c9_3EVE*N^Hu@l#vd~O_)Ppp8dnd1PRw}a=qdC_-JE{J>}Sew=&*y zccC(`&~fNZS&oKcrQW2iId$I5u=ZM6MbHX5Zh7VJV0a$We!G?i>8|0bDp>OrAy{Dh?9wAqn_`ltfNACDzkO+bv3i*gVdwWsAD;^#n z*2m&>f%RA7T&T2-OfzQRr(fXa*RaJ$!8J+Sg9g*<*$K4Y&)taM%(D4|4*RI(Iux{31`i7q;QD3i{UeJ32Od}OXTm!K@PD$eB`B89 zUC|btbWhuSgMD$d9&uw0uQWQbV%Oif4Sq&=RuP3U0%ymo3sTVcO+yuKL{9MMP=_UH zbg;b5YW?DU__U=n;jFH}TfXaw6P1h>T7Y^8NP}-*;v}gW^(Jim*W8c)wVr+g_|Yzn z0o3H)8fq;tbC@M!<-f*`+eP?0Trn;LFnS|nRAkV862f&*$cfjv?fnvw=R}sWwDmTP z2G3e|VsABxT6QQha0goHeGA4$LTOHt`ujIX0!||pKu_d zp%vF&Xk!eiqigTPK??&ZA7(mTnaEg=?dhx%?U`_Y5&d&WsHY}qae_eUFm9voobTZ6 zBu!+PYuhb&u@=L?)}H@>t*@sSWjqM-48?gF3SaT>PuvKeHYI2*U_ zC!2{4ZP)w8=l?9pJs=(E6u=&ncUXE$s*gY)I2>eM+p zJ_+T@etQ=OyJ3k+b7cr+tlj2ypWghl+27W%KNH+45xfLftaC* zO4b-Ath86H+4v1v%*{`{waVS;pB%5ul9c$QyMl(cGA04%CT$M`^~5xqpND za8KP(wFd)&9lvL2@j~R8gs?OON{dM^E>(Aa%htODm%Hk#yMPw16 z2m(er8FcWr>v09UxS!I8SDm|i?>IbE)CDtA*Dv2&Y)FwB3%*(o%G5B>! 
z{*Ic2VXgUJl=9_S=?K)>1oG0DInqmHHnyN}$J&Bnj}JkiU%x;8O~)BQC^%wkHW%@4 zli0cL&<8*9{@GSxkNL;v@W!Yl6$0U^+<`rbOkF@YUl9~lBhL0>V4Gu$ha zqjjM0QDdz;NoY<*+3j`#IkA@Ihw-~_KoYp26!{e9E$4>N_PX};Qb)ecK&bhSkqU5b z!4W!6fhZk;Ko8%Cm=iY)!`GRclU3H@Oz3ogf*56JqNF&NELDN|wql{9(Sj@p^jd(3 zG&z(*-^k1zU;Opz@z-yIUfi)~Dd$o(sZAgRN3(9aIwJSL!TWdOHn)90WXICS9^1^0 z>i*ZwYn)qe+k@6zW7bW_c>?F!e-}g@aQ^^2H5p@x?tGuZsMUktyh`UhQ(AQgi&5cs zx}$^DmiQeY$=nGsZo?)kAkOFE#iA<~+sE}-B=`v@Sw_?+2^v0z96f!Li*!x)*L@I) z=k|wee%mp0akWs%-C1+5+d}z8Lfu&RddvK4P;AfS6P97Nb}G#Ucw@3F{|%d{p5P|F|LoQL;d_G{ zw(+B$k41B`XO%?Vin<>H(mMmLP>Z_vs(!suup=WGf40OJQ~4oaQp&^m7^M|+Gy$67 zi5yKPtZw#B<>y}vpbhyh2Vb0AQ@X1og7Nr|3pbXPPI~T18Aaj1rYMQTMg#}A@;Mhlpv1FH&d}VZv&<`o2k_>|M)v`ECab*|7gAbj5rDeTDGoSKL=?R_^!fUiw}*fIn0hPr_?qw7h@W%%akGINang}W)RA?bMUleG zkIIyi?TN(~5kWHq?Q6I{&0%Z?gdmfJG#U6w_@g01D!|lua8qm8UFaSCgVoe$?`8iV zYAx>V3XVhvDh2{8B6jp^iC00E#Chh~+`1G$NS7SgagOMEA;cXTt3o$O@ziW4Z%k`7 zLS>sW&hv@mb!y?&I%BDMilv*w^1{iL2BXhujz-w3V^6suLsN(Ry&7D6Fg|AexcywI z%H^1OE|lwZ^6;7T@HYynWgbO7G;j4mTd#Jfp>+Otv^F5#yyXTV%ot$xf4erXY|6~^ zniu~4L#J!x!n4?ppe6D2`dyDPFbr)0WJZG zk3hvqtw+(Ufs(YAGyoFI*^PGP{xirYG{%+o5Q;PT;%RKR)NH5B@q=a>=($!$7>7rKS|UmJp*YWcs5dO2qK~YBp5{RaVK-dkfeFB+n_DvP}SeLGM(*IaZ-zZ=KP`& z$g~a6=AnUvS&1ppm}FDbN7=B)j*So#t1pCyl5;fHl2lpqiX3jDo3?DyLQ>V$qg%b} zrdPTTKZQGs-zeW?9DeFs#yEC(qp0pt>gEG?J}0KxfAyZV>Si&E?F^_p*$OSErFhsd zf5I@>BKMSrdFDW?dn)bKYL9%pqRsZ$Een9D6sL9REuSp+&M|#`)@w@fWO#|S_GUr& z-dG#$Iijw`#E|Ly-l$JRa5AEE-FUwEz)Fk1PH4nzF?&b|c=m)-5e2|R%AhWaj|%{X z3>~6cK6H9M{`Jzwy|hyg?ZVt>#vaKI673f2*v_jyC%@}4e{1J_=-(Teu$!>;y#Z!< z2hc@t$`cGTp_lzko>+#gZEbxfOhRnUb31ep1VW|yp+Z+oKu)T;z1v%FWQ6i>yt5cv z?R1TK)X_lP{pUS`_FQx&yA#YL8TT5y!v_MDfgMPDro$oBRjWyEYU} zbmr6EB)l_6qnrmrzr^M*yg5l@HxT3q}bPe*j^;x&V`qZ z5_d(_v&A^IfHmkq6G9n{9Dl#k>G|NmhF(9r2Di+i#F(}s>)2JN5)0v7Zu`coaz^~p zj0-BfIPN6+QPTT!yM|vKI9!xp=26W|Qs9jV#Q4k_N8WRT4K|Y@ybp`uq@1wS{A7ZC z>vtgIb1ya0+PZTj#P4})#9MyjSlazj(fg$1R8U-um9T3fC0jmi#LSl!S;U zVV))M+*6G~D!V5;H{*@{U;f|x1PJ^evk~b32rv-9Z_F$2Z^+j37n&LYn&f%i`IeT} 
zt6lfL7fYqm-_KNR`x99iY^oHcu4p&={P%hwpQQ~_d(`(Bh{9!o1?UdgLap6md%$1& z(A&2uMSc57gzAYL83q+JLMXFSwy8T56Jkl_AUzoT9s-kx(AQ$Ot+n%*dIESk$s?pK4d~7lCKC41sGOOndWXg1dLicR9-W4 zS@yW`-k0#-Ys?=v>$oA69;Ih;^H_AA*`yH<6V5X7b=ffJjeW|qb=Skq4>|~3UlVKx zRBbnI9+xgK@^_b?=9dZBW?PTmZ+(UMRQ30UqbJFu4sDXNXSXs+3U68C$8&7f1Y)m| z-oBPNe`ZUr*1)0glT%wS=XWcr!hMtvB5f_J_lt>)2hxuH;4~|+7j>l1?#NrP6KgIF zdDG+3!+lMe=#-yRR$GLJ+G_`+p2fO1hb$v1AA`i#ZLv~jB{bAbGGL~LD)q*59Rw^9 znO2GtsA3@aOSpNw4F)M6F>_gMk{jBT%56sW(|n@es;|slYmJ!z4;5~2jm$b={ddv; z)$dT-gJi-z=)L(Ch>I6jfD^*lB?Sp3rk|}GODiP{l27%n?$s4##?ZY#uiG$2*hT}( z`fG9xoR%_%m+N2&DVIY$CZ^@nfiBlalPYImVzbzaB(Cp{l$KafAG7fNkvhjy(CnHf zx;v2wn>B(Vl@@1GX(CAt~s@U#!}yt*QZ{dpSyO(dcFJZW7S|F%nWq5udP&VDh@5S`AYEF^Lxvoc{gwOpw<4F zrs-T+M01tHrN_CTS_Ug(3sC_W@A7#LBQNLy3b|fJQwy=v2r)1sLem+(o+z8nQ-!d0 z?71&*E9h{XMg!$1r zi(75bytAm=-1f#867{nsmbEpCyCcP5xZJ2N4?B6p%YKbiO&JkYDH0T?j3@UOAr~TS zRRUEQp3QY;kTe}lkxK*@{yNFlKOQ%cQ?_CS4K!Ga_IU_tgwViHZX-hOv#TBj(m!{v z$+Mp-XkN3w_*-UX6~BhSptzDo&W;Fd?-|97CW#c0^liYkke|q*>4M7zlLpC@88N znL^`m@EeW&*Y5}Y$eMS1%lzYVWa!EiV=CyVYfDjSJoikvGxVuCq>?m~oMh-qv+Qef zws8?j-xJmzv@C>cadRhFKGT?XcE8-A@lNbu0CqavDq~2uU%n0%o><8?ZAg>j;OwK4 z1Dh3^c5+}3>&}JjpBAs%RX^KzEFWAk^DScYSC2D@!{@qQt-Iz7C50OY1$Zj25=|Rv z(E*M8#U!(Z4w?ft28{Ov6~#>5Gh7k^ri9=X*(n#me=6Sd=cyUw`q25#KdkP(yMpK= zh)Dg>AAPYup>(JO4UQ>CrMw0ocKm)FjZ+HP1Mu%nC%(8!gov|FUyMv_aE!;LzSR{7W;C zU6ja2FYY@9U3h$cvqA#wIq_W zt1Mz9#PTScq!yIRpkPy4_-LkE2$ti6`~2wx)~Dl3V-(Zh%d_Tg9|cuZ*1+dYem7+G zSe6mxVmry1wzjLuj{olVhwr!VY9DMOGRUbpeFSV&eIAEMI{q`f)V(02kd?+Y=83wk z{AxB2Fmt=Xsc*xaKtKsiG!y{&`kXl#6!~B4HUSI?#rB_XAvJUNnE9)qB?ILJ1Xu~4 z7EoN*#3%}h&fim29j-&xFAxxPZvFB;e2Pq3R?|$s#7D97`SM56tIxo=Ysxja5AW1^ zTBTpzOswg+U+c}uQ}B(Y%KKiaByx`QMg%vlT9p)k0dmR{@i@y?}w;k`ELART3&CZsSWhzu?x7K$MkDO z?<_CN@`>{B&zs&}3hFh;j>ZZj{hfROMI`WFrcq-lr@b`9ceBVqDw z%s4EjC_%^KaujfFMuj2)H>ld6C+r6*Jnkqwt*h%s(MsS;r`vhPs)vslmEayN$g4VR32Cw6 z|NOkyv4H?LW?Saqh%k|qh;*0WaBg|Ig$=5(kInE*v)(pz0maYqP6U)LfgTzS+;}ZQ z$d&ESUuidBp3o&mE=Ai8FZTA8gJ7dL;AKjHc&-x?eR(HlD_hh;PE#3M!n} 
zl88($U&Pi{FFQ2r6Z2m+Dn5rj>~IYoKp^Kp8}g0zhV zEtv&_tu1TN!J*zVNFWRt63In}m}qh9E?_66)kbO4bm#NcLXuA}`a<`2or@@!dWs#} zjO{&NUgo0}XC!Gd0S<4Vs}g5RKo?pzTbNh;Ct$wMWpz^YC}g%CvbXR)phvXyS5ZLb z{K<42EX)$%Yr!0OpjL^Ui`fG3Cp?QV$Api$4%nX^X+Rdcz~ij|vKJ5yvu7fk}H-fE<7no>%9D~~r}_!ouaZ6;+X9lvo}Bht*w_9l%@M6I6IzX75OW@tV9 zrO#_L2qbXXx#>VmR?0%Eu7EK*Ryv~SLtXb+FfEC039R|kt7wR1tnIG$a*91m927 zNcYFaR%OUA_*4zf?M&oBGq!WzZ1bFOm@*+)5{%We4X+%@QobV$MLd4g^)F@omM-=Y z72oacn}l!dZnU}GNGqD@PA(RiLyV_k>6Y(S8q#byTTPr-5fHq&uUn9fRWEUnzzeKG z==&q2?^#g1QCXo8vo6LhQXU3rHWK5)EGkN~%!q5Hn7vmQR%@n5s1BAz(3`N^<~M|g zw~i_+Rx77CgC=$xw4bIN8gL_brtRuJv!}6xwe4i&$t);J^Y?={=6(K_&c8k zF^-|*Q!>WNE?^p#2?9-Ymoxgr6Ya~0GO+x9Cp9~o940O+Xzo1T8V&LR3C_nnX+C&$ zkMTM4!*0%=LA7HBZuy_lFGRigQ4GS=(GrNGhsfzL=QhGN8J?`REWK)TsJx0SjWjsA z9fl;KiZJ{^3?u-$h*l)5Bxp<;1Io+;c>9X}BfDItT4SQ*M!ey|Dv@+@j#u)cUV(#W zwgvj30O7ht|fCQcxQT~&U4pp^F1HbmR&t;S-^)Y2d zCG*$rBo>uOPM=<+v_G|7!z8cU_TFD8C?t_d<=V4DFP`<3`@xO?$v2ZgWPr-Q&ejnp z%5x?fgF}G3%`yWbBjc2Ih8Tl238WL}3|NPZJz&*gjDQIEm8f8)aiDu<{EP=oENkB% ztec)h>oNkMNyblAiui!PC^VO|+uL!F!a&9_W@J>044Gd>*SC~iSQ@jO!z!s}L%0`w z&B&Ml1TrE&5xO^;rS>dy!zWAJJOfOy(*Oga8)s;9tIQlT9FQf$I+Zdg7YV>nqh|yU zYQ=j`l}@JDuv*csXXT~?>4kc>!+t+ppxY+Kl$u?n8^gDmZ*Y;Ez^~=EV0~Q;=;r>> zxEB^tX?#iZepjv+T_MV!!IU*m3aY}%yGmV*?HsCn*fc+D#o%hkm}LfD3x58AxDGd* z#L-O~fM~I=wlU5&{kBnw^qq2O&HlMIZ06OO5t(DgZfw23ePKj&uGJp9(l7Xh5oR`M z6Nqu4) zM*E(#eIvcO5gki38t5})0sZax+&s1`Kp-NZ84VscE0_&)$yIIzAWsi_I!z9HimpY~ zpwZ+F;2%wK}5vC8Om3+c&Rm+l!FiwWLCNb0-N8W+(Qm` zRgN-Ev6vm2Q+<)rh-6Lg>6+>?zE1-vvB-JNRN&c5XF#Yo(=1fk7S`JRza$z6uWxr& z;(~|{EkVbcANzrl<#?SydQmIc*Lg<^XNKy`gap`LQb`NJHE$np+b--*{4xKrms zYc1Ku=1$f{f6>Kr7wNuaIs+u(D4B=i7mu!$t9dz-u7#&^r=r=z!aYIrMA!mOZB%bvO|;a02v`uxldm!YDxtkwLHH%IT@K3e_jZUPanVGA8S74eZwUwhDC z2G)m?0XTBrn|>9($$#TSS86!%w)_VHw$n)Vnohf+D)Zq^*r&x|{?lk<(77+%wSPRi z#o1anzm@(mai#8;vuB!vqi+VMTlsKG8xLxafgPjhZGr7vH#fY}VvaJX5-9Sq0U>n6 zkb2I#pU{C)q_W7V65WD~!Qb&4jlcPHD4Rwxu@~!z;e)aa^B=t3{$Ao-^C#5Hk;i_G zEj}&Gu@uq-6{u25VhZCvEFWWUCHNv)p@$^+zLttfymz;jWhXy_Ag}Zd6gh 
z2i{b#9Gd7s2ADU_>!54kkirxfDjI`BhisADhA_9#Y3c?(Fd#ORE}kKXmSg4CKK7wD zT{(eX$If(o_wrLqZCXPFEj)kB>85Flne|rdwc!3}kLdIYSfJ}yFICNG#`K;=mj9fG9f&${_E1>RK)YdK6emgYv}Fc?T7zBS6(^t;os*D?`&=c>lj6Nx4qDjBX!4gcZ6g#j0ropZhZ>g01ajHISSQ=V_---ELJPz*k2*L3ofwa;rK5c-oB zh}me!tn#Fx(9aUOhp${I_P`kh@i-*VYLmnaJE(FKB?V`(Ltq8u0E3t9ld>&VTpA+Tu}Go`0`XzUZb|59Pw9l9n*50dG}X6UxZO4-EtZ3i5yhrr+Kw;U2md zXT)egMZ*=N_F{qE8IDH#;6y zw^r(FgE5(cR>%dzzB+Hk8D(@~cvV>C2DcD8Pn(cWk}5eO7Rx0$y|bE4UjcyqIFHmr zg;e7kH>bu$I7y$J0btT3^G7I~$?AY|2R3v67R_1pSPiSxQCb}6&c%=f7izipAlz(F zog)tiljK+i!rVvxKc?P1EXnlm!^V9>LO}z=1{4vO96&V0Y802iz}zymWfXA1tO-SJ zopD7VBm^x`DHmMR(sU}@#&X5fRIJppsnpWSn)aFc&CGlMp5uMq=jD(4;P5{@;J&Zx zdwo9V3Ex#juEHjkFJ5HeV+LoXVvKTororxJZ5j<{UBeayN{G6pLN|f?KunnaNzPQQ z?)7MlnU8@fD?MZKPNCO}SSjf~N_z4`!kME}758l;V{Br(o8U3I!yn&FYzYU(SoO{L z_H(g?swt^0GUZ`&|HDS<)9bgM9dCd1Qki*pDXb2@FF!x}S97&sTA;cKg4eef-U}zr zQR7xA3>s8!;8pqDlI=MfH||i=(X6S&rc_wp@7zGuTxv+W)nms1?3Jw>LR@s~vIK8v zC%0?uMpT8-#LWe7?3dO!4=DNwepp5q1=*$z-!=E{W}wGz}~u%dEM2L|Ip44-;eukH+>1 zaz^FhO*9tu1fHYN|w`4Yv zWW&7MXpslFut-jjnnZ>;$!6<%$U(SmW%5Dj3C!6b%CFEe5Z8prKX^vo*fR|gLCc@W z8T&(ec|hQzdQecg@)}!vi@}VP(DH}V0RUs9ztjgPQ^>;5QxMir6=-J-C{JIR?0ujq*7HY>MLZbgbwSh;Zkqqbm*ZRuFu4`mMyEZUWKu@yT4 zLQ1{iugZ8$eT4S=&9DUmxH8=-^j3G&_s`#t#|@*M_-R(o{P8oVnXZ~{BU;oE=b;YM zQ8Lq<^{`;Ff9CC2!>j47E`LQxVGLzRSmSwf!I0@gFS*Zc)gMB%zHKx4?Cv ze{QtDO_5Rs)#M{eD z=y!&L=eke$i;Pav@yVvUNYG)_5vqF<^}4!IxvRvNvE46Y->ZLq@cEbIvzo}IA~~aI zBBHc~QR?V^S`-bVRolwm=`U9A%gcKXw@z!+^M})#lTbr$4J$?+xdxY(XXEs{g6--* z)lqJ5IO2Q%qkZH++6}zy;As)f>4X)-Ma$h|Sa-xnmQF8$foL6$9BCQipp;>e{~8Df z7NBtAcVu+LYR)h_%YoS|*kLXYoFclUdU72zP_N|I&Illd&4!b%{DC1KE8u_V) z+$V3!j~$Guy&Uk&95=`7?}zTMSa+9 zyAhen3S9_@H4H!cQ_l}>eKWH56~wApZx7gjtQifLeOTszn2vSdHr=bF27fu+dapL7 zR-TMCFGNRx4LdT};8RXH)hfE{7(uo$D(Gfn01=)E&s|reSEQIzkGIKWu=-5{@%|*g zU*r$65)kV+ths=!>s2{*PN4p4n*f6|6Z3j*pxUiy&ox!_Rp;J;C#Gh&GXR=9Oa#j- zDy->s;yMO-?T8(TXbZBR0<4=ExCV4Iy}J&kfg_t%RWXC(RQJ-rk>`7u0VWt>M!C6k zVPZu9{9-s;7Yj`%ohBKmC5=nq(JFr&$%hTuKDlf#TdPL#WjC=n71)0;}CG}YJd55z$6sO4!#{>5w+<#L2Ar*krW-; 
zBsC>Ia}e^j`lykn78hIGm_a}|Ua&k(@OB0!6{c$GYnAOmPtQ?I^&Oi6JCOHCd3jR` zVrZ5x?bOSWLH@`|=|b}=#owQ+)!1Xo+?99a!8B8iy15oP(b=^?0JhNJ_7<~;X=$v@ zGohPJ1}b9k77_M)+P>J)&`uSGQWnM!u{FdguMaKtThD(&M^x0sn%^=;VM5#LcLo5Wqp(+-2LdUt0>#eKB<2e17_7#h=sL z9e3YYi&)_|?4@-=E86A3;t*9w0F> zzzSt%rbw4y@d8279PBq~k%V<{g#&}Modr2uvNza!fs3MgUrK*XakTD3xy|pnYEviQ zz36GnXIB!{bQ^>7m46>ta=iHdeGcHr<6D||i(*|8UJX7$-gsmP8tqmKCv1T<) zHU{NZv33~o@Y=#bWt{n8H%1euq;@g|F{lH3uM#TikSCb#NL*nAX`Q=6KncI{|D9-n zBK~ip;Q&lDBwJlBA$ohdx;nc4E8e`;J1vrk9$daUTU0ow`lsnxNvV&=^$o{kw%h&U z{_{*+*X@T>TR+7RS2IMy;(9UZ+KvN7RsoqTa~l~?)2tq?-BUcYBRW!dJ;qJF z6OcieUJUurqI5D5m#2IfXe5peVKM6*n^ID6N{r)BBgPsYUi)3Thj-&+@MXwFRo*7zU8%O*-!>3xlnKU!5fdz*$w2gQ zaLBSDLyfhZYGXc@p5H!FiDOT9H{e7n%J&A%_Xk2An0`OUulDH{#JH5NSt=TRjHn#m zoOT1Ta@@gN&Sy)_Da{i>2jE$`pC8{2f#T&rM6;0+FkD<$W*%ru(e_`@{nCTF)5>#I z0uRpDeMvOk_)%Xk^WLL?k@%3h;LbnlT{j(f#s0P_|Fqkc*Jpc{9(E>wy3;i|8HEjY zF)_!xZ?egihx#6gijKpI@7Kw>tqFa#AVC- zjF1h*gQFuX9Oz^S@0dMtyqH$ zstp0;r{xYKl=0zwTQ>+~^-_ULEh1|+22<3ZXVr6Qba@O_@nl-@2i2FhRurSgGqU2r zG}4Oq@$NauDzhT$3;U6yfq4M_nQG0gMf3vPZ?Rbx^ddsgG$rgV6lO84ue?a{Y8u_F z(yIjqH^0nI-F$HN{e1qekMC38b~PNo@WawIqsz-nck8_K8G#V1X4vREcvu|>DAL#} zMZ(Em3mXs7$R$psp>2T0jKtM2ke%0AO9&6{_(`|nBjuER>r_&;)QF?VnN5qyuI{d9 zaH^5%=Eger!w0@G+fe@FQ{DIO6N)eOVY{5i@)nU2)yGv3Dor~ju1yj0r^sBuWM#W( zAtv=duG(!k+QeP45XdzrEW=aU%2A)y}@3UTib(U>)y?q z&s-gb+b17rV$k0Q!ry&2F z`-jm$C~q}7$~^L^vo$1un8Oqz`w5eB0hgayp*|ktSZ18_!TZ|VRj=gJ1wOnQ{fYnu ztF;1fobFfBe{qrCg?6;{Mcmr)%_HfUFfVxnO^zt|(pr6lCldRD{6r--ZZW&d(B1Ws zZVuKkG=>Ovg6X(9a=w(G@i;%X;r&$e0tKSyW2aPGKCypW$2&O{y!u9Hs9ImPCFkhM z(rV-Np8tGvb7!O~aEIXrG^~MusDUV<6RXP8P6B7fI(**oJ7wAApi~a31^c;O^BaNx zVASUWJ`_|_j@oPu%F8T6jf`|7(+qB=T~t}JJlbu_u1n&{@yz1Jiu)q^-htgtPHvDG zP*O6+<&&)RL(wUaQ`n3FcBtkM-ei)1^EZciwwPOn8xcW{u9`7sn*X>=bNEZw5xd-@ z+s}NNsQ4Tnw&dXO_vO34wFTvTU^ke?9l5vPe|(sWVCAvD2e~U}0in>qk@>?OQ}QI+ zKX%>yPk6#Wgct|lYFR;7$L&E!kdU-1duE@cVHZ^PrB|XYivfY0=yvRrXFvIaF0BMy z{{Ow-$Vzw2{ApTuBT+^60~zpXG+PT;ox!+VOJwZU&dBX;wemG(Xt+t>;kI>0?2fEX 
z3{o_!lrU$00qu0*al`-0I)k}Rxq*e0$;Tz$`S7W^59vQ|;3{=@474z3P%nj5qUOJ* z=PjNYs6E~`GiDOMoC(}L+0{(5GF`jTRjZl@@m9TKx;Vm6m`ThivuM2EQ#8V@z5`(V zYN9(%MS*T!$&3}*EW9C9yHTzi6nAi?A3Lf`$nt` z{gxyyHQOE?Z_PXzrbUzxg=B|l4YS@IR}U1CFzRt|9S3dz`aDyazboocUS-T9&OXxv zjHd?aQT&bGCyJX9EfK=nhYhFdM#^ay|3J{pXDHN~v|>qI8J&!2RGGWHsQw;*e?+2U zNp?iKmEE!o8*W823d_gr=C~CCl7A$F>J4#Zi+E)jIF=z~+>L?bxF(f%@BB-egIx(; zb=kNP<(wt>E%&k4KS7h`{8LQ_!=~z&1ta@fe|Z<<-j;;0WmV1M`tTMc?}q4WzKyFb zSE~fo6pxh%fy;)z#A&9qK8f9yr0`Yv*6MQ*m6soE-nOEhSqs|a+?0H_On>q4WvfJ( ztF;PE7xmZU!R*)?NJ=`8X<`oQD3xPe8~{ZGt7Uq%jDt)q;iK5@JrQbb7$Zg%pqMmP zOM}3`wkX&;+*wVoMJtM+uoo7$i{x+yJfs7))Q^>K<1}s%IM{9W`fl^YW`Eq-5tDGs z_u(|~ln!4?Ry&eG#ifn z5`NJwm6%V}Q66G^h6%wXz)G1NlYwFQlJxv)EHf3?Cy+8(34<#EB?0zpsoCkt)%df6 ziGGEdQT-h7YFbWPO3Fp6qtrIJ6^1*t=@vBE%+Hp+g{2uotafv@Sz(*FOe%Ofs@mfswyKmN@U{~TIbm21c?=iu#Ja{YZ9TRCu-Q`x^3}O%T76sh_$Z}B4mAC zaehWvWhgu03A;<|Sk11=`#`g;hdh?&v!W!^_P-|N{*qQGq53CigNSO~i^~^?P$|80Qjs>{l zc@XcuZq5E(Vj_!vktf)~vf$56t>h6yw)>Gfof?m&v}%O3bJ(`&fMGL>WPfpOwoCi` z@Qy#V*RDrNMDyHTyAx&fK%LpMt*vGEkkRAiAgIe&m3^KjY9UAJKyx&|>b_~BVEl*$h`TDAmtnWg?EBFmJorp*#Ov+(mkHV zA4|5-x6f7%#O|`W`^;mT>cH526d-HGcqcE)S_N3x=Gq+~PZ~CiXzC$kMzwi|SPmOX z;;UzB@#2CAsM)vscQ!W|cL^g?5}r0Jyl9W9>#!TK6SCLNYzy=HZs_GBueT!?e|vFf z8@R71eCr>ntQ&V&R>=np+HNMv{^(4$c|70nPU04Y-Y&wn+>>#8$-L^%ZG_Pu68#?B zJoz`;L_}Pqu}f-(VFR7WmUY`i6bM7*)+unDa$Z|Xl>fI0ba98$^1YCb?3*<;V&9c9Ea^-)3EnXt;n75~Pg zrDP3L$#0YJ6ui9qJOhZOAt@Ca)`=8#HB#K!*Rw5G%Ql|%(3NUzNc{=&=2QJI1mQA6rmwgE9--_b@JU_nB# zkpc6Z_QB7a+ZB7ieX?Tsg+U3qVYg}4Ap7c<-Qs)ct#Ld0>kN`S!SzM4MZ5dubEH6w z$5K>N53{UpWUag^d`Q=y;mSbZ!(=utEf-CAfqQwjjM9n?!}iL%i^>dJjfnwdb~ibX zh?uA=BeD4N`}Jfc8Ud2)kKuI5NNHa;(%^k+nCoA$W9gE{u;xRVz)w6DSZQsd70j2Q>uN5Rv-kmDke&IHk@wzo5K%hAm;0(WBS(?mft5 zZI!Aa!WrQ{p;0W|o#U!17Jj?Zd9mX{ zXJ<$ARD5CaappOe)B1_-IqWlorF;8pP6u)$C6ZzgpiE@I`Yj~Eu!7*BLbM?W6MW~Q zQ!IjN-(Q~SWa@ff;Jbm?Ej*^kj*uu~3_AZCy~Iqt#sPU{wFbFay_=e1QdLM z1-a5Sxld{ICPQqF4>aqX>weN^oBL>zAbLoVh8hmE7d5jbW&otfjObT)h*nN8_Fvd- zNP}GL>g+(@!au{ZI&u0HGSXBGEES?tGbb?&RE0-`w5R6wGTI~_iyRU=;Fw{+977i3 
zcrl(mI;5osvXPvFQW@SE%-eE~iQ_6d`(|cmAq#}G#00Q6HN>DEZQ#c?KwYji@;drR z2)UB6vNdm=S(5J+kcYXgZGEj>C+(R<(xXX=Z<5nHf@rO|iQ#0pGR-wd>K3NYn9M+r z1;kk1YDDAYPvm!>Ay}24D5FwXVVJ*vxS4VVRGE}hIVu&CgZ%doA~bL}K}0|emhXkX zmDkmBD-IM$bTePCfOsn*O6DNn^HQW&SpiK^x{n8PIP~g3;~4Y%-*FlN{*U|f%qsQX zn=IXa0*3@ByK1FfA>-8G>(I<80#C1W6yD(ZM9P4H~ zX^}X6M=KcI^yVsMm+&4qIYQmsX0Z8`AY*i@d%r{ReM-v;C8aumD^KUCz*AD^$2*f;;&vje~ja}J%9r%F4{Srbj4h z+Rpmr+gf_lymlxvx8K>xdm;t#(Z?D6= zU8U?^s5kPV+vtJZu?sgo)vfW7H?nxFswuW=gFa;oJ5M&9;arHjOMp_Zh z{&Bzc?V(&!m;0$s&P zJCIL?k%}2wAF&h(L@|KWDaRru#L&+ zB036`9i!~u7dsaIe%E$y?Cj>;1|=e8sJpNwyxYe&*ze1$k@?H#Jr<%47=es0#4o#; z{P*H_&1_2H2S(*=au|Fj4lIx8MANDd(dxt{Wx-BFhOCF_`(uo{9HGzf9ZTVia{OEv zhD7h>8Gdzm>O(#%b*rPBBN%g9%qXiqu_)0kWvZE8T-%3nTvnJ<88y@sYJw~wkEM3m zq<(;6PTIP00wmGl&wKhFVX#U-~b+PTajp16?D=H1nzwN$3#E$mGv6dA<$P? z=P7&88Vegn&8z4{P@m5NJFzFkfE^09#QUbyoe*F^WDzSb4)uHp4@wc`AxiKZKE4qF&)bqPr z|CpS(Uv?@v&FEl5&E0jJ7)g?J=!vK=&FNhcAl?~tD$9#wdorb_f#gZc1BIxMvnqP!L7-K89kPI!us&w?L2Ca`Tt8^mlr zA_!(~1L#U!$Y35UEHiMbTO;Kk+f-06=i%>2Vfyr(Z}uaz9q%qXQn*$Z4W_VT{a6>0 z0}K$g2UB^`!A$wba$=~Y`DO6Q^#D+*u^p?|!7Ip`7_eX^PzC^7!_u#YPO{q`buQZ= zVqARo)Ta?+-^6`1XGT$+Pz&mqp=MHH{zPYUBw3+CgjRrZz&+%u=RsaYGeAj2CqT9E z*lB|+&{hVUT}?uAQQ2>0e>+}~Z;HI41Ur^NqY|9%CB<%}NhpGom33lj&6?1+X=GyK zUeb^yvZlck+UN8aG+eOT-SB z@kDuHKd?@+KN*UOWQz= zay~)LiU{2M7-3P)POe)6E;tX}a;J1kj92vx72BQv8@0w|e+e(85C~EzmkqKkxYuTEq z;d*uW6nG&((ZVbxb4-aljHrwc5B7UKKD(P|qKoC~Eutl>u*>HvD#m z&;coTbA+yF{UWapHN=3T*|o(L@R43KYY=u1Xs@}uw@(TyzU^G@)W2G2OiKD_<;A$2 zZ?*q2Hau06WNxmLz6Kd~_SQTckm!m{+DnX6XAp~f$hQ20!xpJ_g_x;Z&9Hk|&Ky`n z9z;x33i?55gDPPl__}${i;TwaQ}o|LE(6%?ok-2Pl1q;a^9OE@qCVsU9q*ji0@a}h zpDu0AD_Cjd{7U1wVGIvsu>1$pGLon8)R`}1elH;VK%#z3$X`-Klgkd!#E{_@)l2g6 zA8Qh(v}Y~9uQZF+G`_Wp*;e^6`f11qZ1JN%ZcQKDn<=}6FdPC;qKVQFTPI?jC@S*e z2)EOlnUYFwR3mv?Tf$8f992>~9cX^T=B*p(RHXMMF}cQQkYT;LZLxSS>983n;Os|8 zdC-;2ECWLk*TKc~^-14S2uzdA#@82Vd#u;<%cfeUBEOIRPaZZc9pw#l>XmNln8Di~ zW}uJ3c`bp$bbQ{wR{w~u-dq1yY|%Q=QRD89&M#w+F7C29fPP~$F}1&A_&8z5!Xbsb 
zxm<-1TB1&PfH`HoQC>Ugqif;WdyRFO`I#0VKZ={oE_FFHnMxEVDHIftex9I_^$8I8 zsC=oda`(0~yoPOXt#@3tAY|?qF9PCtq7eB$;R#qS9#PfS!Wb#_^&($~g-pj^1Dc{4 zR>N7F4jw<5x%${&w&tVdN89jJ;p4L!i0lI%lsCSa@VMD;&S|gD5wlJms>Q))>#qKD zp>NT?YUg{T3>5n54Obs#D|JwqFVZxdm!U!3rZBTv8KW6kA1#jCON)$!*k`H}eI#L&xn_quxE$R?x2_0C76 zj8|Mlf8}zBU)hwKWBzLKcoc#@X=waS`0iP-wFQnNBxZxgX|&u(2{@&RGl~U)jv8$d zAzL0grKWqj|Nf^nJM`tHVYS1Fk&Qp!NlfVgY>7K|w3dgm4?c;jV~Doa)h=KVCJ>Iq zj~qzQAU(WW1_>!R9=BR6WhJ8q!yRBt0n0S^hQH^0l27JrsWpy0*`4`d)!p^2hk}M- zL|R8qxL$?yr(TQ-T71(Nwfd;;i=^B5EBoiSS8uP@8}fE)Ya6!Dyg6Ub)qd`Mi1;%4 zF7WVF7!W}w;|3oYfDZb195DN5r$c9%x%jg8^lww5(qQ^^P3?U}iFAhwybE@cx5MAK4fN-aAkdND^Oz%YP!9cS*FVH4%Z(d}e%Sf=e-UoQRaL5L zXXijM{Uz7-dS;XllfW~YPTJ_n{&mK>??5DNn^-S`O;UlTeng@y3h(mIjAc4di&^5X>PBw9Ki`)$Px|$IdjImt+p1$5S#KnCr&r|>0ehbv3aQO| zX}OVS%ahu|%~;te4jZ2$s%BI_X*!qCCcz8EC^PIIM9$xR_48w+_#G|~lu`as05 ztI3-&0)4FR-LtO#4D8B2PR!rNk4pPvX7~WF!NIyP4J0OUt%4#9Pnp*e7_cZY7M~Kt zbT%JAg;%iXAyVl39=R^`MUbNI*s!hzu%awnUnGko;~6Pa!Bi4!Y?&|skOlRKnI-9> z-ZI?T(YuOR=rm|Zzxda*`U%6}sPX^yL%RNNIDrC&6Uu+i+O8jNYHhEnIbYw@+1`5b z@@<3vd1)?Q>T18#(Q;=lcP{#;2Ppn6)kn?e>4!dBcN1_M#ZeO4FdMC~D13RZIjkO% z+LaHjfm-y7UA@IZ=E4uF(8*|pBZQs~9mm>x17 z>ozF!9t;$Q>t$Vo3-oq0){)ZqXmNe(jK|Ir4JtDwN{lbs8{L z_6xrzxcuu^X=82(NKYRkBDWfizuL0p$kgidVdOxwIz=AtJhKjz;u{DYsuqNk>_G$w z4D|Rkvg13iQ^UB(O~=&+-=4C?8MXsia7+*vPLhZc0p_|xRiTNwEb>&L$XJUI{gqL< zT@x8LOz@X=v7{U@V{S!&*FpQ@I72PItcOfEcnyzO}50M8%dHt1mZ+` zPU)1UoK&4{ykh|u@{D`+OUPl-U*#usM3PIelBGS`gEyryHVc zgHlcy^=E)kW}u(r;yZrvT@F8FmOI@yQt)?IVD4@IEA`{~_fAGs$c_whLBcdPn|y}u zi7~IY$T1EaeomStpVmLSVDBT^YAEuN84mH_u8)$uVWpN314)dd{X=}<;j5ufbf!OCdxm5;+35^eW ziF+~}PPoxII9x~w-L_bsT^0ZaJ2qEVfY|_sHF*@#GTE&GPa}>*;8v1ORn#;u9~$51 zR0lViYYsOz+uvzO@!xf2ZP0~1pNBMpJHs`miBaBEpNr$4>w9}l0UIjM+PRSxu&r@h z;90iY5I0TFM+mCnQ3@Uz7YWUq{`gF;wlR=Yf!fwQ83uuVzA{I+8_}N;LVSMk*Q7JG zqFD)hw0rpJ4u`%N(smVrVjt=h7@UN)9k{&+yQn_qOHs;kILAguQWCuW@oauwMUzo- zHx0A*s%4y;`KhUX-N19WT^p=SR}=}e!H$;AM`)3x{K?1Pu2m=gmFMaJ_2u+M7#)8cKT}nd$NIw!)+Br}m29tU4|q{r(H+we@wgEzK((L(&tZkMzei 
zjeYCAy+$US5BpqiC1qcqD*}navWWJqsI~i&qZYMkxaVw~yRMkMiKx|sbr}xeg_%n# zEpn)+r8`h~J)B)RY=^-)e1;VSED577H)SsclRtJULq`8$_8w3~_|{;#CojUH!tEpu z9{RLmP#mWye99MZ61nA#P`kD+aq08k>T}fapi0BfMsmKd0l?M*fgV$47Zz_nBZurU z6ND{rPb#fW=;7Ip6K>m#afvpyt+lQ0n+*W6sI&ywtQy(|6$fszx8Q~}n4ggWHAj*! zveYrP$&osY-k&}qNMMxlu!w@;cZ)a?71DTGdn2ZwHWm29E=*7e%lG&kF4wk%-eD%_ zJEsXE$m3T_7SWwi^BCh7 z5?j|tiSxfE6nI;P?+KfdCxi#A-)JqTxGY0BY1?qbDJh1q+6+avRXN4arS4}A1&P*Agb!p0Wx%quUK<_c>QX7aH5ykq8No}e4vB&m z@Anr6n4`{S4-4Qy`c71ZnLgxLn5;K;$@EA>P@JaP42c1rv87yHUF zx4#alYFiU{Oh{?8ViRE<;fOq=xc)vbHtDA^tx4X|F002)@x7O7qz69bzQ{aYX_8?z zvK;$MuGh|ONnv|;sBsoh%?4XZaFFkh+O$~pCd*lJ)pk6t6oUy}d%~bRF?%?eDiwFT zC|s>WeJ7>+VJ+hFcVR!jfg;cl$Rhm(d+AnfpHIjSDm9tm4+)A$4P7}!OJYZ&rC(kqL@!xxcI~IJtJ@0ZS3V|(w%M%2;LauHQhjKpF^#QJoAXHOexAW!^5|mx~+BIfxE*}GJNlS?Sp_JOc%~In)Zf`jo>iEp1IQ-rp z(hsfNa$*bRN(Xw|&1R025&9Q17Sj)q1rk(%_S?Z5sQIqr4e}}pKG>wu-UuZ+rQ%y} z8Qgo{`~MX7-(l9D+_^oGDw-%&{rWzkrzn{EvKX6X|7!HBqAh#l ztJtauN7borh7dt>8b@OyKHTGSYsXtw zVY>NuP2s3!5d3uK!jm_d zX2{zY1_PV1PELwotc6iKctwy7v|eWUDkN3o+jnr};g0Hin5OG{=Dn9NLx-O zviJP8=byH39>3hrUfO>C)juPb-)#S9?}Lh;_PG9b=j?yHfWOcG_}7*5uSU)udsSHv zYSKN_T-RMb|LW>_t5=`D9z7liq5h){HOt(yI^%m64nUln z6*ItfC}}P`v~lYWW@BWZvX#`RXmKvk-@dN1gR#TtobXzD1SAFl^Dp&Y#rAPYFe79W zyPC$D#tYo{53O`>ZS+oFpk#aB9nU^6+E2Y?Cr(S(CVbvz9)6?-AK&S36IXZit|0uu zT9zaz6sft~9)f;z18P~WESEq}l5o#r6!#B(i~aZOmNO)E5%#1-u-le9yWkq%{SzwR z4<9d_k3J-=$}6iDR(B2mGsNh;7aZ$&eg661{HP3{vK?kWT|S%@pTRG8v+)kA=GMe{ z+FSL}Y16*>v|3uy`I+0z%_<5%3ZD#cBWZ~dLC#7HBHHnsI&?@HJO6vBJlfTy4Le@= zpYxB`59o%cJk-t$P(S~R*vdVeM#wKIE0y2&TsV3^`}3Ynr{R&k={fUZ^V4B!Fg3g? ziNuxWmPJqCEWEc4omrQA>qnCZ>QGUrK4<54?r;9i*vlpA>RFY55E&tYA0N`*b13IWhW4vI{P{)lXhm$43BCNe)Lp2kp)Am7? 
zNps7e6OL>Zk_u(@(*3Na*4SSn@hQ)SpTyxeFGpqOHOY!03}sAk36V3&H+18@>e)*j zGW+vT^Mcaw$kJeb6<}_Rd+$(g`;_e1)ZdBr-g|fS=qIqo>Z3X{GoJrl+P1k@wGRWN z@4dRd0}W$22BTmT0B^~B=blBXi_Iw+zh>NcsKO5!>2WRSdq!|x@F!Vo*~^9)p}~M* zNPF57ftMh7#m+OsgEl2NE{#3i@YmQA0C4bAr*sz@AmyB0Zi(}J<j_j`0jWr^+Qo`W9tlA6G?CIdbM=f>X01LB6r%ktUhM za2wM18YF4^+I^a(&dQKcwO{>eKc07DIr~gqzr7rOO&k;z26e<;yuHm8_lYmfn(|kE zUIDAIrVMxHps?AOHMBw@*-NoLei!e*+^skG_V>QGh(>nI@ay0ikmwi`lxl#60WQ6b zbHSxArLF0d@!h3I>siyyht0^Kdv<^S%0MY z%sFa`W~bQn2=$KIee-nBor8?6}==z=3T6@ zvME2e&rRbB)t|aP*aTYMT>F5b`F`|BU4&$+uVhTb1`<1*GdzfSnRd9u&O zmNpW%5`#H<3ej=?hM8b2@>_480Pa``nhB7s5Df3T&LvqX1FK*IGymT^%7&xbgshfZ zC?9t*`XKY2>NfCAum_4osSqMLv!|g*pYv|vrZy~(GPArSf%_vc^Dftq#FtE@w`$4H#+3;Pk4{!4-_9TOk03Se42Y7p zWcvBZ#}pQp80}%R+8UM2Ntsmb$V9uVlpoiIh*Ngtxoh$;S8IIy9ux4HNmq2Qj$s)?rov0jh;(N zVJB!DaFc_S3ech_R^BBKV1W)iqq)Ne`w(H~^<^qMYY|2vmJn0QvUuZv85;6zE4bbe ziz1aS#W>W94%7Ch!y4v^5dpkAZV1G3Fyei^v5T_UUxE{ zJ-fQj=WU4T>11C%{~k479#a}g^4p3Uj(lhsQry9xtn*St`&R7;3>0Q`5`or!-QiSv zG|2?>V;lk&RGz`c8@5D5NO?Hc#TM)me{}2Z;WShuiOhJ9u_l&ciqJ_-p!czs4RMFV z#PlB8(7OU>kFwK)+m>r86+Gw~Li`uhGy;`&x65GPqQQD`qSZ8P$;$*`PM_1Ax1)@BF$gbyBk;`E8Qf3cZ z!tds|^XT225;3@u6C60Qw$uW|8|*1)Bc0mQjI|KO;$X-oXLLOAJC4NAGV8uU;OlWo zF!j0TPm|t(9Xns=ENO_K{&xvsFOKxLckiG#B1vHHASovhbVuv|N{%n4ebz!wefVmx zn=zeiha0X?9=fGhUfLf>b*y3t4GL!C4G8Oia_4V~CfSd=AQhnsIM@1)DhK|zDKS$! zqeSHfqQohX@w@&akzFaZ5>W$kNQ+xgp*>Af+Q6WazOzxDLt!1LtIA zaZv1_t>c>VG=@`5e`u0t%%=4gwi>8KU8iWlqB9LU2+c651?3}xV>*nXw}1J2)EY?B zVvVFg#N5mhga*J`PUA0F#vNC`ytYpSV;P=dXf;3+!cW5aypU^jjdx|73Y`85HnJxc z(Y@@eJj`dYTxWUKmbD*xc2BacoGfh7@emWs>%jd}t3?YD-XqVgZRli;uP}+K(M=O_ zL`9-o-D8R_pjSx!Q=FjMa)wu&+^RqN2kckE@lATmJ26dPf(ASzOyh|pQUlw~{gk@6 zQt>g=lAZ;WiABB`TP-wW01D42Ni;J_EoZ>3rs{6xXJF-u6nUhAn>EEJfjOqpI)eK! 
z7Wlp?(X-TW=3p$%^kNI4#r9(HrOXgi0Q)>I#ne%mG!pFSd0$;b3b{PRwi_d;G$|2& zl=6{c>0hBv;|KQ|2@v`f<4Megy4n6M$ z*=pqGa02K8M;cl)_n~n5OYJ$-*l3bA=y71bZBxXw8hN4TaJWCcL>J%?yWE3*KcBrv zILZ`C3tF!nP5)`ntg|7jA20ay{B`c%zp%-Ohuuse2&4`ZcET7`hrxtS4@TqhusqK% z*t-E|>*_TRQfIhJ>I7sHKRSwz}lBUsn`U zN5veqWg4~8#%fxoW~RCK{@(X@&gXnSU(dnA`yYMtKD^%V=j*vV9+9Qu8%3u!wmc~( z^r4D)B6zh(a=P?i-5)mMz=0S?G@cseDDc+Qr1v#Me1te>e@u#;F0^Ah$vd&1M}RTu ztkH4dY=}XKUD6K_aIXt+=VSUQAA((RgwgZV)r}tUDj}*MY}&Qh98`1jJ*pFYc(dZj znfvm=YMaWmiziOs-TU$IwzxC~Gfa1}s!%2wSsb|>6AwR(!Lvn#!4(#^G<|~$r<*g_ zPlVAR%9v+d;WUM69MWG;w^n9ii`(W`A*KmVS$2jNHxDT)G{04R*_4+|4j17#WR6WB zuH=bI_dEXv<@IFmBUH`q~z@S3}{AU zF>>V(6RBs{BA@@zd+TQ}2--e-BU63N5}29Bwas9M4+Mbx1JkKV8IxHtkJkC~a-Huw z2}V-;36i1e7b>h{ETo8`9c$@YO6LYT+4%a>mn1m394@sjE(DMZ&qH}TVtG?kj?y?u8OYPc>q2E1V{uIC z9#T`3uP?O>X+cCOI5;fGzw6%x-D7`Q#2x&)z~PK-3dT7MX>IKXt$-Q5Bcre_(IwEW z5PoktP!_Qq##%cwUU8D)IL7e2dJH_Em>+``r&~pM1!*u=8iY>ZvP4B*>aymK+)Q(Xm%qeOP2g6Fazv!RIv9DN+25-heD2x)$hD3V-k?7o8N@iY)8_ z!LIF*ZyP#L<9YUuw00?>N8N9pgV=*BgOST8MmI^U`Hj_%VqeB9tkDu-q>rZ6{Yc$_ zGL8(vR4jI1W*P}*06G{sG!2;IDCAsiAY9nZa`+(T#j>UKev+wYs1kVBsMny{F>>wh z^OL+}%7qtK>RMFj`fB)aHjE!h7=#qp?=0c-WbBa`nz+&=Z|xTqx)Vs zK05Su0xmRk6!u%e zjgc3#allwYKw@mprqUn;iJY%TeI@8=hzVbQFZF>nJTs55`t#Wz@C#1cS@Bjc3j_FC zEHTQK`O=;g$Io6Ub6s1PP-O+Wg@v#_XcV3?OedBVi1bIs`YR*jZ=R5Jn;h>ig!$!k zo%lg^ac6{c8Z>PLm_Ca2R+R64OEm;1TcVQZhz1szNXv+5A_9G~?Y)i7%v+d}{E_wX z#Ta62rMbs@^f{qGCeD-Z!|leN6Ygt3b+)+*c*+R+#BGp6QJVMI1mKMx!4tp&FG+s* z^A}Uuleu+%b;q+Celm@tia4qsHiLCJeoUyAttMfYIX_1RMp?h6MlD7OfBv}Qd+BWM zD$@{71|p7Oo`*5vN5s#ef)T?j)y$yuD zv3fr9Xk6_ZW9fP`d&}t4^U)>cVa+5M3XR%qw<%V?UD2UWz^aa@r5oZH3{)zPjrM=!qKd%$hD>I!{fXN}quG5@JX4S?5rUgBc zrj;Q@A`>EGS`*!B$R^eLCU-c2E~P88cT{)M=P8b{quvwD);-=u;uf+a2u3WoxrPaU z_be@CvNcz*TM`n91rH=)-=J#LgBbCSU->FY-7}4okubgnL6nK1j4Wk&=Gzi@DH$q|G5!paRNYfxcJ`;@V` zOZ^l}l6c4Xur29NiP3;v?rJli`cu8D88Iw&)J&~VA2qJd1=$6apX$~LT0N1&9sI1J z?`llw@UkuNIS&HH*-i7{or-%L5A6@{Q_hFF@s#u5uoi+Vf?tz5{JyFF z6zdaeIT?i^bsSj8TGnKJgxxlcvTPo(|520GnKk{O_EhtX9N<|H37!2;|NV8m3|8WQ 
zH7OisP8{86LB31si*+NS!fYzle5xb@+BC=)2MHZ=+~jQf)%2isY-k)Oqmavhm+|;@ zk?k0P%6K`v^R*r|NIA#G*hkg{1Z^O7)=#sC!6e#~JY5K#R9L3tk^u77^#2ZQnE{odOT&Q_j%I&g2G`_7$f*RIg0J6ql? zRQ`2U`+`zpv)?%LO>o@L38uQ1-$(}wKG~i!v8WlA4iOLib`y$YhmnqgXh}HqzmL}_ zFdy)M%OWllrZ7O<31u!x(WiB1+}vv0Su7}mkui_<5{BZ=6~h^Mp_`&Q0c=8W8^tKh z2q~TDmR9yt+X}N+LL8fdl+ot`zc+7J;{5>A@=RVj21>SOEcDd(#MmMB-f3>Sm$yIe zL_<~NCr>|Qa-;fCEIiOvg^IfT&C117wpTz4?3ll+{`sFtseXWhJnYd~&fbA8yx|BtG6?iZb+FtpRD}HMadO9-7d0oD~K1uxe zr6xR4l(GBn%3$QujvqOH<>T9BWZwi04Vne7I^2B6#Kw-$L zdQxr>m*vMKt$uSC#@*;+M8vi&bo}@*tOIs$IDpt%1Sijaea%JhJAHA`Lrp@G!&Mzc zt52U7rifLhgOl_8744{FIy~XCK%K{qXz8eShCb)Ci`N<%1`;Z(n~-QH`=sgNRqi zCfrfa0cvz{hBF8eX2X|EbqdSmY^*_8X0Wd+Kw8nQZ8Z`Gem1=8iX4q#$(w)AoP5o2 zvnXw;2YVgty#3h*%xUET$sSx3$tRY}#WihQ1Qk$JvU;H!IdHkJfOA@YFrtjeZUH{> zsRjUYz@~T-5WXyLVtA3gRmJN0q>e1l{*JXLV#hiuvuTA?r`NDBii$sSKx$ygYW6TXVV`;svvT=IG7jvCpxZevU0DPBJCO zhY#a}!^4au3>_D~tstbJa3aQ}Ar`HIV;MFBlb7c|C>_GQs-7^gMoL0k@38rk*b(u3 z*Uw)aYx(K;?fu6(qHk{n39MbZ1TE|jPJ@@CW;teYCbO*6ns|c~t(-S49%jnhNL#&} z3&fsK%Tw$V@)sc6zSfp>RM){r^VtMMtH8xeg@(Ey1Cgb?NhJ1$*>|iRCcY>*`wf<5R+)?qq+vWe|INfj6D6%=bNRULfH3B=tMRG;rbF7p@w= zJnBhT~K5sU}>@i&gho8-#Q>6pGUo{ z53&K-hg@%%*Y=G6oxkBbThmn9j_#KiD0klf9`_Q`()8eD`j20W^Pm6PJmIVflVF@< z@lRQS8x_C1H5eQqB_2gt5~8~eo!J<=2h?dR#%=fpc1HccPgN1FjAbH+$8b$fl57vz z(#&LbPy2|AZYQtKT1e728B;?%y#^4U?`}EbX(P^Ov-3@F(3LO(C`KiskTdqHTNv@r z@K&BE5IdnrpuG`Cexfc)L&wurF`Tlb96nS;8?&xa5_7``T%`NQta`hlyYXgm*X9#Hh+9G+exQP=|0fg1VAqYf0>mU607S-AG3Sc%Y!hRc}^vW4v7>q@psDw+w2vv4aTOT2+ z{Yp!G^O=VU+6=6T9p|xo={NhA9j(rHeyn2sK=P7T9ocf`P2KDv^Rurct4lR2S7Sy- z_g2CjZf?K1`mc{#Pj>fN=+wErmv(Az$Ypuo&nABkD^6K?8hrl10TC-1Y10;~%vNU< zbgvHz0lr|M*&}ft8F-$$qVIm{U(w|ZM=x*}ac=z>ChBMCEjlvstGlJ7GIoLt1|2vG z!CIGlTbfPXbitO$x5{|^NyP=0I$mh#3QAUX8h{HLtzbGnu9jKV@c}KGo)Hs}`yn*`%)c zgFL*1Syc+U#&T^dD6?=&mVYeWUmn_jk>9vf<5>=shC!kGC_XH^xOSK>u$iOQfQzzM z;0Hx;$`T~rYM8Q1GKz2UZ9ygQ+`HJg^=P(smD;m}Wt0k62pk=&#nc@S=t@0G<<&z% zBr=EUIx~nzh(!BIaJ7(=N9#cza1CK%Qe{GkylK@EeO&j zil3VuY-b)8ROSR@dL#@^6;tCNeRe#E1BU8eA+davFJlAUr_rKmTvW13D&}qyeu%cu 
zIZgSBEDkECTqw1yE1g_b1JyC43tkm3BStym2mqZ8iCgVP42KTKj&o!Y2F+9L-RkJQ zZE~H7XBb&i1hLP6$zn5Dhm&1-QkiAgg@)8$M7X0a&uh6yCAuA^0A6s(i|SGiu_HYQ z=p-^^_7IFO&W!uH2ol>mvGW}Fu4f9*0HqwrQy|EOY+5Z3AES)QjpPT_GTDi8g}J5e zEQPF+HGxg}>9)@)lB~#O^C(iiMx?eLE>3}bvfsY_p2ftcv2KOm7c&3d!uH(yrN@Ex zw^h404e#F(jl(8+aKkhqKVg?IS3UUf^iZqq_T3;~7m;$zB4w|8u)%FZe{ zF+x}woYdDB^31z2#dMSS#;^7dZiP9cfMr9Du3e={!NARpCK}~Hs~}e2{Yk3(c=6op z%H-PodoSOoQCDL(Kk~d0^!oUZBp;7nZA`*7W%jL3NQiyY;AzXU4uV-&e>w!qv~98v z+x9w73oI{>D3V?ZACAT$mP-nik8i-5u|nz9+L-hG5r@|aPom5%r$32-@r{~nR<7Qb zWfQUfj-YGvr$f7*DzCqJbN}#5xm^>uH=AQ-sRkqJhQn-$XIvtv63lLdqfc~XQd4U| z;T>q))lTJQ0*h9O76jrdj4*L<;U@EcpHnV8n0c7fgMRFEWbfNQcd}ony~^172y&?G z&Xv!PnL`j|D2U{FS5HU@(Vm!afD|GRN+HsGK9&nVi+2kN-8Py^n-Pww>A`@F#))wt z)W8*)(D4HA9QTg>l2GFDhbt#eymoEL&d)`b@$xfqID?tGz-hE$U+7a1NrGFqfE5TZmt%5NFu zv&s`u13)|AoveRy-{tSQbMMNV+;ye?)0-3SelvUMAD6y7e$WDQ@Tro~&Tob9ImEJu zm_zM46zWhY2IF4sNtaxaoT1?xD7K85Zsc_10JBwv3-@6*)fvM(4xPMqB13y`W3ERM zu*FLTjge`sK;6)ORl&l$8ius%P>CsoC~E+|RFwG2F*g7N>YYOSC=F^KYgZ8vH9Y5S z9$hMZYRV0AvA6yE5o-HCzkIab2YI>uz~;&F?N4oW+?)BIzS`J~+;-)B?v_Hl>(%;y zl|_hcc3pZ2oVAB}{r%t0AA1WqN1kwa7DF#&#Z(Ozr$0LyQq`NthYWfaFUclqWJ&@D z@a`Ve&R6D-U|&;X<2#tN_X_t06OYUKD#6@3WS5!uc0?^_foS1tz5@?y!^IbdK4aP3 zMv|`ZCY=!47om@x8bC*_NYRe83V7UBcYUi*VNg-luh~hmT)K!XNm)a zJ6R3hlc4j6xH^vy?G>8n}SL0B4+T2#&H7Y%6 zDqyoL^iFdhpd!eLG=z;OBbejuIsZ2QEFZbuw>YJb!>xOL2te4Z_u{>mr5;aD z%gIX3sL*Gz9W0vtTH___@Qk&4KLJxB0sD>ca<({_<7i9(UQ7{|r(XN%mU+NR zpnn*CdP;_{NC!T{G)S_;6?A@oUsWw*+H>{$Q>7pL)j7GW+OSyR3}-}{rnGo*8jDz)ATeAy=AIVRmFoxRJun!z(YvmaHv&t zaCJ9ViF+AjHx>E7*5q|dx;J;NHHNwS0cZS3>|Eq+dd@lzD=bOdvI4tp4FI?TCTsFZ zkB*SPM}DP2n4IiM<{Uyk4rtNsu>Z{n`9~8Y-~=o7eaX_&Qm!|0>(iaiq9( ze7xjTWnuZV3RRV|T%oLb)-}dIBbVp@kPS-JHFR0${?D7P>prH^`tN2IHSNdW2MG)M zVu7)52-D+qaq&^O9aJaxv9(3+)C0F1n|Zt*^Wp>0OYdkf^r7*7X%1`wN|>w3R)Cjc z(!JRk?#OOLVI1OwlT-4gPx|V@nxdh1o#2p^m$&!-!{L^}{P>jr6aD1`&hxNBcQu8? 
z<^6EdP1(Es@$Wx3Pg6`JB6hUyVXPGixwFX(q`>lVtyZ9q9>3q%9&zf*dbgw`?z7&; z!lSxEyuB-aeA%^&?GDQkOB&Pje0@Z5P$RT_xhi&10h-~8$~w|W6xDomgmh99p69{j zkc$;6hVlkuhvfs|ECCTX4lqGL`Xy>Yn-c;E6{FA&M>Sd$-3fI=n6Kp%QxoLzLA&J59ZLfucrel#3x=4 z0|%ye9v?2O;tVSHvFJ__RnQKvkol8%3B1_V1fv0#BsZm6$cS{$9)V#sYGJOE8$^(~ z%0&bRBOY}Kyi3{ToV;ovvv=WX%Fu7CuM=*j9$K@ngdOhKT5`EckN%47ki}D4uW@!r z0jsLoX^AQ$QwIxA(u2;83ta`A7mQp>?V}kHjuBOBD)J$-Z8NXbx$wIsXYyQ469n^X3zgOh;OYz7W#^n?7 zv(=Li6k&d)-52$j!suyY_KbVuv`cG-y>;2tAPY;k%b^fCED74B6HuLCrJGm`CBUx7 zdPj!RWr;OOkS;NO2fDiO3ctKiB{OACEbFo*(K&cyarUH-Oe`a7=bb3U(c#r0-X`Kg z&lMwndTwCjo~buax=_n7-MQr>n~o`=G$cU#S!J+m(04vt{4q3Vq#$aE3{GGX@7#9u zNB6fhptt#xW5F_sQt1-#zSo1CHICjFtq53duy8EIMwwDMsu8(4{ zj4#46Vhmw=PFYX90St%(ht(R6=eTfE#_(OOYOIRAT5{<1DgR^Md!*YUEb=~A9)}iP zPrUlOHGX1j&w0zi9k4Rl$i^y=4PZy4=N^ik)qboAKuknStEqpy$R7o}3> zS^)$Uqk#AlIkHktW3|yTl=FI}5r9l)iiWfG=~J0`ws6Q4ZUCFm4F>vP$(DR|^@N-q z9IP#t$E!5FL~Q%O<`@o9!?EoY*e(l3ngj@_>*@?)HZxK-Px%$z^{3i3{gDyCZ(j=h zuA}9l`#+P4tnx#)nC)-4Sh_n>HVAF9%h_g)*`zBUbbtf}(Zqzi1@O(Jb9UIK3gD(Phv2bW%5n8O6-m?lmu{bB6;tR2;SSvgv%ktJ87xhv^v4o&P$^9`%1!rg*jFM|Ws9Ojih zGg~sob@y+?<;__3It3bjP1-jCj3xTJ7c(uOMG|Cbr@3W0x&iMot@Z&A(FE(`u__lP zrb0+-Y3V}HrtBz#9IfMgrn6uCpfll5O1v7^7YVhyt_Gos=Fx|hj%|fkxV|znpx&FQ zvWACuar0&=j2kBOz@$-6wgYjlv2Lty*{xr2K(zgM+i~(A!47sLvs`M5x9>(T=uWVX z9t!d8Lsvo;BW0;WE&`yM38+nWUZ+!ioxQ<)rxJp!shwzA0iSltBOq8ZEZ^#X%0JK6Mk4yOfZwi}CXQEA5; z7#TEAlQA!Sfv4qk6MOX7r)SO`oqor)8%-;8$Uo9*{>WX*UC z$oj?G`fXO|6Z6-sjWd&#(aG||XFu5<-0l!p+M#kU%f?gIcl`kSfGLeWAd~xI%kVyL zQC#bZiR9o{dJ_ojV@c3M3Zys5OXIGheVHA7jcT}*uu44uP~6qX)`7eh&A6TjCiZl- zNhX-~u9cqMQKe`XIEkI*Zn}I0^rt#%8G~kbP0e4_t7rC1tEF~1J2sd&I3HG>RjV^| z%0II1MzyTyGDh}>pc3u7g+(Hlh@p?opPnfRAhIXkyAXt5)Bq^^H!>QX???JvF#1Bwqh+!jF*lE2}&0Fyn&?<9Y-c zxRAImRUW(y&{1Zq6UkA*V@?8Z6QC|2%T$!uWULp}#gz}ObTuh9qiX-l$bora5_S7* zC(5`QTS!qDb7iWc*Y_2w;MvS-w=-}`O@~5#!=Drj->gm8`$Tn?;KuiV)1A~jKQT5D zT^c!z#^^%VyOc*c)QvyKY~5O%ZvO$jY*Iu4%@TIRrpqobRoF1N|N1C$WGx412_`xX zWRgWI>F`$ZBd4$|92>Y$$$)28S7_q&7IN-ibeYa{x4^eV=M?ktvhvFGi2eO*-FPUe 
z2RR&yvk*K**#XhE-}YibbD1LGng0v(NAFNE4JfqC%aTb@ttyCJNO#0#g@~^Vh|4VS zubhFHYk=ZzY$C(^^%w<7q99D6j%0ERqs3(j5Q_PBA$)~Rd`Vm9X)c@B!fLb-PuLOX z_H>o5HVDFjO!a)>%)GxuEk(yhZfZeXmU%R14tJ!{ke-ZZ3O7fHt}4p)t^Tjfg0r5(s$I3q-`6+ViY@bh!rKo3uoIjoYIv!aTtlsdOtP#va8PtZ0&`v zWDm;)53mQtw6tETe5qS6p6b^PxfxFSD}|S*ge35QK5)A63OKNP99SissQtrjqEhWB zK73xISf2zZbcT)vkDzUsFD+`wCrVN*KwKg>&XwUckH(BdJ|Hndf>U{-yRk%I1DzHz zgj$qC4KDtVtkaZ~0DBEaqo4wiEx>7qc6rSq;l;xwqkd_!qaLw>GRR2vdFs5n$U6Ga zApguV5=C92%Dak!Do~!JaTv}7JS|0&Y>m?ZN)_warO|7;5oM%Tc^r7z{47BWMz)%= z2`Xn0bMto@yZ--QLSYbD;r#NqxyX0QYwiIrKO zE(}R6>FPSD3&cis4!sauWf`dM^NGCxv?pvVpN1QNd$CXn*knus5kCRz<$D-nqYRUM zb7~|>=E7)Umq+H5cX+1O0j!)|c~H+ds71)4%ye=#Pt6Gj78&!#yzRQ!gF*caG;>Wl zCezuMEP^XFg40{28|>hl?V@EiQh>L{jt;Z!{?q6@ak0`xV z229@sj?J<~Uno}yN%Nr|4)tz)S4NNp`fK(=P!qisDN0CYn9>(k^x{zaQN=2DN~Cs= zS6Fj)U5JC;+c6oXcqQc5WyHA%Od&RNxn1@r#79P{2$zO7hL!C~Otg7HX+Oa1L)N|m zz;S&Y{Es1zsRtg6^xm7_s+TNzDX8s=MEn={_ILIGk8{!J=zi>~SvMCamv*8jeG=Q?h`i2VYUggmc-hvA0 zV@WjErw>oniW!?lKFjF zVxBs~%y{rlRDb8MT`iuUGk;$WWE(_vs3zpx-<|hmDqc{ah{hTIE3xJ2yB)C31EMRx z->sC`PE{{;+%X$D6It~D=1W@-ojzJa_0;e8EjPc}JWhC>j-w&>CMBL{Wo>f+*qiq@ zQ*L~`A5&unJ+QX{PmuQEBvw2M5o*`Ovy8xW5Ohg(gVgZENA#wmDZMHF^1hu`J6`Yq zy7e3O8vC-sd@Ji#1k^7q6*Nd4XBuK&`Fgb!SVvc`QYkBhO2z{a$lvGTr}xUZgPT8d zWjZAbGIJ2vFK6pd*D>dE58E}{V^QVsfHhg)yTxw7?haH7^>rzN z&EoQ_1_zHqJc+N`vf-RCxN-dp!H%!&;#gR;#Q+m$p|WH5r847KN)(BGRYDpHp^p@v z@mgOc3@uyjHg!N3(5872oF*6}0KU?Hq5Z8&`{vkSMD1+a^3Ai)_nqg8IqWKEt0oqB zeqIt|Cc2WMT(ZvxA{x|!RI8-@*76eKqCFr@q4NuyqGis5f;c|Kw&*Ccr(+r9J!f_i zRSaNVrNyCgQJ*o$2kHFq=JbE}*%bVf;Pa6gMf71;pU~^c1ddG$Jv%}+k zee+3jW!2dQ!$0SxTJ@7V{kQwHe`x>lB4uoN=-cQ^!@myxbdvm;zhl$CbCS2Gd|Rn3 zw=y(KQh780I@x46E8}9FMPE9+0)I=7~i4{5rYF z#fzx4)Bl<2ykEr=);V>_Tuh>Ts7Yg>=B3LFJdcdu z!g6vx4%qBhx4WEI|8RPs_@=92IiynJx(iEG1o)weO1OJ%zS1d;b>jPm6PG1zl~ek9 za6DT`!Rb|t$`Ttc{AC6wBydtyD~EOhaY^j zdUO=&dE2>EOm;E{Fj%=;9{(Eq@L}`Tx4*q<3RU0vCFN}U)lK8a%8q<>#d#U!)t?*A z7}yEio6`BA{=jmtMimZ97>o2m>Mf`g(b6O3ly`G;M+ zV`Z#q2GRWdANE^TZschW{z-U+*!<4*(By$t(xdmh8FEUOUH>I_F@>vKk8(eYv&{Ai 
z?ON}C@=h7pyVt#7 z@dD5%S&TSn^AX4sy$<_Y?-7VPPjtQ0gNVK0km{b`08UFNhWX8F@tPA(v z-~SxHcEan%g8==HDaU^P;xnu4N{vIPln!RFPDEVukOCMDes49eh?*|UR64YE<`;Bd z90}(Xnls8)i{TV0+}W8=4{~-RD%CdzSXvMSdGXP@}Td{mcj@`^VqrQ^=N|9fzk76mSJZN_8Dob;;18x)Exy5%_8?#k{PijS??7 z;3LLVv}<7WE_Z&q4PgDNAR#}#k3*KN?YJX4i13aolQ>yVtA%58F&I#4-OlXc5WSB{ z`>HA6R={e^5(gjr$z(3@7T?flYIvkPOG(;yi7(ugts(86%7EDb2-PQN*m=0hLm(~k z1s$JIA)9UJF=rC(y?>xE-#UTz(SRb+ z-|9xmZ-P=x=td(Sp}<@<^DZurh{2+v2B?KO8bc3-;|xUvxAtaLhp2sEjkO8?vOAsQ zQi;Zyc(WsFcyRqIA&rHL7kQKGodg@8F4dk`rxFX$QO2cjrG)0Vp7wHjx|PF9y*ixPi!96~7G~=x zYA=2MGHRJW)>1~uzFxovfw(?T^uEotRd%(!`Aq~HNYIhHRHDo`S^+7R5`)piO_-?= zDbu_VucXrxtS)>unlc_T{AbROo6J<_A8*&L&l$~I(w z1aMAXN4Nd=RmEw!rf#x2bMqH3j)YkUIh!QyNWweslBt7 zalYD$km3#n0XVrdkGLi_qHJbt&M3qo%lh?%1g$ro1xa3|&H&M|SzWl9Q;mJmnSe3h z0%pvJ&_!oYjt0sNiW>q&9rjqE1x_V1Qlr)2wrm;DHA4 zM6VI?Hh}a^Y1Xb1u6{Zc>H3cJ)7Md-Bkv%z_c~5gjd%ipmFW%&^i9`xiNQc-SOrC$b_T~J@;15y#4ae z(!DQYwExf1Z4Qg^s=+cszEpV=wU!d5!I)78d5R)tSjUo`0u@YzMaqUPB4G9*{mP|| zi?DKdlO5Hv;N*3s%vQ{dlVsCmZF%bIJf*wWl2As`b~A?Aff=}alm*$iz6h*dU-58n z=P)r)lGvK`R@C`OG)7Rfn!fVpQA8g7lScJjA_z3ux=LW0S!x->o@i87Yh}$ltzS-0<|mD6;*S(F>Z@vAH};`*lI^ z!x~GQE3C|eSRDtp#_EUY#LMT6W5#Wh?(se~i zbge#dhbls>l>#%>UXZjqgm{We)M5xv88xkNLWTo;r(gsb*+Q*7!`vS?gaR6_3iQMz zo7iDYeQI23H}0Fhlsz8_cD6nyz%&c`eFwg4H@MAEwD zj~QMcLPS&NvUppl1?k&10vTbJ0&YD8I#|lS$i|UL;z6fQbE~#4r^w3+PqqLoOgChE zn27aqPd9mw{Xq>SUMn{6XiTNnNQkZk0-%a1Hfx$c zM!X<`u3yC_EI}YXz)AVk7{`QTgEaiQ7e2bo zjz1~M@~NIsRz+tlS|zkEltgDSZw=0;r;)&hXo3t`;J}r2MuNN$VJUxbsDp%&)M=3i zzXxC~=6jS87pMVdApdJo(H}OP`w7?jXT=`L&~>$?LCx14@IruKJ%Cf$Q#v&$!PsZ& zcvzG>HB#p}Sj3ts=+BHr|#aSqu8NOU;{cuPe#$)$<8@|0OYk9RPA6By{;rF)2V2@@ z55pXxK+h+xG?KdwO_V_sxKxzPd(cJD*p%v5^P|_Ofgg$^f@%cLyec+H42>)4$7?vr zkou6ybf0(t5=gC3UazPwzMC6xtiO<+zTx#?Wy{=_D@<*luswC(e)`j=;;0L*x1x{R zeM#z5Qjnb0xsI?$5$tKR?v%L5#EI~&YOL4G>!07yyY(X2IN_13ZQq{bj(pI3iIw7p z=j%*MZQBH;AfLscg8n@mc_{I7d(=5CZ^_}oSAMtlJh>F~O-5G}b1yom;Cj#x5(|!f3S-$!q zcmC3B_O}1$=&t3?0_+T-`c(yO%mE_B8Td*Ufpf$Xgz35~258>Ybsi=GLx~AX_`tT= 
zJQ2JUy^P#`e5RiI+n(aIuaBX-^9se>TJXPn4s7I(pvz+?YwmUz=eZq-vGo%s9eVug z{U-GEs(y=Zn;w`A(;&zhG{YfSY6HNAqhGQMU$7f8@7?cx$90%9FxO(=_D{2%C(8G; znDE*}VXu(IUWM@URVhB8^R2#L2cDaMe)xO><)r)r_j&6J(w(~#UFD@ezQdH16!5E# ziSLq>=mOVbU_3;pD}_hqJ+qxcMW_08dz`WBXA>kEqBuf8lCFM)#BmxzJH-kaGMTE ze7o_&Mf&>}Pwi8Zcwacs8=({2(3+v#Yr6IZgxKN@$RLiF>VJP&Mqbf2#-r98bvGg!YvMN?25+rlJ@NT0c>h4^hT zrrcR62|+|~VJwXZu9Z9A`EqL7k! zh+I)$JT+B*@!sbj&&IvE_ZN)0vpRYKkob{gG|Eb}C&+A-+FkD(jBx-2U^3c6plR^&d1b8@*0~ zhb^|Z`B){4yz2@)QEvXZv8lN`F2G{>W&6+j^ml%vyy@8X zf8O8Cv>|j9FExLxb16~tdk3MJPyx~7Y-73t7(sM#&Gmmn*ipKgR3FgP(j^$qwtv{M zusRpLe|Y+8)9t-RHAm!*@{(a-Hypg3fBeuHslkP%UY@~fu_m-85jKjmelh?3f80#? zB3nI$G4Ax*gaA=0*o?HXOaodYKkOfrEY_ZlE_$tzICUR9U3_6L^y*Jzir~N50fL_ExEUkm~f1ZQ94WldX*=R$P>ikAFw}`{0Mj&FewE1uZ~- z5XWU&{Q&Q43_rcil&ycd2nG#m*Bu%0$UmR~Gn8l!4x#j3`_U4V4|m~Sx%RZ~>Ge&) zFOdezgkLXhY29-9!NjpzyEacHbf_e>@iHVni8Zu-DT|dW#S%t{UV}|^IFx1Q9Ohv` z4EH!K0Um#Luxx*H9yYmz2<%=RU@odWQJtLJ#i-@gi4r3>7Ah6Ep6n5Y8`>qR(LB;I z{$Lv^8mXAIkYi-kj zKnhN6EGs^(PKCCA#@V&`)A0WNYgPrhP9eiI1J#&E6ViaP5aLRQNsf3lVh-B2IsB^^ zANU`6vzJq2Ek?{hC80wy%dD}}u4ltpEbx^i?$l8}#eKLEppeJ%W@QMnb-bBC#dGbOsK`fU!rg^QUc^Q4% z#odui`@~uD^!*6{7TWrqy(cupv;0XPg#kGm|OB{aF0CPl)ci z8Hls_U;5SN3owf=Dk&#Tf6HzyolgOqg$WwP zR0TDm5%8+)6{>1J8$fXTM>fOL_QS|l(l|88b7&tB^97kW3&I|DB<_;T2UgIGH^b9l zJLM;*unzcQe|Auyi3j&oXM|Zk1ZcjSw8qMH`o#8|>p>v*o4Ua>TXGdQILChJb@fWQ zt0}siHtMjs7D|5;=jk$b@nvY|_0vs|!NqdxcS-*3>D!=Z4AP-cOh+-Hy}wWw)_KcZ zmwUSZVkSM04j`$f2NB-J^p>TyRCq8sctQ;)Yvd{^aMn|nt~eN2NAHAtY>&RQZ--a=dPOQN{k`t?P{m%I{BWz%y=5RBzS9pJ@+qt#kh_xu z?3$4}p9YN+#NFo5Y%i+*w#9>fdf1tK8iEE4^0RW`pRt~ zJMzHK+vn@|e)ugy;JlKs#o}w{`bU-678N_Vn_ri%V_VJLximEYiFP50@O{lz6<9^b z4L}Cb=u+%@%h0t`&+d(F`c5)EI^=fYZpg1W?Msm6>N5d5j-8%;&K<^hz+E8c;}LRS zw!kq2GFTWx4+Rf1oDH7#9>sLj(nSGHI>CWki-YrlyHa;G)j22mkDG{(?o26**!Bl=w}6IQmr-d+hfk0-6F z+H%eMZ*MyOfYATSyxjK#x|%AwY-TI;rV(W>#X;{-i^WH0=p0C7WO)+WI11N?8?j62 z9PI-(ZQZXEcrWT`|&KQ^H?IXJPIA9A`$~%uxX(G zkE%D1ODb)^{&C;X5YfObKt$Y15KXZf6&Fw>_uO(^a3QmCK})R}R}@4;TmsW_L0r;u 
zYq7F%-%?Y|QY)v@N=t1Sr>1G9dC&9t{C@BAy!>&v&wqU2oO5$u_jP?QwOi)(?hE=^ zU(V>{mR>fO1pRFY<%=cPIWLbSyuV*;7=jwbEr=x63oXu?%0QO*h=|&qabn7fta0Vo z2L^WXb*t$x*uFh{@c#Ok;}3OhZM(i4{7-U@9^m$%L;g_@oroBo65U^Ue!w@7i{ESc zA_7x@E3Ug+;l&>u#~Hu!vajM1Hd-U54I%x}@s~9;$r)eXX?78f#`hwb@^b=1_p=o9gv_RV%=l-sj?c~4& z-4puAze0HzMB*Ejr^t1xWXsZzG=j~b5R@&=Jw!e7T3?FUSEG(&kS|4P`$5Cxn-D4@ z$YJIf3TW`uB}>Mfo_H*zAO0o$Rj+qwA4!GUh71AG5@s_6Z zmrlOES{@bR+i-Y!x+<4TF=RB#&>Y|=RXILPohOu{E+uuCyb$+wigBLMMOb838OeM zN*!bGumpJVXmN4PHZ-iYHPD=NVG@I)3XoI|0|+q@5wV1Lz}I2Lu%ha6*9mqELLUt) zcZV8uzf4{XbV@|V73<7|pmm2B@O(4^s?xNu8MOUxV#oZ(lI^w~y?Mb6Fx0{hgb%*Z z{?DN=f6M*mlk_RoZb)l|$T?^@Vq6cHiQS?w(6Q+G{4*Ai+=fPmgPXG^M;EpS=E;jP znT^wzhS}mq#PLvL_b8_>(MgLo<^~btrAE=05>4NRMKy8L(R+hb@K{=!n8#=HgKXDx zqM#43ifG}ZpvB~A;p9ZTLs7gu(-$M3qA{<~T{MeXh$PS%${^hq-$LB&^Y2D`ZOh%^ z|9f()2rk;_w8{9x?eI7)zYuA7-kw*e;^P|S0c?w7UDo8$MM|Zult6^`2Y^TIIGW6B8nh*{y zcuULGMw%ymJ)M+aSLxec>iX%_siqj`rbX5k24VCCh9&hy5>M#Q^v>heWI(rH^vmmS zBFvd30Ib*>_+f7NTIDZK#*^SHlSFm$0>y@e7!CqU7||2&)RcycvHpCsC5h{h z%AhndaZ}dgG9W{aSyu{Pn9tdB;nkWOrfp|Cy?*cChrj0ZAYIpLfJVK-( z)IiEPMe4RLR5}=DH?P|0BBmL3r$97(7a~K*8!Zcy+p~u!>sc4`!qX;*`ib%`*M!#8 z{*Vv18z|Crp}|ONDmBE7QR;D6qN*7RM>l=6)40H>de|&-L z)Jh?{eGW2&hE%a%70APLV8x;mMrOy}qFR-(i{` zV-(_n&~{nLH@)s{9bP^y8oo7vY{0AP$T$q*UQ-U~78iKuJfavN0WICe_$?v zTG{b2D?CIhm&(Fc%G)hKl@f%5YWNcSD4hWpZTf!Da}dR}#(c&qw#LfzkB5s4%P>-& zE{8M_5#Qz}qQ*(q#G=Y-jbThV6)-C#3k|deWceL(kql`=A3POXQi?H{zQGycMM9`J9J+}xh`+KB46MRLJn<}Mc3sOM zNHfkj#(CZaui}IEQd8qn^BQBw`H?lfHagjJ?PLx1RIeq8x2|IVImw@dtM;f6Xo?_z z^M^85wBKgCVkaz+*SWDp=Ns^to{T731E4{)dSHz?IP+r2r15A_-+wB4X5n$p z?8#}K%UD-O-17Aj87ronnKBdDhc1y7;~Vp1H~w!IM(uwk1T?T%vC=r}3195(zIJW8 zFgL$s?cS~1cW!lbUK6*sU8)J0_@sQV!?QT%&GpP9s=adR8SEW z!IVmgsgi9$=4JduigTP5uFc9&cf%W6we6cFGwGoMJ)^**9Qdb&)N2q8L zQoyPi4uYe~cw;D4-IjRp^&7+07BlGV}uS8 zy;|n(M-M^qvJzwko2|bEfoXR(a{858 z(|8kAQ1*oU#PkxC&BF(+%VC2QVF&Li>KFMBIr_La0owT^zAb3oOlI}WRB?q?(DNJD z7Bg;stk`R6(m9a$RCU8*}RZu!iF4;HTO6IbxzoPdOmvlSH**f~0m#aN3uTwo}O;Xovgd&f5{Z 
zDj3#$ogl?*nS08D(#!*bwd%rCznvT1pDS)$EndI%y8g-OAG;R#q*VuAZdqH3Eof4u zY>w_v#0!e7RYXyrzVh^IAH0LPQZOAwhQH& zSwG=Gi8?4axg2hXr^!1gL)TO}#*nEpB+vstHo0XL2+o_VGxM`v!%RI2P4yR@ey|+n z9cxV+kDmrTQ_A!hfCTPjPLB*iu?ZzeBpd$%_W((CR;lgcD0atrq2Ro0ME|mzCnO{4nKS{Vmo~+^L_eUt;y$)S5INayXNjw z3!TgsS&w?%q%gwLpBoW(_Ij7Mx>?u8JE>JEfG~Q~O9=w?*e#m_WgcqIpi@8!*}FMs zr6aKBgwsiZ`n-Iwz55fq@x__u;Gzs(ZtZDZG-{9_D96a5_m=Ynm_+PaynyNECXFAB zQq!&+BhHla0Do*n>D zRklJ+2gR2~Qqg&zsL*cPf@}^UxT)4_e6ppHu50}2Qy;P+S#YiN#5Bcv24FaA)K<|= zGVHH+!3J|isCA$x9*yc7y?*2?KHfM=#bg~s-KFE~A!E1y97>UXvZ|jt73^vlGIrH` z_-WbtrhGt^Ng{&1C^)U07U#8i$jDle)u3V_$j8003mLi*MOOXVl1&pFT~LNjF$lmW zeG8?`8wr7%#@WYGfUr@#)g03SvIZ%|RBFsKRRUI$w}`%GDic;huX29~%I_hewE^tk zfw}Qx;C~LWcdoobs;UP2hfNTsA{1|7BIuE^Agut4*8p?fi-#VHSavQ&K?+=f{sVXC zml`T6sA>vr9s))d$awsx62*gv2Pe8UR@>N8wC0o3Yi4QIaImKA*4anP&vW_1R81KH zr&KW;xc!7A(v0hJcG8hahX6;FI^~c=329?M1{Ct=iGl>h1BnW6wLE^*z8CVMdeAd8 z=dXO}vHKyshkK*0YRCVDMtQtnd~r4s<%j{ORFM`%4o=B0=dF69O5LM8dbQm+c9$SP zxv9C}xs{G5O?k+w8gi5uj9sVkyxYkXDgi$k7p0q;Z!z>}VIg$tY4oJ478EJ(s_;(1 zacQP63t>%_tP!dAq1iq(2E)qM+UEKXl>q$_v%GI_e zY1)Z&xS*O=S6-jE@)lD{Yo%VHvhfSaQsS9OkucEL*yC!0T#Srw*;%W%EM6qhjeSpT zbI)I}!nc1gLI#$*3<`!;<+84wAYaXbWLe39NHnv{F1ac5w%KEm@u5(`<^YBq7j1>t zGe64@?c9}r{*Z&00g*nr1UlfEpn>3!4!xh3Qv9#2_!E#%{ylvDV4 z-D9B|bN*c1zt?Z>P)~Rm$zIg{z{b75e#Rm0;x3z2`bLc^Um z{&+?D)&n%+c7W9Wfra z2NVH%2}-_pS8j{sL6djX$1RZ3E}T*@8OB7rFTCZ;Ogz}uOdI^H*mNpm&z=1X=AX-S zd(_s4U>$K%i~cb10$PFq12di|n0&DmT$VKl`g!AovaQeNkCb=E{y2N~e@_l=DtqiV zD6BVN>bJ^LoSdDBVA;ZtKt9#Nu3Q~3Lho6Ajw$EU&OGN2GMv!l zGF>ec+uy(WepuGt8Q)Pfn*GN=Pk$Z9$PbyPK3sy2)7VJmhB7PVJiD%2&^0XMcLl01 zu84`HdQC8-$s1xD7ic51cV*S_b#~XofI}%nloi2_q z$QrNp`>z>CyMeNAY3XyYL1h#Ryv!&qgzr_EtSl4U*K!hBgi{*Rkl?|l#yCZD40HeW z#Ps8Tfupb3bm&0iHX3#rP!PnqW~E!ho;;Bd`?{y$fjAIXW=!l)IP2|LPpIXQ2?(2l zd@T^f6y|I%-BL=G=P0CAS={mK8r`~~-Cm ze~Y#B)1`tAu1(nupQE0qVpU?=F8LFTKod?DhB#Ug+li)Wg6B6QL*eU=;8y6HZZ06( z@Mwh^1aAGdRa82OGB(I)(TQ&3_)^uPFEhS8pBv)hADC>KHT{X~kYsD^RF;0p$V&BS zoewP%-cVnxSKxefS-mt_YaUc}m7MA0n+5q&<~14gJ}vMsa>o&h!Asd9>&OZqZlKsP 
z2?MY&tEZ_gn%K;THa4~2r~fz(J(15bO!M+;{vp+qpPl_6ATvOIFAoKB zlEe8$MdOW;ID2qnEMWQ*H|5p%*1+7&LYIQ61zeZs3~&ne_O~@Lu{kjJ^id~|nh#gA zt{lW0R1WfW1oGv29j9?}Ylo8+EHOhr??3GSx{_cBB zze3xKlY4l8`wy(=9i()>-V8<+il6UQXw(T|g(#_?n`@tca^$_+_VtJtoj3%Yck$dx z^s(&{0(aI6{1BsXI(q`-7#j!sB|SO4N(s2nHap9>n}62uac6UCH1t}9r4Lj%LkCfT z>@U9RelZsE*~n^0vAh0hBD5GgGSpsqyG6ALA_Z)JU_^6kvNV};kyVXpWNa0+Mk31w zg8;M$qFmR6rsVTk5%u|wVbSaZ+KjK8r8m#l+3Brs?z$z?RNON>XCChDufA^Jl21|5 zQvAdkr1MWdw{xJRbR7sU*H``Py-JO;Tv>jp!mxNa2t=wqeET5i=bO|{rDJ{_$^Xhu zK1+md4KDg8JpI)%#pdo><)OWT0uaP|UbZ+&^^=BztE=;bwmN=~t2m5wqHuPj<-|0Q zWCW1F6{vZqi<9w~qY+16&D+}4=0Dnsf)mh|ST{O!crZtRxjp6Ae*KQ@-Jb8KIZxYv zb)c81aOA7}gO*db!WtyxY)7laC&{`*&2kyGY>`I6^}^-F5(fx~M;lWl32hblv{-%>p1DdBsPGC`%6F_({;0U{S>EW#L ztYfF3WIHl=nSDpt$Wk0x>#@^U5GTx2)O%?_`*y6DZGDqX$ z{6L`W2mjR_jDQ((YS(WYaiO8Lp;dgP`&vtLi=?^kT66oA>&@4?ZueZ;YufkP)mux) z`fNs-AR%1dCK5k@7X@je6;E4IW#!-Sr{<1WwCN9s3PZ(7+(MUnbO ziqT{9NfYzNc2QV$L51#x)1PyFBObh+>oM(jtxFd^q7B(1el$&BycW@`Db6p;|^xvQx3tPhV*m@^rr4#>r+Z)7=1mfSY$NS;D2qU*{r5zD3?-KH)`=^pa(c1lecZ ze4D$Q^P#mVTG2f2ec*Ba5y+A2tmz7UX8Ex(e*vLT4g@ul;f_J0>R+mGk-4?8#Z>P4 z0-VcaZO}@p%@i;wFJ5+$1kO%3bJpmf5T50oQw#@nUH)eky<6IJ_)K4bDK%p*_jiDg zQzNh6cev)M^Sgh)X8jSDe(e!2W@PTsvzXT~$df;I==MtIXdzC8QB@RG7)xSpL=}vH zer`5(!XNXr*^;`qIC!aW`$F0xFC209iu82>{@I4N`HO8q`ff!eL)_Jopcdjt`>P;t zFjIHE%i3mc64;{>Nh3qV94F>}`QQi^%QU}Wt0PO=*d%xCZF$Ok=P~qLgGn+Y*Bg2p zLcUCM5inz{_|U4yoOrBQ(oJ$TA+qnN;MXQ2&9zRreX=^#$I5U`4`hsTLIJqK})U- z!ST;WzQWe%j~^yFL^D0vDd0cTQImEQ`6p>&2Fkzvly>jij$HnZ zh_lJ4DD7ZS3_>z9Wk9Q65FB^dtLsDV^xboAFEw2=IzE(XEjqMK`B&QWRQT+$(xEV0 z%a>vJ<*-PM9nSPEubb(UP{WdNc+paytS{4xmAq&PS#{IO;ld8U;#WHrdov+WuA!Oo zV!|uK*-itGP8#?QL;thcvgEm5Ifq}WHR)_$kNk2r_5*NmTEpDFYMyHL|L!9()A=Jox>dm||E_VZy+~jTH8`?@UleMvUM4d#OtK0qS z;3<)~jR~qNEZ-f{`>!otz)2gP2d~0zpfyYADN3z$Gk@J+^H}WP>+Mbq6Ff|X{+gJUO8g` zLvzqTP{Ad{^2ll#9w01ig1Fu8*IKcrFHYv#Vbd#{R~H2BdlhT)?JW0sk}|@=-Cc9d z%#Y>Iug1|J&nDmN89spA!` zyi9XCvd)fNsmJ2I2-v9D7GPQ)cbrvqp;nzLHg?OMEf3goIO-VbSL@(Ci_CF+ajQ~s 
zkjs3$>aDM`#@KHECG@jCx5Zm>C`MSy!Nvab!<~)z@%cler(VE7LDw2>PDHt2GkqIb z<4T<=WSwEiHq5J>3R*qLe4gB#c-72pX4gyi%v0vYD_kFI&#_J=S)f{8NQh~JBmMJT zTwgjJD#}+W^RBm4qW_1-%_9BUK;e z8cJ2F)yr}$J-?0ot6ePA#i;%*(qb-o5)BqrS$^D=!gHHdJC zRhk>Nf3NgV9}Q?FPWZ(16}G@-xyu?ryh;OrBj~&5X>e55mTaRFNVQ-$M5xlp#}Q?7 z*>%XbGaHpjP`C-Zw7^W)&mX$LyaZWlDr6k~{9%~1muAh_vg)1LKTw3ZCLXxr14G}L zul@9hTK3h@Yy5E9qefbeeDf{Y)5wu&5|CfV^Fnfffu2=}<1fijf!?Mz=-r#uLIip*UI_TO&5GsJKBLiRr5FFXuV$mmfUo-1nw*XoF9YM|AdrcFKO znYef5hW+=vOlyzNlY=ksZY+LiGgB{bt5HXCW(%Clsd7`XelzhH#OG z8X|%1O`5wD$w7(N6(Vv4w$|HAUCebkpKSSV7VpWt{s=xD3A69q2+F%w?iC^rLfdsTC!<@miPXcycbsAXSi+LJ?iz50I! zZ3bTLq?iu8San}0wEI-*;5CdsU6wYdw-`zr=_5-*S39|Q_%1I;+A_zQ!c3p z1TmU{kqok9{S#x^vcR{>Tngl7oj&l-;k!T3hHv;jy_2>1{)LxQ`nD#(&EsLB#}pwh ziCSqEC>=MgN{B%;_-j-*wFrATR_ImDD`(dOc3t1?c!0kGX0ujctyH&GUQ~$k9c5PG zR!+YZmIRSBiCT14N*lIm(w3~AEDMEx;Yjb}fIHGv^k`~N;upE*o8fU{~1e{as<`^V-Iny z>c|EP{3sGrA)D)H_GmOV1$Tjw%IvD@rUw+M^O5B_;5^%HoCEu$t!}o^&)Fzb|0ltZ zr#?65HOfS2RQT3EbY4qBcryl%ZQAbDd?X>JtOg+-zERrQ?^%zxHInf?~TiJW`N8`vqbWdP? 
z{Evsq^LN9C&%8FM92*493!%gD7(*8TRAH_cqiT)1h{-5iqFO%@MUF(E4}859eeh;u ztd!9LfQyNlQnD}0rXkx{tt`I8nKnM*;)ct12CE7wPSlmRcJZ4m%n6tJn;Wlws!+@{ z-S+nnxYYRkZ`T(8ux5c0I0Ueb|^ zGbA$~XP*6wczqiHgXvQUgrLs+`UZ5CW#gf_yic``_I!t|i^gciUArcL3|*zn)$9sd zF|A*^Zwe@t&W!Ul5f%5}fnb;go9*y?eFs-B1?X9L-(cG)_hl9voKbHu26 zROC`Hx|osw1ZM(;5c$XmX8zO=t$^WqN=^k4TIrJaY~_tiJmY-ssQ z_;0f`Vh$sjxnwSvEp94j=L>rk7H8zL?H+LE7A+lnh`X;8kx9vG{=9?or znd@S10*6;WWX$0k4r$@Ybx+OXj%gE0P-RN_SIHMvT=|Edu#dm6$nS6Pq$_4qR6$3C zD^}+NStERUnVSJEQRr??ilQ`A8gE3P?qUMQTlO5a{ZFtF%Er5DHR{;u#MU1Dq?+HZ zA2$PL>Blt>+JS4!A|EEVBwHZjN)iL?4`2j4RwJj{)=h4HGpP-T+}ZgY9G*SrUR}ot zUVXD8dB#s;6pdH`Yi-%ZzjhiG8QaHvTI(0^+ZHi3_UO@16J6uNT#d3NuSq3&FKZeb zQZ}vOSVD%0O>50^ULlhMzfANifw03CAVa&Aq~VIT5tib4?VLvSgS1u;THJ>qyh54j zq4L!$Cz7~{jxD!jhgyvOGehV=lwF~53w*WNwz18+T5zL)4ldU)A}fJg!E1APEh#y* zydkAPH1#V)mMgEM`OS^+?$I-XwN8k#Pxf}+2CZOhuKrw6rEQZ5-sni$yJh)XdrrQe zxjLi2b;0gYUACf*Oq&E?I?bJ+6izR^<0U?clC4Wq>G|#5=8&k}2E?bhkySo zWncg)qdxcWFuc99wY8=7YFlsnmCoxu?Jd{ay1ILAHn)Z^g2iSc1qz51g)1$ z&gLd#gXF~*eqH^gxG(Hl>K!tow=>f8+pM{CBbo>K0Cgn+IAr zeTzN*+VSzLJUCre%{2@9!diKqW=|XIV$U%qENEIGqiNrjf$qNtGdoNW1xuD$ePgP5 zV1L88(dU01GJZMg+ItQDGzuzXDL7WSYVOxs1L7)m1re(#L}lfokTMDmJ<<{H__CsP zT3kyoaFNxJakLMmvINxT=v+VaPdAOmyuKt8vpOx9<6RZfTD_{kq_eDz#?nl5L^2jfZrj~H+=VM|7%Q$V+D2QJg8DrrL~LiP=aSRICwa1y<*huF>}*}v3Bm28gTz<-+Qkp zG4Qltr^)=>+_$7({?^vtr#8Idai63H8%P9&A55T^IW`AbR<*TXv}vZ)`L*kK2H4Wj z`oQB_Jqig@(J*kS0YyHPVea3T;(O#`Y*g8h%fd2&m2P%|W<3k0Dl)b3Rt^Bp7p^Cr z4BQdOml>D%R=J#>GJ0yFF}Ah0tn`kbWnk}6KkL|PZ^9>TcKG2w)q775{t(kr5Sd=98iP5LF+RD`M;mL<|oH(T}xsHI}CiNDjjA;4RG&)X#h#_F3 z9S8__VPLffAwe2RTUel<_oLXkzWwKR|Afa5-N@9MD&wwd3l=CVsP=fo3NEcC!_$rB z;xyMu<9{Dx8hMmcL%gK+M-_YCqd&}kY1raF+J3HRopC(c`&LB;RyRUFPIe#%3c7R? 
z;kaL35|{ffRCIR+l!CH4tQGd#6wSFv`GCF66sEk!3seCabA8Iqk92l)qu@-j7fq)n z9zw6NO&kFcV3L>ILL64Mp-Du%4wqBnMJJM}2CM#!{ZBz!iJ^o%F`j)6RJ)Y64jqp~ zC)N1sx?zWdV9&3+i(Fg=@5g_>GEmbcCAk#2{Sij8QhpyAwrw5%(o6pI={X{>oVKEj zMPlpZKy(YaONAkRdAMHI;!zfH&vl8SPyA-v<&BLIU7x$eDPfz(sI7A22n>gE#^}01 zqnIP}qju%zQ)nqZ93zw@Ljd>jMN}t?7x=tdcx++4X+oMRr@W>^$KGH-l{=CXUG>nB zodz5rdEp_%6+>)MQ0L)+_4ZXRa)DdS!Q zREmjalZrSV+ld(E4pTEYw%_FwJqoK+LiJw7j=7-C>_`Tq#Kh2r?%f=+yRg;+@zdgFud2;y88mn(+2CN{^7IvElPL&NSnWY3&{dDL z*n5~Ll|4_rGOvE3R66N|`Sa-smW{vJ+C-VzVpmQo6$ky~>X)x(-G`ZkNO`rE+y$b| zu(*6#17owu8-`mi(3-p5>#ua8XXESBp20&Oq<-fv)?b|79eGQlVtTL$tic)VjSp@n zCk84czjDOqRQ|dVq0&B&@}Z($%8cjk+KV3oOt; z_z_X$5Q{-VY56pr^rRQDc@k|%0k>8dQPX6pTcf#4QZfDOBO=m2ws`FMfL^e&Y?w zXC4*;Stsj=XMQG3c6?BE8jIU(Mc-%0y@ z{vqUl#@@r80gb+uysjLaz*>a4aM}rHE5Tlv$v-JbRczAG5^Y*pyRsP+sZ4ZSd}A%( z8l-_az2fzU6HeU)VP9Y54)OCE#S%u8Bi_Z@G9q84|l^JD)-;~e!A;uYf z?0{Jr51(GW{9^8@4~{$+Xg-O2!N+$FmP;Mzo4Msfe=Z7Af#>eW zOJ$&2@nu3W>T1@?ij8j#BQW-iG1qO z6A|6fCW)lS*`PMDaG~wrb1cjqpp4Am)T!c1iv=eV`&Bl%wnH+pPt!?k(=uS{e0TA?1h102aLM!(CG_M z#OjNAkFwoCzy5>McC^a!8g=r-s}wpR1sgh2Fe)hFAE2rFDWIJemTA)(dFdw_uEX?S zS!kChKZ-bsoofh9(5`j2$fw$jv?%`3cinqe^rBrLXM(4?;OcBz;F6|)Z+!V}X(ZWW zbU9pQz;shKA_ClV=P32P_^}b8)*Y)*DKSBIthgUA{H>#e+J=pmBN?zY)Im$Yy@u%Q zi1Rw#eBt4TPei#Jf3$Dz>c$Qjk+cLJNB(wDp4Imw)fQDHr1FL$hRdKL1(zg!8ZSvx34SE-w{%x%}&} z!@XdDmQfiAC!CnFt4)uM9oSY`J*Ha}CE1hw>+37U_oR+6>vKilY<@gW^Q5f$7{8`O z=i1wP=W6A9O*=ulx~cqbs*`OSl0OMF?#7%Dq)%aH1~>Cv(j)*+a)mP}ybdWKis_l` z+(Yultm-tryZa;9rKdMucWAciUy$Bu42=-@s^5HYS%C{J`(t1Ff{e?sSiRS8a=$J3e%Z_`h2rKA91~TnHlOFHR z34yH#o1z%`D}4x$+}ixW=F8(Tl9Zt|i!!CeqFKDTL7pO(%^_CClA5|F!**Q^08Q8M z&)}9D1%bCYw;xQ@$^M|Wcw^4vpwAZs;vtb|+uz7WNPIaNdJtSSyx5cD#NP;dY8Oz= zF-x+=eii1>C6iVd19lJjasB;GWXH0U+RPkx+geM`Fqvc^n5_ZnsLQ;zqkE!KBKl)X{63YBZ+lqa`m{)X54$- zExl(HRYqHse)FQn?ap+W>`19>_=LUZFXv)_xWc8c+9|WrAAc0Ns5Qj>ynr?hUxZIq zw36WZoY9vm>2SreGeoOpAnn?~_t9;P8hZ}szHSs3!DHDus5~Ogtz%5wA;a-0b=5Vu z=(_u3KSsnFm`AH8dPdlMW4XTWo;}Wn$-J{Ty4jUh zl50G*XdtOgV+;8dZa1hyNv3m}!?I15w4`IHh2WX3C5`Bp{bxJXj@XeBOs|FgMSS8! 
z(NlDfvwT(Qc{$cWnK@ihOg|HwBB48%OE$wvsSd=wc2NQWN^i(KD795_tR zP$~?9-BxWWT2XU8@O5_T$9lEfb#rS$ecsN>A5r`3UeDc zP=dXd8mE;jn2gnzo+TMzt5M{YE*r37bjx(SG`X6}*O-iWKs9|lm-)55*%1uE$%*8kPw=ab;&ZTGY}g>Ia(Q z+NLIy;SF3bYO$BKtDGperWQcG+CTX^5)3b-n?xr_!A;R=lnF7FOIKQ9Y)l_9PCh+l zUKFt^4vf3GC+adUVRhDUq(UrkN>wl&+J8jFXXDa#w5eN#l=$c(oeGonuCU7VFxEsW z5f=@}HaFLs(Uez+W;S$t#=H0`2^NJ_?WpkQfDC|LN#72Hp?n z+%7;|y>g?wx4rq=)lT5mb@S?#?gzo_lCq1{_b!M4(}RED`7;XtRDK~?e=e<^daHS@ z?$=L5DI~njXA(n=mMeOBD+*vrLQ!Abe8V2uFFytWSs7F>XtYf3sKl)hsHDjM|~e{4%MJ zt*N?@8{Q8i*RXzj|Lf-64Ufk&vz?4<2}uv%o?YA2<@BdBXl*1qZ;dDmxT{31Pte!% z;wmY>Od(?>r-~nHTna${W18CObf%G3Spj_5q$uYKD=6UP)k2R)rNEL?0QpwC+gu!;y$ zb!&2U^sIqFzZ(c-v_CiB>Z zBP1ot-?7G?3((2Ftn>%52m2m{ST+Yx6g?;=Uk0l&`!{cxA9cF#8#oy7i22A1Js)&* zS@pAyYsTYs&D$~ligWa6$*_>d2HDWqGN8KO!K+FqM*3(Cul4_Eo8QgRP2OMLV(;F6 zf>;dJfed~04G=;7jItY@K%99%j75PB_GqgN$)n2#A`h{qMjqyG)+LfK^*t)B$x#BQ zLVVu-pDt&t+12*4l}%4ydm6p4^K89Jc<7q8meXa@v73!x%aIyP)zwROlzRE}({fs> z1Ap&i9z8X@b8k)l`s3jnL(d4WuYLRJv-HV`ic;USm9K$G+3~1t56|6uU`&q!sP6nI z2)KY-rReSJvA@7R^R{AQ+j=Qk9n1R)4jb7#2xz+CD;mxKPszwKd+V>U zSvPA5zC+UbW$VV5xYP*c(NhfVXU#>B)vjAQK9IhezzwrI=kH8jlJzn%Ce;&QC3zWv zoq(4`G6NLmiP-F*HrH4qwOmL?(C#!Xc0{>QOfY^R z#)DEC8j{D2Fk_7vWr5JocZ9=J?J19hB6thQr1UKz56hup-~<*4Pamh>=Nx|jW#wdQ zm{N|bv|JniQ(?lsK53J;V(rOSI%}nOM+U>)kOvif;PiL1q8%QVH_!8y>kd)FftE#gA!^Ap>guwPTm8~g>Hi;&c8eDwna%I?` z8WIIVCAU9Gw**@Wt*%>AZ}XE>v~p7|TM zVObaqQp5P^##P(vz6VU{SH-NwoTaw7;cfAqyFP)w1vF%C_NfahFY@)F(VvZC-J=WZ z$<`VQf6E%;stc~yq--}Gky%rP%?yRm`#$?#&Q6|D7Z-UOl?V>{YCEdW z0KudQbs?6bR$c+@b%RMf)Iuq*%%{)?E}gVO;N_#`SC|FV*Deh)zjf!+$Gw~9Xia7z zZ@#vtoxd=MX#nrjP0*Cao}i2yV(MjrTCe-Um!spMP{mORx$=e!w6hC<9&M)dE;g+Sl zykr4h{XB|6?gChdqA)1e@`kc0ehnT*!>2n^%SId(-JM}OTP!oD@zMaY1A`QpknI8vJQTbRKnm8Ig#4WGxUpfq7-;l&rp*^fd$6!odQ9doO+OFee&ExBFHnDRv z;UoXv{Kr_Tk1Q{GUKe`pLkd~7lDg@lMJAHn*#6bn+R~sx)jHl)(2)s5(T{#yc?o?& zZyh{x&D2%a&t!e^gD;iZW@Zg_w$QDJb+N!+WaO!$nol}kookI619-DF0EZv|P^ap_ zdD!haAjcq=+q$tyFX*gIF7Pc%`LMHs(c z+|`O%i}qpbKmd#^#a{Taf6>q+g>bWAK<$l3o6n?^)z&U5qAZR>>knZOF10e 
z5d5Hwma@5So2VSPYT}36@}l#?V7RH_+80&bxrdj2j_LaM{O)JWf)i&hKbGiSpgw(C z>HB2F)Ml63&C)jwyKL$SaE)3haOcRX$jK*9^s*~QI5>X$M-UrMWA ztNjDpOi^z7ViH-v?95>vjM4Q*vm3@hQ4~6HZ>s)%(3PiCXu!s;$E8~H)%I}*8u(B zE+odig{;qasgqB-T77s`HYH; zh4XpV6@CfK%lTN)m(Gvb zhA}V1Q?Yj!IX(G}AeZF4yqzaR`sx##aORFVoV)-bQdVTU@8N0h;_aX8xaff#h^gPf zkN?cq{~})h6SH;1sI#V^Uen*q&+I1ma?|y=eV=}#=N)99U8?ap8z&u!qxI-!ilMz0 zIwLhDn+HOqW4*Doys6TwFUeKVhB~LjD=Uoya(FPqQ3@~ijxACuFhiVDV{2XuetMFCpJt(KU`uiH zH09dcUlnCzHy%9r_qp?LF1^`oKdTR4Qc1Uq1$cx5{P^?S=+w$IQp#hQIoBeO+)yc&kBO~;YON#VI&(0W)>tY(JVa;6+FRA^$=a&0_ z$fCo^{S+Bv7nu9`_}^u)Rawik5nBzN#gm9Khv!iv_=+q!#mfCca4FIyi%AMlrX^US z1}HIdpNrhegp^(hBZK~gAg#pry$ovkta=)PkC&!Vfi`|r1tgShAh0R+PHYt6y2 ze5X(P->!3IX9`u9esy#{QoLWZqnfOr4VlzSI=cq4!R(H(_diM_(L!B#2^(!oD;NBc z8NxjcIN**=WhWQ6X#*jtOLM{K{wy9*tc4^;sBbx=%E{TXrvYI#e(}CJlcQGJTo)77 z$pW)S2jy`6{v=zE`G|oyi$>U_ox#IWijOVz!n~Rq$6rBT34w_j)RCb19t&v(ZD&l~ z#-d+?w3|M?`EhHuZU4c@8;4&9rK3*Uf78-|7^(Hf z*wDSpSrdxc<}&3!AotVGCLoG&_NG(;1MdA^-hXWwgV4!F7Wu$=xIkRx4hi@#IW&4b zs*?@Tut!Xj{iUjxBt`$X#r`A+_Hbi%vhQ>$Zobx)t0ba~bl9#`aksPQK{I$^0o(kdn^&d0+x z5D#)DcpG3=X3q!*4HNbOiA3R2q2_zSCL>!B6M^6ljEyObTtEspo=)6I0(F607i_R3 zhzDouAG4x{A%zL}3iGDvh(`TYih+uppox|m)Rky5O5xHHoV$Zm_t{A%yh6;3bOM3q zG4rEIBy)~;V)-P(?#R#DvlX~qm}gOZe^@;q1Zf-FsGH?Hk6X+NG*WX5n%aaA)!JG^ zjCK3NYp!`IFh0nVmBD5cF+%L4?#tF@i+q@HZF&s`QJgCs$bj=wAJXAQG$x5t8E`J0 zq$F-`05T4ua><)n(Nt37<1`kT#1Ym`cghw^Ms%-3tz2AS1LSG*1d{zTxl?4435!DWEG(_D z1sGD8tOun@y%fFr)+9)X9f;E$n9y-GpORMuxY%x)OYRLMo+~CcW8z2%0DxZn_Pe?N z-4xGT$|-*~K%)fv`{x%hOwT>#UupIRoy4$KSbv!+&SyBeFadvGKt?)@ag@z4w66@V%OWmk z-R;f6Z0k&oL|wu4tz-VN44g4%*4g>ungcH5`r&Hzgh1wz-}*!XvUl-&%&H zdwji4ddwsel7UEkFUPJ^q_9|e9zT77UUbcw1$7&)*C*hpR8yc=#nz~TrC;Dc|Mz|;f zWSp$&lZYTGGu<5RHJG0}r$E|q523o;8;;@Y*~&%}rKW$;nQ3jTY-6sXn@TBM7m&DA ze7vQH80$+$6>x-U)JX_9XFPw>ygr>XZ&;acWqWd;XCco6zpl+4GITU7syWxGQmD%~ z+M(}UXPC1Y;oHm$eJ$A9I&CaQ71MVh9H(s(ZYaV-Cs|2#ba>&Q6|@wp6*C4couCVw zz9Q(oP%s-;HCDw^tC)svEPp_j3 z{J4nrpm_g|L{eDdMl0%abp9=*0hNDQ5>SM1Flq^FT@om!wfD^n5ibC8J%KyKKI^hV 
z8TVo0lP_v3%`4ecoJKFwbR6H(i=!2*>BY%YTUyKE%tuVOsK(@K;E-U-hCiGAHqQtD z^6sRy?yhzE=FnBI6R*EkUml!Ury19$l!ds*TEOXikhQ;|4&f()%Oo`{mW*GOr&^N> zYaZh` z-A6rKD0?{KMoirog(uBE{_R)OEjPK`3;m;~9Cq$uM&AA^rm%4}on2>E!{_UCt^+r( zPZCgyDV?VexW}8Eb4L}bzT5f%iin3%3m2*?F`tFXhs_+etg; z za+wt5H;FMTEHS1S=lqrGQlUslka7~@EmdHlMFOMkY4dG+cHd~qjS6U;PA11v zdzM(EvDU{2=ivRFD`_QWb}!iNGY%au!b{+|GFyu3(;cF|Faeg_1M&dsC9whRyLg?K zQLE|c>%m?Q?WF+*SIs~$3jyUzWot!pn^#u5-i^DWNK@waJOFn&S1 z(ClgN2y8qK8CkB~BBss~FDG9}sLtVlazK3MY(k&Bs62pC$@LPI zsPH4X*URLJxH=Iez1(6)aU5FR&#Md(G7?vrA_GIK+=hx|8*s957&a6KVx^s8%L+>J zZ5_NBcKZHEjG?G!HoTZcKn+aJKO0;987 z(8tGgn(!3+gs$Y$e#uBe7P<}DCBCbhjA)R*Y&+>p1+^#-uMG%FBB*f|dC8UR0t&|B zP$xzQ8yEO+E{fN%Y+Qb@EY`<<5VJl)4&~?_G-TMvAkfy+NSBth)Oq%U*#HMGXPKfC zivg?f5a7>i{fnPD7baTs%Ztwsn`mB6{4*ykwC&`lD3U$5rA@>#ie6XiQ8T{0 za3WqBG>g%E3`NC&S-1j^O~p1skh8gU5^7kU)oi8PpM-lmn6ypekU>a6Oos%RFu;`X zp%{)02(62;CAUEaYG4F)m+`X&vIY_ z2J$+Se6}l9JySSu7oS!QY~6~xU5SRi)(z1gtlPILGSs63GN+>OHCY(rx|GWAtu3o@ zj^@h<-bHCnm^7_QPDTU3m@{R;<&BR2dv#CpwaQcTxraGTUP2X9QUNN1-MYa_h>*M* z1xVEY8Kx%f+3~e|cSiBr{x8LcCvKg8n|Kr0Joj`>7Uvz36;y+a&_TFbx!&4oNx}k| zi*N-}9jmP|2RFgMO$s8Dea<+!+nE!tLp`z(fgvng=@oI-r~ZxGeJ&4;?sK|YE}cEQ zb8~_9`UdKlLCXpu0mp_kb3hg1YlR)q`6H1lkf>7U8NLWXOnb0xz_H_gwT0cLvaJ)% zw^ElDQkf@?)VsV3edWI1p0q@`uUHjWC@W^hQeF7zqi9kKP62E$^UVZeI(^FLbL$>gkp$`17sG%Hp}I`mBr2ZmMUSEHfiO59?&h4N zEwVgIy?+joNijzryc0s$}=MNF~LFt|1Ow5;Saqh{A9Kc{>*Y>*S{l&vdWYciEK_KkO%nC z&3lY=YZBk_)B0-ba-I?NYY&W;Z)$1(sUl?alqC|#3d;NdRmP-~xK=`g6S3;mj}MIj zz{9PKsv8B{8y@VbU0U6U4R|_qek$ei#QSYQ@O*oFFFWlP2uj0TFC}UG*711HtC`Di zTpl3ht0P3nuNCW}W5Y-gEQsj8IqVm7;DW>AQ!kn?wz}73@7DVjT!+Kl zEvw!umlhJ`%zXRK$DWV%j^$NF*`;e3wMV(^^_EiByLQqZr=@5+9@2ej3}BX3IUH#Y z6lfxP^~fsJ01KxM%ny;#bk7?G7|dHon8(YbWb+-MCCxIz&@P|PV~DR=;j$%onPNT@ zcNS{0YsOicvv~3~jt!k-jl`3jIeH<@S#U3*=n$5f9gEkw&XLA_{Kk)M#Ly57Lq%G8 zSUgghyernm%Y~^-_HYGjt}ygoizd7mQxKD4Lp0*{Cm|;Ms1+*;b(F^#vPoA80gPQb zdxkiU?IxOaSGps|cq8mok`mmZs)eNR&^zh+au}|?h7nZ zwH7Kw>;nG1Eygn4@|5~ zpVSvkbotI_6EZa=ue~a4@Mt2A3DHXP?GN;!+gb~8_GjQb;dDGJ2MP8>^%0ghVP{ff 
zNmf=M?qzqf8xBrlBS_c7jXwmC#^;N8ip+kP42%nKIOHYx_Q`vN!OqEzEj+a;cH#Z- zsi$7Ie`*xEk8dAeKa-sPI#9ZcoLt!xv;;klzgoOyVC}>l)}wRbp6mdtb7wE#kFEPr z8IYGu3ic*#Dz4*MY=F>0@-@Bz@&Kh|pczDGA!RHj@CN`6>Z6vT8kMq?e?ABBN{Qp~ zDcf3J{`k)La4iGhz{robJo9|uXc6%-{8nJmoG(sbi?+2N8;j z(prPj!k+9HM0SxhU7Gn&e8zgxTnV~x3xit7ypd(c_Ea|g04F5SGXs6XaARXCN_e0OlUWb1~9$fb}q=Y77jJCiSX#Y%!ZZ}y;Lerz~pusO)I z2AZWGn}7@~FxOADc+LPq^P2BtO^q%_rV5ve^74Jy0&t0v z9uXW;FH^CJ`4;Xk@XG#eYX8(km zq5B{SUac}yS{7l?HcDMq$tkv?L3rV4Pj@w*7~)IvfGFC>MaDUcInkN%t37twpk$I; z<^=QRyPaisgAWxP=7O;f5aRMp^?-BwLwS6NqO7FWG9zbhwXdts(hQKg6Lix{y!s4s zY`q=wk&s@7FTaz;Ae(Ap4Sg-RB15)>*?vu|o0X$og4}Sxc6`1iSl5V;QQ^=Z({bE* z)G!&cwwisi()uttBWso14R*9mBVKi1PvtOrS@@mN8b*i`Q(zuhz-ZS?by+^q`t?w( zqwW>wj{7C=S{8o&p-d5Cs+$R`mBJz{=1PRIQe9L8-Ljx~g#HPMMh^CfpEep?(c#a# z3Tu+$INDfvH^ZOTe-sX_%@y6nGL03A<^l=@DS{YvS7b1tW=>flxN@&OVX&|&30lxw zXgvLpqCmA)x(ilcx(0ITjSpJKS!1oM5&_9LM6JZ2x`>82m@N86&W7cC9~nTwZ&2_& z9lt(%$7uODC&2}+tf?3KY+;-^r-uBs*VxfPSXZ%<0P7w|N=GWwNa<6ekX;J3JK@j6 zPgNW%!VzYx&HODcZgKgw6smdiqje}O#LcU=O@u9rDnFJd@xlWbZ6kH8CKSsu^fu!0 zw~^-{_Y9(~$ooLuCV2pE!G?XSQY(-#EQY(j!nfP)ATTz+Hd39pyRfRcn*MLw( zTlm;{wN}jh_rnuPc7rse_3G8s=sJr*`kuPc$k@hGWMiygYHYkGvOJ)DdpviuEPjD8 zeOOb8FH}twvG0>mE?g~vTv*f6qqczT_nTqAOWWA!y!w1UEXMk^;_Eg2;Io3X&86P9 z%nCyjmww`FYYkbIdbC~^22tn5$z|u;U-0+t$eRP_&_KO$=_<0qU>%9(NbKqjl$>1* zVXI+6?foR5vhS%dEl(*`KpoV!BGvr4!a2;_`wQg(rGX0v04Nxl4O~M#PFIO}u2{NJD`54&JeOh9JMD@Wk_A>JM8fDtR ztMGnFOn;3PS6PX+o)l2!qp3ssiMn_FN~Ujl2bR`Qm@puuLBo8ZwTCwFJ!@(NN-(5D zGaiT|_oWYry?9)03YN+>YI2Vc&W|IR_O9U?=V7wSifWX9BdCs`$}0^@RrnH>o^iI- z5+$ytIYzgtv3A1XE^{bN?p9racWsOnG49r6@%3zRJKhOyU43*f{m53Q0Rag`0&k0H zeyLF%_-F2t2aT3;>&dzW9xHr5J72fLDO<<4H5(#~YqLTE9UCP1)?%_YIHA~BbkM+h zFHmP8i5nK_>)r()b;cqli`Gfl6pCbPZ|5{kONfeYl81~=Jj*P_F1yT)2}sps(nuUB z4hSJ5NOwnHb~~O3IF6XhJ2@Wr!l(W5t5deSpO!9d`!0w}G0m`~D;3;x0m|Bv$13W$ z36DAdcm#=Tv<{`Z3aFY(3WZaK{FUZg)?^wDY=^MC5Lmk&(xkz-kYbV%0udP+q0^IB_%q!L@w@? 
zIFlnPg%K0F4@UL`b`t6pm~4{R*4Q}(fwJE)Q;caX?Bb zDME)%afG}%xiKDF3II_p;oLhxot07PSjkb zR;+OlkN0NsxskCR1}%4^@dFA?lr)CG&zeqJBES52w-a;j2%+TA7)8@;FTZl0Wvg8! z%Qe&nGN82URZ~cYrr8x1kGf94r~}z`BgW#nK-vK?{=&C`0LSIhCvAf*JLLk#kFN*H zmJ_T4ZucvU{tWt|)E>`std~l)gA{F!d}R@n+v>T~88@#MILN}*&|tmw4YqL_-tHle z5>OKsi=71Av!^)t`w3OWwQ^XPba6$#9LF#~SVhSTCp(EQ%@t~1Z`x{Ecxx#}9&6oE zY>mEYv$?-wCv}k zUv7M-A4$JQK<#Bou{z#2kLc9J1`=Q+8EmyOb#?8Uh(J&Ik*0duE>dM_MC0^oNtrzm zv$;(InoPzv9@zZ#amL}tKW0W0hGsA-g%&Mdi0L$v;cAGzwbSvezE9q#f}+s#odc;J zs*oxAe0pWbYRHGM5lemG{2!L(muFSSj?a9l3f?B(67(&>dNhj1Z~a%nvM zed{x0Kj~mrX0*3|5tbT`%f}79MDxSK?5pmxGmZUgT*`@4$wC7@2^7vwgCai8<(%h> zV+DrBlNrTBeQM9ni{+VN=R%?f!8*nOtQt0L9Qg#2#gUFIPnE>9k}|ay`#nI(VqrP2 zBD0w_=6=r7ntk4Ktmed{6Zl+)9G-jqkO?ju~HgE^2H&<|Y%aWt=PSu;Sz!^Nu=?X?A`XyHev{I{+xsA!k-8ac@ z{d4EewZxUzPskoF)rtYz%Ep8Zu%XGRr8UFfzhtPsV4&G9MHDkvFv6}{i%ut@T>=u+ zF&Vyk=aUZ3`X4(S=gj!Sp#3v~YjDEua?zN#_HVUU77T|1i7s)Yx9fcx%<k3ptwQQJ?k)x<*vH29uO7yZ2`&R!W7I$h^vc${vLf?FeOWq76Md`+ z?J(*4Vuphx7!Thfx}d36!Ct~LonF54Ik=epCFukG63~5JV~_mc%RRR!z0lFZw+)Si z7h|`NrlsnXZG7r~ze|EOHNH%ehd8rfYG6Wu&H^0&?j&E|tXOkU z1tsHYsxT<9KjFciCj6P&*?)fAwW$12Q~1-#!w^5-!e9ka12+vi!7B`o-GA@&C2vjm zdGP!;%a|`Gqv^jkc{Q2WTpWd>eG&C+dm)Iv1=y$?*i zg;*D~T9S2#>FE`}2Y-Jlf+5hZP~$j7$rV+kW_?Lpi){<4Wlty!FIhI@~?7Q{~1@>)wjBy=6`Db zWD&U$*99pRKQFXdO@pQlX@=SIOOS(DL^(rZOA)SM>`XPxbb=ltit0IoJ=xfO9tJVg zUX-TSj+`{J8}Wu#u`mskO~P;~0PR0ev$NnIv)ff~*6KIt%r*U;`f3u%Q6`yB6ipNU zT2%*PsI}7}_WKgFx~qwm`9-uTPEt?LqG#VxJlY3eH$VaM@7~RV49&Ao*#?=g?gjcW2^?T;x&Qel|-_>*;9WlAuIBL{CjBPvPOMK z9p`)0Q&ab#T2>){v$OGyP1;`PJWkx>!|^9tH2WuBjxTf}b5wg;eRKSH%{bNb=PH+$ zDdH4Pjes}zh%{8?$dy_AL$>x?R2tumEtx?s$W~&DxdYm~f;-KAPGLLht-RUs^Ix~` zhAUgJkqtF_x63zozt>SbpOyN|Ii&yn&(7K%SC|KSEWdR9@VF4P8vwLMwSj-2D-GsK zv1UyZ7!mvg=39%srz^?asfbt`r2!RmgLw6PFjS9bPxkPygQLy;8_R$CL8kcZ(OiA) zd(_eT59^82p3_=ajK1bFG>lD1Oq#Fp7Ic8Oopo9$z+^%hf->V`3OS5{JcJWHzRTXD z<7YTYSJEq5IE|{3FnlnY>SKE2cqR}s>GuerviXE+usOB;>yPrU2HrCmRZc7guuR&0 zd%IST&M;dI!{$4yK`wfHNVhr`r3M$MIljB88NKPulXIsNbfXau*uyav3Hq)HjKC5x 
z<+=P`UeX8@WeB4O83TKo5gVK5COY50KJnCCHO(=hWq01BUqyby60O_j%!j z_>2KjP?~`@h8AHXv2vtwzTHSR7v@)xW?o<7Xmha75WPb3C3xeJwp@)%8HlkRuqM*A|ssQ;mRe;suNGWh%{$8VoXyc`dIh`V+Xr^eCN zYCob?$^Qp_NOvnhcoRUheqJR@WvT33b2FK`XR@OPkS!Hy9JcT4O)c;n!0Dqw4ZH4~ zEPFjvx$#@>k^j9b<HsFS@;2Y#_|}^5`+vE9LhFFStf}(|(dLW#XS7Vl zgc_;jl1_ckHvww!j|>zq7@2C!7e0nbiz1YB#q<_-GpH_zFrKb zs{*kVZH@p?q~^K8t`7S}KGxaj|H z*!Vn;+2(_ziJTi*Zi8~lvbU+-g)7?O=Kcx3>>d@4E5wh4lk)5H;VB_+TsV1jh8oN8 zI>uL@b?vM3F{hjjVVSzJKI5gpV6`QqJtWpf(yD+}NnDCc;ZqfIHy|>@ha~RZ6-bMG z)?pdb#86MTdfd)@b+(qv^2p1xvx>*E=>QE$n2&;_ejOF$*k$D_z zs{v8^6vxFt+BqN+%n)5iS((z+;S;`ooVxp|XU}iC_sMN~b{{{Gk-`Jmg~^;<2$>rX zrMC9m<=WCqv%SA;*GS?HcLu!Ot%i8AU`pM>uqnl<)j)|3ZK9{fLS}rG#H+M%NkD~w zw@GF0k?mvWGA2%V|M5Fn@7;0vR<+u%`M>iLH0zovE|vvn_cV|OQd86cL+c^9R|Xsr z%(^3I+F9l_%Wb(IdAf9E3EKl^?iwZ>E1E9yLIqupepl2}bZ+j1*+P=9RqXs|j-msy zJkvG18}IEvX#`Kh#QhK25ZenN)XR^r{PX4ZdYrEClsfwvv5|;}Jn1$CMyS&PA^Meo zY@N|DV)D`+rLZw%|8qHQ5}6Qq*6lZob&wajC8OVnf5_me$MNJ&o-b6`lX& zvF*A=wN-~WTGxNz_*HnrUR1?WN4P45)9dDt(93RQO_2t1Sq>JasGADs0?L1FM42J> zx$8_;WYqu$2lc)3#G@mEZ9Y4;+U>H_$@J70vSX3h*k?Z&rPnZDwzZ6Ecv)!ezW9si zK;x6Gi5=!e;;0xu+Sj<1kx=`Xcc;`dYLOdBYA(Zhf0UigxaFzeCHi|%TW4Uc1d`+& z3qXNuNO0_*?G$YEVl;bay6Urc(7hSv^$&~XyAKvvA5Nvc-xFHilEekzJ6JFit4cXs zKJnf*r!;(pVwi`CKZI*Rt4);@`Rz5AMXb)IwHEqGgG}jVt8i{!vgJ{6&YFEGb3VKN z@Q!^V0R*1A;y#&{5_7jku4`m<^Q~u2`1&IG@moRt-VBQAK{E}-Lh&E7iRbC!IslG$ zVp*_0VzU6?_aI;s((d}9^ASE9dKwM1C1p?Ce|4WnwH+V@aRJK0KvF`^}@>NCayxP{7 z=aMHcPURU9fIXH+NmW_*^uB_!9x@9)|YVwQ8Db3P6 z3+P&`pj4IvV#=pAPus3`6r;FD>PJ80x0eZKhb4_Sc`uI3_QUOTm_rz*xhmuA9&rnP zFC^m{BKw+RFlQphk+ukrB{2DcaAaT|1fq2i7%1WFX2@0JdB~v%Kl&^A7)(nnKq@{x$#^80h2K+-hO?L*Aa->UzpBC;2 zc6jKH8_<&cLm@?n61fgL?Eq^U!*H$Z?87(Xhu5kD4&bFWIm~mfRUY#rDDU&PHmylL zJ=P0&%>3r_HODs}I<;`40=7!=Bf)4yHLz{h$_MMWvK&_F8S%uoF52Rnx0yu`<4k$H`5%cv<&~6gBVLE}Z{PfF z-TCRFB6TNFW3^=-dzVaRJAbl#=K1k-(*9RBy-A-cYJ6W!4rnJ)Q5y-X+Q1rK4V=;} z87{4~SQb5~RsTWn10_h7ZP7{nly$fSv2lp!T1Aph0dR0yHycxl6btQEqNxyaP#Fh zYpGNtqnBkJteRT*71Z*`%uUy$1g9n@^!i5Aqz42~#@1BrI~R%{U7FqdcLn+g#@pf9 
zqvdz!y0Tz6eQcsDgN7_~wv5>|jBBut&ovN2Rm zJe@q^c`G=*%^Ol5f*2oyi|r<{2G0X}EHht(4|7&S=JE9vB$BzWs-}q| z+J1^kHsEjwYAu|_G5Kr`MDWpceUsf6a>k71hSzQ1SLUzSMP%09dV@C**7nd`oGkXy zCi5C~es^B0kNkD`_{4sPx%!+y?L-I@q{mg(Usv_G>QiTGq?|^L1MhS_k@N6kyQF=* z4}&iMU4r3l_Lhp6picd=BQd4qji9o6ndF@xYM9BV)4?gskWUm@g{nrD-fY0;g46+S zT{g*swQ;IK6jrBm{|w!qmCwV6S_faA)zslRnCtup5$e$cnvFix5_mCqn^zVL zsY=6cWHP9-Y@}p0hguLDJ~%s~-B{+owa+ro-1t-bpZBDJ^WN8j9nPvBxw_~2*`Ui$ zFk6sPaiF#g9g%WA!I4gxbTcXny03>SuiKY*tnSw0gWl`>=&M@*Z(-JhCn%_UPHKqZaL`db*woYw)1KZfHlTs5o3ORy7PDb=d^2ap^+IQdakS)vO=Sd@+LD`NJk9F zvFLT%+Yel&L|3#Q?F2}s1#YW2pwE-?7z=n_P444Pw@giGpx-`eRNKmJ^7A(nT=VLP zdk>DtG?T9Az?n<4V$j6lxJf6QmFN*yRbAOAn~UlA*g{ZXb(ZNIOOzg)nza^SvVkmkiHMYX4}>6CXS9)O-rQ{=e0u zXgBYYR$hyJc`AIcPKP(l<-@@!aSZHRIFx}e&9^pvpdz4m2@Sm*ril*%rVO~S=_2bP zjFdMLa_!2?1?6Am z6yPpf=e9S|;$C8`1Ld-_R>sm@`=5{N)x;Z^IOujfx_yQ6@Ys%eCyvZxN zZbw&^3nM#xLj>(v`kTO9g8$L-R$O^Vg)0Rk%Hh?e@eQp9z3_0-!P1kCB4@YXLR{EQ zXeyx{&vp?cJx(7H%#YFz_NQNI3s%TDM_#2x7A$L9SAX!R{(h>5sa=!(`sp@f+QQvZ z|Dq#D*4-n9Ze3VQb_lz``b}~q_X))RQ5DYp)YDl0hlCGDtZFPmODTv&W)Z3pg%&REIdB$awys_ z+6*uV4dZda_Ssvh*_6>bg*ce%(TL+tBf0tmoQB)elbXsoDB!pVppR!<0s3nFfKA%F zJYKcicDL?+V+HR`AK%4G2)+5xS3hrNFU$2jmN1xA!e(_1F@wqWo28O&dq=m;IS_L^ zsPjfu7RB}qD4do`Qj`i8w4bl==fhinc+zG5)7u9%xE+j>DG~k7M!F%P7;Kk=UffrS zw!3#s-O%8yfuZXvg%NB`W;p(prgfld=gk9-`ZR$Bu1+6`-zS_~4HUYe8=>_CU~I$g zmAJ*-z=rcjP_$A%_Th$sqhD@M?pu)BMM!17nTO5g4T|uak$=h4fZB-?o9%2&{k6nQ`8RZ0t zZDTIu;1T%;jO=EQeD(}0JIIFoa|M)h0YEwDG2Pv0et%~02<2e#K-s$|KH2@a7#!7Z zW%<(K1cUyE2j%D@y2@Iy)yirnJm-aKmBYg_yg}3_J zvO}+C-NpIbCU=tAjcR+ka#Fh%7oMo@V&yckvdIZF95&K6QyfCO-`>1O@c<77)Smw- zWX%4zkYNWFGIl6`dtu(BXzIRvxwEZR8IzS&ls|K2Ff%hdOWAwl)~4n;)~-E@k8QS3 zR-q|wQ)kRpw>jt@p+p#@hn%#N)#ozXdd`rkD zVZf#RCt8{n!5L-`C(at~Lx*vT+WTC^2FHiB5a%rbpMrD2o;~?D&YYy+%#;6+9vZ-| zTv_`V`1Wlvw-Ea*a9N09W|(xInD2gg=Lohyy)>&fpe{U$w-TVuBDs0wsG`!@3X;gXFkOBgfg zQ|)ntU*EagZ0h3^>BWXqb3I;<|8D$Y=TW@nfiQ24zRa^0oY2yXJ7a7Xedcc}9e3_= zH`9qyOK5G4B?)E}OLEZ8^v_N}^~uEYkEl5L5ylA8nOuairv7P_!s28mLW1_jCo+2ON%DnNYS^i$W)3 
zrtkwaqYs#^&!aDsJIq8bU(gQ=;^qNe|0?2uBdG2+P{jq_d+M|zwhen+b;~Bn-RVOO zUeLKC^hC*!4hH0b?a~ogl@S@B1Yyzf7*ewm3L+1h)2Nyt^$XR${m148&gh|-&Cl-K zOIrWNaEcE)ulHShNBGn$64@eNLeYe?MZPSUB_gIozDPDgfCj+iNr-BbR`)<^VqVB_ znBeol&x*hUo`!ara87E(V8@$VfBz%@`$iuTH^CVJCVB*<$Y)fn@IVHnJ%H#Iha6tT z5PPT+qc|Aww};)^LDWiRx?uQ8J5&^S;P?g`^6ee9GrtLb$rxz63G$D%T8dKRWp zL1DdlU!BzH&X0`CQQ38Bj`nlUzgvGAb=A)vX|BaIavlIV`OGYlNwKeSj#ylglxR{Q z{ngik?2>@Yi=lc*^zrKg)?Org2ntaQo)pnReT+$?$@$uHM8`NSqHamMLifYJ)-PWs z{Bx{XiJ8VO^^_(L1v1|Jt?KA#y1De}XNxr8_N_S%VancpYHD&){wkw@~J*L%Mp`zB7I zdbHsVi57fAHgt|iA5vVm#3HS+JIO>#`}@&i1-ggp!aWB{93-fQ>R#97QSH%s<^}ow zgI0kLi}C)6x4x{MDXIKrf7h9r)$lFu370;8uJk-ym9uS#G5K5JJ(r})EDn0j1W|kQ z+#{Yw%tP(Ob)6}dt=DlMMftI=vmzHpbSfsr4Har0J2tNS`zGMdS5#?ysdG4WZg^a(B-o^7;BH1?A$!k8%Xj&L&$6xh@>{ZI&nJ~6$N7*pdbpE=(&t&XJ;hCJ08S(4G5aX{ zT36|BGmrd#>z#*owAwe2K%H<%sby&t-dNbrfoX)X%0`y+uhldcHZa{)XR>ae^Xqgt zW|TdSFISt}w{7?L(1(9lEWDn#)cKw^Lw9Du@M{j$_ju&s$nd6Mh?m|dp)x9-+ zP(SPLqY3I9f>us`56?U;Wma66#x z0juO>RM`+6h7Y*$j2^!>ft`*So1hn4+3$pny6o=gAMex&{4Sm>fZ%8w2MFUmel76K z!-U*D`_BcmeE4Lk@QMik_BP&@Xu!5%b0evHtB{FgTUfPz-?bw3PiIEHU+ccgTo|t2 zXiA{f!L|JWME21VBv;#t0?QT|Q!Fjo4&?V9N^DCFQW_&bv}KAxf|>_mwJ7;uJL!j83ewC_#x7 zVhgJh&_UdZ6(Ld;_7rswmu%g7^C`jNuZ#k+zIFOf4F+nuq4y}{~Hk% z5eX3uObrkb*8r&y%TYiDLvzn;8CgUvZPLhT|GEo?gn$O7=8D?xl4aYt;*y$6HEGLK zT4`f7Q%%iGQ*&S6-*Nws|L?vZccu)TyJ%~_bT~Dyk74WR5>WHA^M99X`c)k)>WIC)_iId*dt}(_?YULPMcJk5 z;le?QRhC$t+Ek~PQ~{lbbAoJ1VkDj0WmnoTy(?Tnuj2%}xJTWuC!_Hiv?Qvc<(wwn zONK8qt{m41W*Z0+4`#K>TKt ztAU&UyQI*}lOJxk_wiEvJT(wl0GMdy0O@&%HAt}Wx0n0LNQ!=O`q_MhD-hQ-%GUShymO1ar=Q8=hYAXQ{2$QzEimV&F{HiucYr* zSi85bjcqz97;XV^i^DW#R_%%^|%iOADO$Gf}K`wf=FL@Vwl zthn-qcCndHi)EIBE)g%P35D$F-#>xWDmc)e=aY(-7?Jr?`y{)9jnsKcM9O~c$L$5s zmDG?Q#HmCqX=##QX1ZfCj%aqUIA;YM6u#>li}L8o9eyM1;F#sLnVD ztLP1geAM5!eymMsJtHfB9eE1(>7zX5eoZGm#%!9n>7-QBiq~le4ozKS*)65gu!NaR zm3DaDH}w()5e_u@$HGnFPDdRNZ4C0%y?u~#Yvl`VgbR)buapB5Nk+iO1Im7Y_4YMo zPG*EXzvP=JF_+dp)DGAb5#86 zS%_$;d)}TYSx4)dim=#QCycE;)6$U4jDY)&FE1xyG$V~{Uw{VRkl6#v#gX^!T~{uN 
zPw^d(9V&oNtyAv}7-fX-0$PgYA@7lXZ}XS9LLGQU8yn_S|Got2*5T_Up;c?)MYFR4 z5XkFPLi^DFEZRlWA7M~|byBd4r6tu&fbno1=wl2J2t+B;Qe@UmWRwVE@%>Po|`;^zRvmgs zifd4hED45U@n`5KBZl<74Jy&QZ6d&+4S4oPH?eG|ZD`)A z;7=CoO^54nVNbb9%UnVr7~tmDV8{{CwN>GjFY^eznw*&CsDYEO@)^Gd$=aGM2|NgA_iED!}TBi0YSR1*UI+`q= zs6v8zlw}ypEEU=kKxPf34rF=lC3pKbts+cdNG>id}NPZQbACzrX2dC!iTjxHNKWQZfuAN$gdnmFt{f(r7R+ovvSI z{dB{+^JT9l``yod6XKPBzsRUvyP~G^@J~C%bx=EizFxj(Drc<%_6^Pe%QBZI^kWS zwwe1f(9hi*I&<@fTD4Ns3hxJ6&Z`1)a$mmxTDF7+D&5uL9=3gqNrJ0g zX?)`_ZnD{at~O!!BE%)s3}g$cNgdtvwmRe==fjapo6oG7(w=1LvR^d!rlZ)+Z={Vq7o2ydY~4}b1Z^F3JdM#rO^=LGu>J;iupa_6g2fgo z3A<>@q7i=cf8{AyOt>#R89Gj$PnXwj_qrL@rE@VkN2$sh|NUQ!&#OFjf;=v$X1Lz_ z#!nZ+{t@)LeEYDQaJ=icZ?`;tFE%^z=beiy>Y;_Ss3=1iAYiH@7L+ORs|ay&oQdKR z)IN%8o(C_1x9t>#cLGK@&G$0w|*5Ky~Leszy0W6iFYb5;T&qrA1#sYt?=s-xg$3E06K?qi9VYNzSbS%>jW7fi|4EGic`L@!XJ8TrLgMruB! zxpJo6m0vIX`;hN^ouJ9(Oot^%ok1fMoG;soyAZRAS3lWJoL?r+>pYTV54IHS zTx~Vv9)VmqF4m+LBSISIk$M7?2Imb~Qkx+8V1?cB*9;}hcH{z-?{1n0zV!g+S$^fh z`RCOW76IZb|DUTLE$vXr<~C`T=3aR-GB~PXVPlRgOJ)yoF+rp1pP~a6LmGc(WaRBCBv%elEGq@- zu|-BiC}=WmNaB4Y{PRJgR;L`C>s z`$0TjQSSwNykVjF7>&jEDxe-6`+=4nTqi6 z@F9CK*O(@ygh?i0exBFvAb6gxK@~mR62JbpCH3P4W9bK^Ubn!lEOeNY$;#p-oo zIph)XGNallP)bwmHCtSKtW{sPNv_1|$Fg-w%~C=we@j`j_UqOsL2KNORN%x}xu@~G zp-e}TdzeXb8|VqcWA)g|Bkw+5dbYc|J2N&wx8~0$OM?ENnI8&SHuclQmgyLRzU_q;frHEcs{jEQ^d1fxX1STi)pS z(btnqCiq?V7=!r&5iyeTZoQ&N08}NSwy=$5RV`xARhA}dW>BP`tO zAp^`&zK-21;$y>Lc7lN~OO%L2m1ImXwtZl`Nz#&mR;rwWmSm@vcJwp)Be}`q?>^i) zGh}sjtoHBtQ~V_5xD!!)=#Jh7YFrr4O_~!WvKH@ix+CP z2UCNGc0n^v;pMu5|NQb>U{ShbkQ$fwp@J!^t6ry(y|b$Lf|+Oa!h^F6A@wVpAW=*Aln>gOin->3r% zFpzlFmd|Aw+aC|*dTH(IwID*&bv-h_6@mjfx?osPWeCj4!OwX`e`3S5S~DvkZsYxG^bpP)9YB*qtMO*tMgP8)WQ8E~h>S?{;)4yG;y{ zPeaulUp>F!o_*b^0_U7XRbEU=dpF!ygSRycjI2y^A*VMzxSoyP=Kkq|Q?O*8c`*T5 z03ykPQ6hmC+}75m-M<=+J!|O{6Up~61Y31%MLpAn;Whms5Qy8};L;f?FD?2ti+K3m zyPoerwi`#IIK^1al|b;EGA5pJ6AapZR+pK%@4V1B7(VsKmtl?5Z;#A-j4&G)>dOPH z4bbQ7vV~qjf(>{XGpzQ&27W9Zo`)H5Dp1`ixQhZ+xK>xohsBtOeDGGQS5`FY_)wMn 
zpvy`WZL8b8DSG}o4cDVztiwF4Po#}7{3rNDX)+``C|Afu2LiL2W;iHpBA;}bfcPCdw%ST6-;7pLX4}5tJtH)9YZ{f!&*HWxhuWbfSr|7xJ!R#H{WXl;ac+@iMv=qZ%W8MOxJ~gTe*vvq zT-vu4phKzK0(x1U3(pr%dc3m@==46Fgip-M{J5$7v*q{okyLgoIF^5nb3TOKUkgB{ zK*j`SL5{d-4ucUF>nacn4))XynU zH4YP(@?yd|!cU>2AN@0eA8svoR{=%tFO$}oCcFCFNF3zIhZi9SXID8STt8ZYV{Q&9 zfwcd1SDh|i<)8360)SR(D=PLKkA-MmKsWDc=^ zVZ}^XS@Eme=%(AG4ro7!D7;)woc;x!&FK_8)*OQ zps6S9Gf7w88`C}^Zvf)cE&EoSZMHczUg7_>_siMt;(If)amnrKZ%@w`plhvyW8dk+ z;)LcnJa_K~i5oGb??51{KoywVm#V_-U&KJ;EBgVmlXO`HWWAGu4a3_^9mzny&C(uh z5tJyq!*{y&upbhv?(%j#En`q|qHTI^mT>D*#5YRS4aLV+$D@}#78Ex(#WR$}um8gSPPyoYsg=IMB zRH=`Ya(OGJN?%++svi$o$<)nxIVR&+_(B+Kp|QcCDvD&1?*D+}Kfr~@llF7;^*+!0 z_(Os!geD@%asM=e05CL%%I~MD)!W6><>blC*fkV-u`*lukl3|WrlzVWP zf?bR^@jQ!a*A}iaE0D!#Pev?_3!9Myk=nRyT3U<-KbxRid_EV zBf{&ey{JzV}<#ku%CiKTYooPeg3H*`RbY`f=d-UcuGm zZPZ{pE5IEHg~^pCOG>)B6U zQ_h(=@>3N0*OSoMyEmN9M=~mA3eRvyExhvTbn52O{>^*wr#d(gu+owSEnZE43 zzhoTroSKktL~1(I`u(S0)yM<58K9^^wjX@Htp(hKd~Y_(_ZcBkN6-doWkk!OREIU3 zcUbji65(V7+&*S^TVeVzw~UZ8my`CCi+<)liC@%4`C`RHXjI5PWNDs4glNK>&3(-D zO*1JlKRMJTB1z``Uo39xDnb>=qwy`s2h6C3bsxG3yoVXLD3BvhIpmBw^6 zSp)|#J}*+Lu?21Ji>4dp6#>pgkD|J4)$q7W&rE(k6Tu9ra|hCquoYv65C4A8yFDWD zA5{2K?!h0{+`1NgpLhHhixOzC7P2m?&vJE*ygSQ0zePUo@w)T)(*XwvtxgyrGe@|v zut9isL^9Co{vFh`u5ZQ?~7&CqhUrjr`&~q9`ocgBjv~=`8fo zK?Oj&oQs6$irV^h*{Ps@X(YHBv=_ACPWo6+sa=GW4d{qws&PTxIZ#h)j_IM5)4A^> zoAzy#nFNcW_qt}l^w7e(wWFNSG4THzHGsVTZ`9xkL=BA1R}ZfI>FQ%;<;fSlH*R!y zUA)+JrRV1LE4`1)j+_&Gn^Wus0Qy_hs40M5=XEu$uKy%?ViP zgtLwYr7Sae;R6f<@IbcB;M!#}(OW|>4~cs1i4ypxAg{cNC#8svkZHO@YFVR3@n+KL zN7<7bZW&3{?}5NJS6NET+s%Jp{pbYRiG&p;&SY20#RVb6W1-=1uT|{qJ$O@67Bj>r zxTbYcR%zJ6pF!1tQar~KbiZZp;1nOql#i}TYuNQF*cZP@^!TAMG&&Pm9wMoWJ^Q)F zmlT_23{;e`dXv}+UTmdj7cw~u|BC27>^>r9Id}~&CUjDmbkMPzFyy7r(xQ4M4w78${Ycmf`+`FOp%{2*{^+a>7E9z+7B zo?Q7OB$V4e>bVv+stY77pgN{!%RCu0O{$pYYpI9Bec5*z+362wl_Ix(qDX@$hOvgB z{@Zj0XFg3v<9CiU&dkp8Q*39J(ubC$mGTk(Zf&0G>D_pAJXb>Ng4yj?#E-f9C3kSL zd^Lb1uyh7Q^bI&`sTuq89Etztqe;h!w6b$``{6&RkfkBAJZSz^xiOEc(qTLZjRhQA 
zjZm;b_WXbsmH!^g%R>1=ngx!=kx4zAt-@C;^kZXi4PZ`Qp5*DUY9c`Fh;S>{Z=Gj! zl^i@C3l_O$sl)k-gVPuh;QO#+kyIhT>9;tm8zLif>rEm;KVV{}W)0O{zP`Jb0cy}c z3}uMje;h*_O4e|;nyAL&p}GD$qxva5u`%%Ub)k3V1ce8Y&p}veh}^yj_3fmYSpg9D zq|7Wa#uNI9nh^|kgo0SR?hW5K%cE6x%|B?wsz=ZyYMUC~sy)D^Vc-C}bwov2IlvF# zDSXpz1rEYkk;Aa25d6zV@+`S%JBSkK(3Ynvs5VSZoGq$*BdyhdDo@628zl;>ZjKaZ z798}u+{7msXHOl$g#Jl@#Zb3ao?EN^`AMrvbUVuSfR=Djq$Y3 z^zci#5Lv&xZbvZ%F?i_Tixz8_4<8&1_*8Of+2Zv3g=xgUiU%(<%uP+w4Vw5F^B)FRZv7L-kbC=P?FGe|ImSJ`RNTN#q~Yr?y_F+t&lWWF zJ38^6nckGZ!lKE~7)`Wt701^w_K_LT_<15f#{Wm%qxnbl8FQ{3qU7{1QIMfNSti}2 zpu}t{#)C$8vMrL2TCD^i{c}D)tlvd7?{a(mVf^Xt>o*>^3}PPCNbjuI+`se;`(c-2 z{q!c2a|h^^$P-B@mncFsU~DdTSMO8bcK2l)CF!LzG^g9TMh&;f(6ALlqKTtVe$kE4 ze%djew~W2{SkP1xG)K{>-Sg@2O!(2*QAu>c0&CwnXlCfzMp}42W6P1Muuh)m>n4LdpDOm`&Pp~@VTp?~V09*aW^^n8nLfzpvx20G zwvn@)@T36mY2uV>taD!4HG``)0X;eJdhM|z3)^q(uDy9{UCp7=)vNthzH>;uc=2}j zG|gC4)^lj$Pc5DpoCOE@;VA$hTE--?JndZpLn_3!GzeYCVH59%-4+uQHJRvAw5g(L z(_ioY-3+us`zZ()^4TU6cUs{t(#-40IuqQeZ;3!HHHQUO%zy6S@oDu%L4Q5^wpn}D1b8W{q>ue4 zAf=x^>->;2(F z8b|LIn3a(|wO8;U4v73k5qMqXiFKP%w1UF#Q3e|Su#`-djV&G6y0X=50GxeC>uJuw z;2Qe8y2*~dda*jYv0s*j>0wS;fF`?1M#pX)!8T^a2sxkc3t4bT)AeTrR_)LG>gf2Y zoB;JptkpY&fSj++Is9;sw8mMN_Wj1(v*&-0&v^elSO4kW2VyzkcaovhEP_Yf6|WYD z)5Q6#R%aYUD62(^6;57TIrVnbx=4(vXUTq9RMKgvSusYqV#L7KuczfW4o}5rd6-t< zc;TA+ooPlci1;WV*3D^K?RjyrHZXDubB+=5#x?NqQr6oyU{IBbDvDrCdrm;4Ns;#C z_aEn+*WA1j8}tL{nRE6j*-fvFXprQv%mf%%tu$OacKbMDrm52w0S}Ma@zaao;-%<0 z%kNrr3@sJx!@i@rQe9V)vkrt2Nh_^`cuU=UzW z{kr_+Z@-;giz@l@FY|w29I6xYIMl7=K=nqimNQw=ATLk8f$u` zA}yt;vgwMyIw$_~T5lUj%H!%y7E=28he@Yme){LKi^)Ld+syV1?%VT0jw4l<-%c8F zF;2osL_R{Ki3qg>U9tnVMzTcr;6Jv!T(+ki``fxcJ}l~Mx!-j&#pz?K&hsoF9*ty9 z+pyoA8zM&WRX7+2wY!p+dY6o!n(8uz#lzoI6zV0fQUP5r$M7Yjy6AJzXdheag&J>k zCgIaRTf;qz5QZ_jE;}}`=lt^Y`1`NlVQQttdOe|f*`aIm^AKMhtJ5YA$ zl%-kDidCu+Rg?mtp6ZqJ((KrXoYQ{S$0fq+hmX0AbXMvGi#nCzphkigkt#vJnfKMv zq>_6ndmek}I#tN@gt62O zy+($89!=~jrX$8vt2#Bx{6n9STc2;x!zokih#PZl1-4C!;ZvB&@oN(&DFL8)E*~&-*wbWs&k)@+Ko? 
zwG!?+(VN4Hj74(_kqbzdh2!IcL~pqL?9)g(?xD@*yFC=dlpy_*Q-0>yAf_?mlfIUJ zZ6kL^5yK|__RH7hE;!-W)D$&7hG`y%kG7ym0ZUXKUBq%R@)C_B@v?o}UCq5g+4g&19Nn2jtcg$=0%j8-e*6Le?9lFKbDR{VkGL!{ zlB5_3s(M#&EZJM@;aDHra+0D%KXQe%hhEC z;DrN>+vUW(_g#=nsJ$Q%DrUeGTku})Vm3>(EMyz*!9it09U^VUz&^1;@df6xHn;8b zxleTg zcdpntEDFF6dgZYcy-`GTxeVwM=cgHhXrkyPMj67XglY<(Niz!gl~N2KQmGC4BP#6U zeWJzTo57h$ldIN#a(46X*KuYTTNd$i3Lr2ruubUa8oIx*-&oSuNCGnd{rR*$pq08D z!Ytyu8Ao)I(e9zPp~k0ycc!x*f%i}D;M1$>$G4;$BdxtR9esxC6gK(xp8DZ!Q1+i5 z@|fxkcPXA^>tBD1Lp3uGwg}=Q9qu-F!Jl$(OFwe}*;Pt*wR|5&c(Z{;?Ff zoR?NE*1-Uwh^I?IQgXrMZADlI4iDpa4HyO!@OiFVdl?BxB2blwz@7(Tj&thUXCr|r zp!@id#A-xQrmB~co#ajoo{o)*mM#PC#$6K0u0^8p2t7xPhJ=Zp>mqe=vaq2{)Xdg7 z1wXJcxO#0E8nqWnFi=;P0l3k#^9=Y*ESqC6MwU`%l;uI{Kv_xdcJ%*gNksqO=mNl- zfNppF^9SZch34_q>*u?A=0v)a6))z~Pibd5u3oza?=T_!_e-lYNwFLY^GgW)o#|jL zLNr7V3=pOcXb*VdxzaIt?(-=M6t!2E{b8Zpt%0BCS{L0s3FFkz~xEApuL zKy)6b1y_K-Kkmayg9h7u|cTSxSe9+=NW#Nr))YAx1MwO?o)cch^0e^jtUY zzgn~l2G;A0i$`9c>(D#mRpb5z2|CyE<4AB6!JGy4j+#mH)$7!w+%S0A_B{sM{z;`R z2)(}TPtQHV-?y}zMwI;AlkIphxGELHuG@==5!R#XGY9i@`3LA@$@{R;q3*C^#WH3e z{$#;j%Xn59z1PIXPMilsEX?$tiT1`S2eQIV72LCiqXq!tJhYWfGRCq^zjyTKC*%1 zqxoh0v-y!LInRT45IT^tim`S`+FB&QkO>!^5ltK`;1AVY(oqcb(0ZM@!-Vn zpQVfw0UwFpu#;!!#UH_=3~er{S_e$E;?~FSS>jn8Zpw>vo^vjU(KW~lqO*uKA1|U`cb3<_!djI#x{k7)?s|pb8 z*UyL)t0-YQd?u5EEjHI^TV-=!n!%uNZqZ3TC3~iihy826e(W?ix0`&8x5ne^wnvoE zmK;_E)+1~u0Js>^`rUs@9+U}VWl@b`Z;;2`q2vZ_TSq@&1!B^2JGnjhq)Qo038s<8 zHQ3m+ZVoWJ?vDQTU!na_3Dx3|#i4}FqmGcctNUC9wr4k&jKmS654@`o{>RJgsmL;K zuXiE&tXbvpW&6rr@gmXb0Y1hnnC%2YxqjJd_-Mb|xWB8$`NHUqrBqDun0cTHD6%pJ z@Z8GN?jP~=y20D_dFkY;|9x?oBUanV$S$$vqUd^Fsdc_%{v|u;6vy9k6o9ui^Ppiv zdUw*i*1+WXXFG~1hcJj6zwC!r8_)0$~_Qsw7Q^{j)KD zZ%AV3YoES_wf{V_hLR${S987}T>WETwqoZb;?+wYGsfd{_aG>V9?Nu^ z%%oXo!1lV|k}ci1jg|3SSQ(*)?`g+%6W6P9b#%hrZ1LZ&9o(a)?$EWU@UZ%*c@U=` z*NNsSN>Y*B%;J&(KvZ_Ae>gSXf44(MFkH@v2xs41E8|O?LB9%lD^qBbGC{X_YH5!O^Go zJUhoZbH>^!94c>LUh$f7=~tb3Qo6d6IN#a#K%ge~m^|U4;xBDd`#Lbzen#&hT6#Wk z9l=^xRUU?|*_6orjifsiH*wY+gfF9@n6g5Fa03cuv?~J0B4jw)QmvPd!OsgO<(o32 
z$t$u>e8lSLKzvqdGA%cWQyb*sMFucrLRr}*4d*+m86A@mefr0b+Y~c4Yx(AnL9)ke z@%as@$bNkBf&u9z08J#_Om@Gg@D9=Fl~d8&NUzDqD=x1TCG@y!t9Pd|3ih5xoS&uN zYnzu5d34`?3Y-m!UxZzLee&Fe)NZ2BhQbSxXOE4%E~(h`yw5iSdOp#kxvb3S9}DWc zDl8!bRp0}GbTSu*j!7SWS=AWKg7b{T?jt1kJm2gfQl?GWiUE4Q6|Y3{p+qTHpbeVL zDyLD3eyz>CZg^q@r+G@Z3@%?#sCz~))r*E&TkPZJstR-P#0Zk6jjdoC<^kyzHY%~` zE5{O^hZqc`Jl3PmXNt zek2UEj5Z?tqVol$8LDkH-+aTx@;uVmheY?(KCKL73jwooG6-24s5qGg2UUH?53`lZ z6TPnIjWt6pFM*2cS~GQvNQI~y#)fcUN!Hw{ky;F zop=9Z^7Cb4a}D76$;~NVSd08Eat>rt&#Z@I@GZARMiB3iSuZFuI8bvi&J*>d1%@f6`q>RZ(Y?K&?2nTCZ=59tX^0 zj%V)^gqv6Y&wBD3c+I8Iu)tJ5%ZGT&wgeX09}X3DnYH&(@GN3|DpWwnPi49>D^=!M zVfBOo?tOfL%4K}k*kxeZ7+~n4Sp~^vr7f0zt271k(L4i&AF{|Zb$#@QesZmt`9rf{tUk}Z`r__4i+*e3{Buy9l)P5xULn!hXcrcZG&W>LB9Y--nT^~JN(*LH z&^eygHlCj@YzZ7%5>E<-Cl?#pIO7-dXA9FezUF30%^a%v$?Mp~2zEeXI~x*MHD=mq zOLf*C%|2`L4`5F5@j5mOG=J<5x(E&zv@@*iJiNKR@w%~E>XQ~*kqtoMLH>xz=kAS# zcp{%$PElBxQnNCy4kMhdi*a&s;#r~mNy;Qk_Y5~ZqJv$m6!I0P)uR6BkfKTNGEX^6JCrgS+TamHzvb}3c6j_13Z$!ns5 zKp!bZgWqjb2!7xECU1D*)0eNePOU0hZx7%>ENyJ9Se{g2@jzbza6+9Zr@pt82ZN_d zoDD#T)kt#^f*if9nsbl%y!3jOBu;0cE9&2yczT4}z7bdH5h67@r5zY}7fG-SqCH4qBkF6Z7gD`s)PX5FfM=WGz*0toYv|=^Ax_ic%GB{ObI_qzycN5{{7bUE zGDB~EXe?gaaGIN(&%qjqPE?lUAypB z(6E^EQ|a9M@~Qp%SF8%Q;2#Ex_El%Zy;_?b@ZbWDIv~KDaC`Y-mL6;t%pzH0U3~#N zopelvpMk3}L{m2Dx>NVgXG+G+&W8g85^yOwr8(M|d}I<8T+ z|HeYisq3!|P%9xfxBlGEzzzS;i@cYE%-=42{<W#0IDB4N9U;Q7CJ(>;^|cKlGaZ)3r2@hIqI9OsJnv znMcA9mt8Q{%MXj3;6foEp1%|w&J|2kbDrO_ubpQ|=)QbCqDWAJvypYEH@=So)H zY+in;G?_n!#XIJS#zZteplVz!nR|gca9CZt>j``dWQ5BaH+1m+ zZ|Lxk!4KPUwY%%;jq8uTSJvz5>Yo2}{X)yx;poEr*<-Sjgwx`U2I^ihn>Z8{YvwNuw9Oj_B~Q>71t5?RD)?aPe$ZGAB5^pIMvdJQ-T%n zDTw2WdYBdiv9CUR{bFWlm20ukhb!+ze+f)mvk}-$qy@1iu2lZe{zG$AiTdWpY5$t| z&I?cw(l9fjES2->UCbM%t}#!UfN)~dt#;z+8yoxoDaqGf+x(YX4(AJq&>zb$)(K7D zn%ruc4*cG8t4r?VHBB2g9QUyPcv-y`_26stT>m}}IVzVt;k6OmQtu}(`=il*!%9#* zlsarujA2uD}Jvm z_~Z@figO-?H~~7#*n7w>p`zg&aizDv)iOyIvA=R*VczAB8)u?l+5s~jfa`|sWq)q* zUMtH}NVj%84ROo0q+@QUD-!Q@v703+)@Gnv!zD%IzW*w^TsHqb_d|A>o%N}dZ`h2` 
znva)%S$6G67!|Up1$B%>j#ODDA-)0y_P`jUcqG`YVmVN`DPk*Tzu}Y@2(sjuvre-a|sqWYGPsqa^J;NvPH6u8t>7kk-kAukxzw zq3Kl*Xcj$r%$~7Wl$2J^XE~IR7GS0&Q9WZm7S|)%s=gfVmA8WE*k<Y@QuG`S9##m*Xf$TmR64&pY;9e4?3jXbQzp*4A4z`o)~U#}aFFIMfTM zrMv{^N`T~wpwmzJ>wakZ3(~Z;EM^x-7LwN7|A6G+QZiBa-G7cbe>@Y(TByDc#yNQf zCm_(((Hu2Kp%;r;n?WFpAXep%xz(GeTPez@(QZW!4x5ab6GZdPLdNtFr320^r#%fzWT!?Ck72qZ&0xZc2Q`&#w=caK&V1`! zG+}mW;fNZKmQ}$zW;c+{O$Qu@eQbCC%--4QI0*_2d>6XJNSdab4Ggpq-ksY(QCk3w zIq++p+uW(yO}BTS_<%l>!+a_KB>6P`er|rliYRvvBGxSDIWm~+gNFT)N&6P}Cg9kw|7W1(lC`NdyWk6W8`#iInYpF;V7n z=zQwu*pI{)^pGIl`{)PIaEj9~M!m^&@1*;^%(p&lIA82#H?uEfrM<0HhtkBfXXM_# z9_rx=QRK%bQcx|WZjj^eIz<>^xU#+cv)k5CO#%+WcHTU4`!eco^3GqsoZ4V^ap$cU ziI%Tv7ytO`y_lZV=-urAD5hSF@=VY`?q$pOQXomVP#3Tt$}$gAf?liK>Ig4D8>Z{v zHf%aME;5J?Dj`e*GR_HL1b9r_Co)J{`|r)ykU&yR)rJ9Y`Z% zx#itw%WHN9_ZFasbkB(wk4VpGiA*q~8jBu1x@?O-R3(QTnEdj6bYnGwrZjo19C$?8 zcX(;75ERcq&Bh@-x8-~~|Ks&BdnG5kynocb_UNyVeL7ZpxNw^ETOBX`?RcCGgHmE5 zLJuU9+&VuSH$i)q)cPd%FzO)7Ci9Uo9%&r~VRc?YA=@x=V0a_@5!D;gZWe_WS8oPm z+zw(1i;Q*zY=om9ehBMY~%u!l5mQ-a5?Sx=ZRhmQ_;+>G_gF;F|-FdmZYx1S*f znV6Whe%_mNIm^3`00JiPbukfG3A3a%%Cqge@x%)yQ;kM$%7CQ;G+9Lgo*xYk4h}%FMJPy)@l0opWh63VT7?9WD_)s)6 z(7~veiZSJx%u!Wl6`z!G%82c0I(u_-R?>pXGU$+UfD6j3TK|qYec@YX(&_NfcX@Zf zz$nPY2YrYCDDl2=Qqf6AAyFKgzU@!<-cY&@xU_L&%eZ5Ra|Jm#RMd7<6jq3&ntKk1 zpnHP`I9;*y%_KIo-3t__n3qau{=2#D)RUR~e41Z-_B!UvJW-fKIh8-uRiy8wc$yUC zRdM~?@UAxlA{U(rm(*2kuVHdeVoAVx%^p+%ANPZ-bsUxf;#_nx@hREq58kfb@6QBh z$$J$0vqw#SW5QASJ@Q=C^RmDSuSmLmhl>Cijy3^CSXr|Eg20l&Jc`2>S%0cZKy!l6 zCpjv7OZA-;2{+s$&wFL1ut79N(27NE1+l6TiKdS(*|B);_yC7UEl}}=5?Z6(curdy z2^&4$98H2W5%tJ(x)XnSZB15ldoq`oQ$u{m6#ZjjF$ud3Xp}wE93uU9c^Td_SZH>U zAQ&5Hu>2hp1uoS9+X~-^|A(qK4@)}j`@eDD&=AqU!~hX-$pq09vrAbN6wNi4%#pkwr6c!pgE31}r&(o&rsnXU-)+6PzZLP>mM?WgfH_m%u)7 z$#C)wezX9+o8{mY91;}95Kahy(f9&!0RU$$Uu|xk2l}YY_E|)DD<-~Q6;+r&VG5V} zh%g3*O5#Q0gsaxh&J4WDF$?V7Q8wUZ^#8tpmHD9zD430~clzm2*XZmBTERMWkD%GK zM9H&>EU6&kEO&s}-XzI7O#l^2R3m2$hES|IO4!+}nrhl9695*Cz=d2_2%5gYvfxP2 
zc#2%Rcivn{@df9L?_>243=|5`aL1WmX@R=lJ-@+_p^)#u$Uo~|hgU@cIVo0bOWCGl`Y(B_X$mJ}pd zQ?If>ag*(M!R#<&K~Y;JEN^EiqQn^b0mI65b^UZHpj01O0-1PSs`{RAvs$Io0Camx zOern&nrSzcq^U0?$g$cXkWk*V*=stUt4qHoMh^%esu6%r*hmJEvD`P$D2B2dMz*#X zQyI}F^x_!wJ=I?8*0;?A&01Z5TKKe|xHA~>4Ynug!5b&Px|KR|E^U$*J^V+|?;XsP z+{Ie6)=@`Tha>#Jy2vYP7dU3m0x`kQ#r|{A|MU;cSKBShIKnkV_QBzZ{ zu-0Qm%$l?-c1wnO%U~WJbU|PMJ_E-2*3XJ<&lrA(rKs5vRH) zO8E*W%)&h9hQrq^TG>|14r7F^M$?8{Fxi8Esw+Nt`jA7K4Nv6;KS0bH%zFfX>^q+p zP_AYOsIe7sZP@R;!8x2EC%A8c(MU?BxvJgx_IOAr_eLb0o2!h<7S;IO9`*CnV5CcR ze;jwF*Y1O;d+o=WrJ-k4mqhzt6JDh-`$P<%d-Fz8qX~u(SmY$(b$4)`Re^m?8i6KG zOV3{oh>R0|(>zkq9RKJ+CTo%;kF`qo) zmKt|Z1(BxN<-Gp19oxYgT;=~@)xIUYe

8^mn22PXdyHJ-9GH$#(s9RI( zBLb(cM_CC(r0xe+vKl{oOMv$(kS!9v3nytTP+VUh=EGV_(T~~z{GH8>O8#R(Jt_o)rUI{VAYSAi@usXc~@Vb?`@nk zFX_9qnDy<8#iI`@h`D*Vkp}jS%19pEtzth#Kn+lSqP`>??xGtipg4`lPmoVYDsfd6 z?aUfxzbLp=o48(Xv8;b1?2XUwGpv%9n@>*n?%b#!jy(nToX7+vg6B~6Paj?S@i%K* zTdLy3(svr}%0&5os>UnSz5v;u#1-F%TAeDLBRfVpDGNb>vGaD9Ha_1!2ieYeRUyZ&)<<5ub0#G2*tbz~=)(%pshV;QJ zsucS8o9z^)@fu4v-DjRb%(A#R)jtvmvl?M!h`W#Jh`c?HiS9qzBBBZ=G`y@zfXYDY zfRBT{U*F#E-KD2~DX78QMU=MA5Q%5HGwddl+62eG4pMg8SWk6E{{;BzB20>x_~Btm zMf~0oK^oReJaD`Ci_m#DBo&}{rNI$!hzIMrTNC~<(&Xj`Q&v&u)r9c<3!08+BH9pS zYE*S4eGzp1^;Ax{>ElH}o)$$HNnz<)lv7Ph3Q92;J6sJ0!s$d4i5CM(<)u`l*$V;P zV3lQZF@@Ho%T*#1Nwn1@wEBR=@XsJ9oMd+HXnOXE^XY?Sb%SK&?U#U}?yf-I0SHGe znh5vfN;g;TY*OvL){ifGQf-O{d9n6C=FLYAL?cZzwpyFA$;ME&rh086{ANLH<3&K+ zzJs|X>bX;aiRwvO4pNO$&V?y8YL5&49EGlGIm!^=^Fn9@n&r_KJnp{Awku)a+iA@r znL zThGhLq^Z$_U7@KD=zKJZMqbKhq@%*e^>jQ8)SfJz@e|Z~4XKP17d3Y#G#BQ7DmgKr zni@tI=Ea~PbXG=-%NXe8Q%c!*A&)(9!PInvMLJ71EB}5^ipRR|Q#@=|y`43+t@AA3xVb{fw(@?(&N6 zTTB6DGM4WVMlx#YNHj+D+Qc0+!m)cW!%WDk#Q||9;z%vzB~kF?l)?@8AlFDf!XfTj z=MX)xT4hOeWVs66q_Uk;5KG7B_58dAt8grh$E^v7IpqmhzM%dtr0M4rL-_T_^M~s0 zLQ*e)|7IUuZ|kb>-#pU*sl*nz6i-jX3?_7|dfYA=w=o0~h-KT7d#fpzl5~eBtDD&I<4)2AH{4u_oqi+Y zo__8;Lf0D5_6>mhJ?kC^dXzZ7L?=&DY8P8>FJvq$-71=yy0>qsFqw&op#VT`d+^S{l0^1&^gw$^tZDpOU4G8KI|o=c;ggq} zI`5oFeP8se?^cg3T#$E8Yool`$j3E6lZLkRQVoYtNxmADaZw+g6lq#|dWWFry`H*( zp+LBNr7ar5TN?}L5x75f(`fbP_I3Vfc z&aWL4XOCvvVXh-T0gnMkresAEGipO>oE9TnWcdC7ReF_?}UvV7|0K^U+_tXNZ{HHGwN%o%U7d45hhM&X|DXriCopO1c*K9G#~r0< zAPRIV5kR(@f;@^k#P_0@kc|yxF$Dr1h);sp5%f=0LLrv%Z|lUag_-0T>OLW(bArhE z1$gagnMszV2PWtPEjqixHYvllohSG|e`z@__qja8d z53idR5~+a2i&8L`LYbHV7DlK5iH1>b0W7pg1{*?7;_2q7KLr(a(+m?)8nJH$VLxI2 zfa8o)fY!E&t2cLq%h~%vn%+GB_Hj05@q*VIqX4QoaK7)GPJCRGk>B2?zkDu1j-QdY zUOqaV{H-v`f7#5LUep+Dx5Ieiabfl7NzGZ{;+@#(880V`i66a#W^lcItW4HGRjL z(BqYgkRTfAd9KC$qf;c_Ax0H=-a+PPq4B4{F51sTKRY^>LP5=#|>%G?h-XhW`*mj5Y9*cQ;Z zW}|mVLfbqx!(v>`4uiEerB&cK9(5HWBXU3bye3hN9w@8jrm~2D3_Llfq^-Um^F@lnL)uEWJ?B zU9=CQ9sB9-#66Uocja7MudL2zxtSl0aJT5F 
zB?eZx1}G&7>u&9BfGXmI+R%_fn%b!Y{4(34Yf+o=7N_>L*smniWq50Gs`}?Pv0J#6 zDk@AFkUd*(4uF@7+XjL^8I?KBX?uh8nu5XVdanv_kb`{fSzc2~<11lPI+T)s5^e65 zph`q*03}rqV6_nYPOOcBu^A-kRu5i6qjs@|u)9tKf8|C<6aCHoEov?Vn#(FH^e#N` zUK_nqf9{vBcee5MPqy7)CQaP@<63GX&?==H%;oIS;`+P$a=uY!bz0hmX{3#r1Wme) zT(=xa2v9Q3&*{_PUJ|}3jy}u&`nrJ#&urw>^3?!a=9zohzqaMT2L1eD>QV$9{pGjh(+rRrz$TBXnEw~k0)s_PLdp&k{ry@ z6cQDIL|%A-wU*w2pel^VC3ga}djj+4J6S>qdfFfnPIA;6cKy?>oI|XL$7gi*RAe1=|De5oir>WjiYbPO5rxmPw+o+>4T8t))k4u3MN%E^2G+nqm}Ha4ZZ>9;*W& zIM;hmP)f|nw-$T66o&oH7=eW*6Y4YKlwg2&Zz?O0luKWb^z%&VA#W@`a`1=BMbg=f zS`HyW&aROHxdorm9IV4QtuHW7hNXyu;hcKTttY}m1wa86=3NOT1=*|+s#A=x!p@)vy{mC}Cnp~sZ>eF6j zp1yjF7E;)DMu}XZF&(0TOP%cLdulpjTQS>+uJtEDzl z9q~F{v?O=w;!?v>s)yCrK67Jfe%mkBd36}~R+Xw-7jU{QT}#o>%09k*>?fCEy9h7f zY2xdJ=dUArw{WgZn8=KvCi&1&-;Te2ZbXrbs7V5|yH678B{Zl%&l{R4fR>{*z4=KE zG_>xJA1@N=?cj8%whcl9dArA#CEzDP&LtdQ7B+5QKutwGEKzwcMO6+kO9qRLLTLqI z7dK`qL={p(C4`ziY!4r&Ty(oteH8wXE(s1}9b83Krbzv?p62 z_tI07ESA4CL=b{AEl&C0%*o8S(j1h5_1)L~=25UGj?RJBIO*3%o`~J5`eeqt{pK^5 z_25)@#N=VbyZVQ%zLa>dX=o|w_t=h+Q^mH4nxK2oCx@I+XinYUeryE}8?5y9>&2Z{ z*EDI)Z?azFd&re&X`f5f9JYkVbT8+;R6{r9tmgdD$Ioo$5juC(k#RHL~ak`4ls;+At>UZw> z+t-`6i7{xFgkg~M%_73ZxaV+r5M4Ck@q7%|{dVF0jMF1o7&@c2At8DN0ao_GY^qVf zlU?_$H?a^aO@U4EUnx}NF$O1PSHL^3qc`>Dj8EGz1A`Bw8;%t8k-3B@y>P%!dL{*h zQD@J0S7QFYll^~xxWRW>h~yS&;oa$mnPGTS0JUkhP0u3EL_vMpLvrV1>#-1BUtJZs zY20ZcgV*?&2Vx5&z_nhfHnR#!zVHr7^AXwRW7Kc_@8ABAZdi^&6u*7-*k#+CGgUQDf86$+lr;XG5vjY4}(X+6B4FslE-=QtBr4ykUg{$DVabFoRTJMi` z6#O04%n=In$?50qsK{6}Aa+1MMt9!G++t~W?3!IFlHrk*%K7=^{Hpn`=et7Kk7$87 zY)}}eoF3_|jtl6_OELe%#b}q0q!cG$*>-jG;z_r#)*^W*u&Nto8sc-7y2be;a6Z18t0 z--v-9O8qA-eUPlVz8beq>l z)PcC@(73k!Dc?tr<3lPpoG`Zmnc)5lcJ{&Hy#6Oc2CTi|hnhM-$8Mb4sqiVq70@{G zp|a?~;2*`I)-E6`us`tC?8!&7szPq%Iy8?T?~e>;9%#k(OeWZZI3yLVAqa6%z9evK zeiW6CoItYrlhE{ZKX&SYRpMr@Fv(Y)7a^71tPI*3Iy03yWso8uISC=U(uO7@#|}`wwRlf7;ZnTC z7(Z0X&{oMss{<1~{5>k>H^wCsj3Q=oZFEVAsLWRI0!Rfhu{*6{Z9fz{4`oD$xq`I?1?9B!CvV3AU471YsfmOQ*yr zH5oR(Agvat7-;ij?bs(>ngU`=UDz1pp76{a7}T-?AfmPxxhw_NBXjjK)CJ 
z;~U$iUJVuLU9WN4Z1Zz@uI(jAVet?weG7k{ja{GwYK6q0y}-TP@j`UTYMud2MzAiY z09ZAd({vuarlgSCgvxc;=dd<=phu}fdHT%@CK`(TkVSTKuC}|g%}2WNGE;q)kNQ4e z@cbI_$CrqWKFady-xuqoE76s6w$oF!nLQ{$R*2i)|AM2nL_ajBsd9IfG=*GtH z4p6vGFt8@9I55-IB(&FA#B)+`^;RCKwxIoFwF5i-cW3z8yc7cZKL2vs>%r)p&a+wi zO*CNpzgI1T3~v9Ww#Rt$4?%2sShFinyOCXrQGg*O4NiI8D@>H}Xw%0TnLHH#n4W|I z25|5moUq{~u>J2LM1YyoBTj$vxSmsjL^Y>TVDU=7+gnEtn+vAP*<|ljqMc6NoiB$T z%U~j|#%Nj$Gw`hT9&n!V;uH3iiRtU##{7_zYbAqlE#2g z4_Q!s?g|n7Y@I-s@VWZ`U!w5fe-nkhszd={m%`h5|L(2n^O7g`?hZ}d9UHvUbEWBO z=hZ8n^?#@c=zGOKI9wmu`#jzH?xg96!P-v$?GI%)VhmkvUyfYvNW0Ua;f+RIq1Dp% zm+Fld)aqj~kO_$B{+igIMk6_8p`NB?iPt+-baC`7U&OR1_Ub6jFy{f;MeUV0XI*$I z);hn4g6zU`J)BiTcKY%!ZHs$JDZ6zr$v=#xr~gXh^_b4U%{;kZ3Q`@9SXm_h%oE!*|#L;0N)OE)lofU zp+$>7(cGW=Z=ZI=P%uH)O4rtv zx_H#_%ens(<&!;tKs1a)D`Kw8Sip*-k*xSES3L-z200&m&ilufdeZoVXPFzbBBw5j z5w*#@LZ_+5&t*k6V-Z%StMIO}^xkl>>H~R>70o&s`4Jg5MfrAZ8b!xvf1hb?kX9&# zKIWcnb`wxSC@ge@An?gFuaPTheHPDhROVV}dhi#z(VE*~;j|}k0Q(oZIK6fc{f*=0 zLW{DGv4iF=d=eb!hPzi#qMG0{$7W11_vhs!bRj;&Hyg9%SsY$u^IUEIiE-BBnOu3chfbs=t%OcU1)`5()VpsY&@pX(Kcsv1O@FhlMs-zHKR zFRF>cWM08ih3Gsy#Lk2TyvNbii}EF%dsMOF#<56LS=(qz6&-+;5^{3}ESrQ($g9~0 zejx0ogG1~!IAny?T3%Qr4Lr_Sz0<2npR^wEQYhmPYQXk$(8f zwkaeNWl342g*y(1G~2O~j@Dh7Ox5gNVX~@?bKiXzyRR12!$r#*y#)6Qybk;ziN=XcL8&z5gu=<0 z0dy{srdf;?ZE%vPMN3i6&SX@{LR$R=wbM8JEQRLw3S2{Ly8V~qRr}E{hj?tSgKLM@ zho_7S%;a#VG>4bv5r)$zHnjw=0(syICV3|xbvpssdcm)P4V)rNi?IaZD}9`1Hx1zn z=9SWPVRM4Y9(Rt15>e{oPDe>A*ln8`%Ta?*_rt7j`&)R!@lZw~jt)GAJcikTQmqw~ z)~acgEJ}8S-g)Qj@U3&E?<84Jyy~JDsb8|OSkY|vkFOdWL!Kv@a50%NCz>AI3cP#J z16awF$O4KTb-(=d;mSLSY(zjr2Bhjj1L*Ad;g#X-o6e9@GjENc%H&#f7r1R0N-+j7fx_z?H` zrF;`!+|GLCMXzExO@t+kA+7d6qZx@C|6N{P`>Rf7{YgEzniu=-xW``jw=`_w(>OYz zL{j5AZ+w8Z%kmn9ru?v6l0C9ZX1F5~g=U}JR@{CWp!13MAuvAKVg)W&*PJaV2*M&W z2#9NI3_b*n49CdVr<@o*a8)xaV@hVV?dG}?mnSFB5pM5^r*dM&kewxP*qXhg=Mf6(T5_2*2zL?sQ37p-Up{eds2x zZ=IsaJ&O0^^5(*8_q@Ax6;bGlgK?W&aN7;4BB%Lm1!>kuK%bLCp%GrsyIJoQEv zrGBSj_^iK+I*8>|?O>o)K+-K#^#vSv=OIU?+!Qjnha85tCWB%5xq{bCJr&~yYVHS= z)z-{4PVTe)fLww>a6!yehjc<HYFnc>DTceM`r^*j+w 
z5hwccRNo=->mE&goaz+J$H1MPlKEXLv3p)a_Nb#>i?}9}E8=Wrr?c^BT2+Q0*u?f& za)9iq`rU7KKSqM3S4>AaPY^j*KNVS5ZbSz&`VH~MyNWmA;jlSP`Y7mxLlD$ZC4SVZ zu(PtBZlzCm(^w|P(^|TCeFdWl9jQ2@s;X+&jiwX@4^%M!(|%4F$Fg|f1)mC8=-%?* z3pSn|n4}4JhTeJrxtB5aE@pXfUjWy}3MFsk&r*0LPahv~-nra*;MC7v^A*x_moi6U@ywdq+%x}k^Q8QnF2X_n> zKi_Tdxti!b@4Vmhi{RQk#MXLX1k> zF3G9zP}A#OlB&|;IeJEKxK&7@8pXG4%!PPU zU-p(uxd<`$h@07b4SbLW|F(XGYkcKDdvjgO@5!vV%309HREDb6N1&SkCyfwrzv@A1 zXG^Z0*FG~tBsU|OrP2YQ%98@_n`ag_5e?K_%__X!tUi+B( z;6LoX{`CHFYTl{6@Mc9A;P$m7KgWw5i!ld&NNIEsvU(|2%l~wL8u;O+`W{_Bh4xT^uy!gdaHms;JEu;!8StFK6j7(+V;l{Czq$zqN z<iM2-{QJ&t+PHXf#izX4ke5k&3BlqxU7pZ_;p4NlC5Mk18ZzS0=mAne!i7< zh)jM?ui-&k=8VOrIN2Cok$Cqm=ZD45I`_b(I1zVu9qH=V(_Cb{#Hirxw;tGcf=i&! z9Q~+U7|d$uogee+DgM6g=^x;Q_MW+wPceLQz%s*^OO%G>>ZlCIu+s>J}$`rWOycVOHigq9yV>QoAN_ui?Z~xYl{^#Bvx<(;6 z0ZoOW#1F4fumGJ>ANyh6dgH#%4|6BVZ~nEa+Nos|XY5U`y#<@8)frePx;klmX^c0A zX%Mi3A?nYI)0RWLD<@`L+V8H9xE*V680W0lg#GYH=wHQ`)d-z(!w4Mfoe%?o{?$cfTt1TRC+N&zIxMOL?E#&n@{z#az z%}K7BVEUi}`>6qh8$r6-z!!J$N;o98*m!|d8ScsO@I-7n4M$^pI*PfiI+Q(jhHOT; zpOJ>HX|pUB{X>|Y(AEoC5Ggn>gTY$#ap%Dv*U!`Tck6 zUwgm(*Q_BMc6dAkq>*svqNi2P2J^_ZeU0{~Z2uzt{oYaQ+WV(=q0qIw17&NEbKEJI zs_uNRS<{ZVO7yR~o+bO$+-4@X7>4B5nlvv*z=4xdJx{1VH2Uoi|I%oA$6;x^>1vR5 zz1?3y2g|k%Hpq6W_`?-|D(M!eR8QGxz9O%|A?hNluFS7#EXsvDw(jN3@aRuokT92x zi)G}4ABD?NHAucoU~`^1ZY-&cp%em@CD05&sdLiRAD@$?pJaOKMoIBkq%qY`DLMNx z4nBKke0EciP||wv`*iczg!mII;>H!}nu;>5){t(HG)=n4Ng)!t3Ig%{T7X!Z`6&g~ zSZovXe-`_*;!Iag#?PG$Q7MEmlo6hV%1h)+*BzTSf=@A&X+l_dhB0Y@i||8233Z#1 z@GHZtf-#EK zjnmqsGKp04UJBr-+KV8A=h^1%9>(+9<}CYloT& z67UV|MDBx%e8wDz)&zJllxwj`)TOuf1HP-pN%{bmC(R{w6M6j}_nd+I4X36#-cRaY zz!vsjxJQ$Aypb7>+h)!RCQPk0S+i#jT(2^NKom_}ii&JqRwK@>+Z;&$%-NtXYU$yP zQKOQ!XQB7Q{C?8;xsp3mS=67;nc*_l?;x}N0<_>BcS`>5f8xAx5{>fS&^mf;RCYY5 zX0g0V)94IBh2|*@WVaFV<5d-T4By9hYN`x|wlm2aZbsXL)DUPKQf;Wu>i>*D{QbXy zg_A0H?%gkx3;!-cHZddY`b=)>*kft>ys*c6Z=Q?&=6a+Zp?P~zH_$i z5HLdz#V0TZ!(Iz^BFt+Ls)_w`wEf6n%#J|)&vb7rqGQ8N3I|p#%qSL_crge1rNW;x zYf{yX%-5{7TU`6Ca<{|A;vTLt&FO)}c$4*x+M4tdQrbdMUyn&6_UJ)d5v7MfGecKq 
z^#5H!4Wy^MnZ4c8`gY~{u3fo@l=q-Pb%wBg217Z2{wK>|nZf5kUd*We>>L5le}#32 zQk>!y%GAGzR#~OGa{eMTa2F#a{l~C!JNG0oqFxt+IG5Cc`+SCUsu2QZn7f|R1U zuJ%E|KldXwZHY~7l8Gp@;6pw_`AQ$$bmcA_yv7cRwZHvZ$B)>yIUZ2>JdJg2t3S6z z_wZ8Bj`Nn+y&fK$?-$vDj-`KFEehSc{`cVVPb<&%e?NpV3b|%ai_W!hu)lt>(&>qOqX9_}J=9W>7e6JbYXIF-+A5;V~BMEzwDDklnaqwzJ`5HWB% zFn4ZuZ%zUFqorngrK^s-E8~wF-Vd>!s0M?Nh<|N^-px;1ZMgf@3b#Uc-e$Yqs-S8# zGm0xGWNJZ~4uIUPl%aL?e{7{x(T} zE(XyoMAxTjO< z5PeE4%=*V)a{E_Xy)#6EL5Y_C#EaMs%g*ugWV4%}@i#Lap2c{aHyV^I?q0@fTn&u? zUb`q0r7^%Y!+(5;J-H*{B|xx*1pJe&nu@cEjxfCC@%#TS!>z=y@bKT!i@B=QHJmx~z3bf)>qYUKvH$7khLRt|dZeN}vqgAcD9D4|`%cZKBoY+TBFE^4lq6x(n3JPwBmkPRfg$+aG@r z5_Y`rX8g~}T9oag{i`7vXE5yjQ1&lZek8pP()?@B&EM4T-blVTO|o4J!R1^w+j0y<(hZYb`V3#5xMa5Zb&m_8px_z5P{Cblu$|L8U+Fa-1WJ2g<2=5B~Ble%R$UD zf^qbw^`w+=a_z7KQk;V=(Y$T!S*)zYC3dC&@Ot8bnO=iRzcaGSxmHyxy)i+#A>)_r z#0!^Kl(@;bIj^_SyM_mJTi=Ig4H&_BuW0l38EUyWx+MADE3rA?br9gEv6>Q+KoYL8 z;<3Phh$zj;Zl>Sl+HmBpuWt)d?T`!ODUaL&^s}lWKRs2S))4VB9D4tIf6)E) z;DV#S#sKNy?Y9ZEnW6+dWGxg8RGaws{f(yHhyJyZKC5U=nmQaqorgs7g3wyK0FO8- zqDiEA?G4+6WLoGxPah5c{LikVDxu}3d~CgsSNkDZ;ul_ZZm0oaK2q{g6&FCgm4~ie zODYwSY*qd(#$ozcU=h>jqB&p59qT9&7&YE@JJI(J<&Y}NaG5$+`ze=4%M@}bl2bt4 zt3)d0UHi^Tp$y_{xe=dr_|@jEBek%+N8dp&9JIb1KXIs5mDRkT6nMtZG{>^%rhHC8puZ(iN%}PktFTK`z4#9I~Fu-kGNU$ zEo7k~u14ZUbP_vi-PLEQbmM7zP|?9gV5M#Z#~dc$W0&#Pbd{WdVWh%-b#BeKZ@YMC z=iw4A;zCXEMlJIVQVKR`4uAv%6#Tc3yhe$g*zSJTBA8(R%(u@_RCDg6qrpCn-p>h*6X~j)0jYp1bV~KU{+MCaFJr_JTjEP7ARCmBspB zh0rhbD!eOeoSSvuP;EJSp-$}u3m#TRnjQuHvB?D${oU8B#g5t4J=LItBAUMTuTc~!Wegh8!j)Az<5ln^^6X^T8OW%pfH7g^X$Tg zvjbW>rD6m0duohLX#v%(Z&Wa?NgMm_>{)meV9d<^1uD%?YN}fBfS|-@g-~6)F1l}i zEExSOXb<{?1->-FXP)qv`itip^@&kOHTFcGsU&-Dww|+ zF!pp+hme`;6pZY4;`@@Fn7Vr2loAw#GQz~eloOHj{p8{>RxNrKf|;Li>H{mE`3*0L zKfnEb?jVtQ>6p&@B@;itPC4fO@}2lH;Yj@PLwoGtl=BxK2WQT7V;`Vr zx*k*Cg7qU$2Wb!X>06CvUHJEbJM$OdV*+AjhE6ontD_4lR0Ny~*!5%HM9Mz3hX2u7 zm+T;k`!jurFP?noD|C>4Pl#6V4*{CPZ|0mcW%zI_m4~>{Thfpu0bwf`{m`9YatBDi zr|2G`Il;o&{NU*mpsFV!UZ+|r)F)24exX-@vIA!l2k& 
z?<97I#6~d1w1780S+(w<_jW&?Zp}s!eIkSE|Nig4M_*g3*x)m_GBRCq0FkTez`BLB z2Y0jQ(`uI(=I{^zYp2F@ooGp8q3{L1>*U(6|JOR=b5n<%lqn2VgQzGN(lZ&egoC(3 zzde@`mkMjgP4a}by%mCkxYp3!a|3UuifWryu|p#afU@B1UNN3Y#}^Zz?&Rp8#XY3A z-WoHqBU9~b4SAt`;t*q0*zpfr9~|aWAFHn`CoHyl6a)(4!T5=fZ{B8&*UxA)?ckXX zWt9l-BfiW{7Ik- ztti>luXo?6GUxmCeRs?nJFWRE=rJ!Huhp)ne_uTH&ploiUKJBu!5p3oJ||A!1tE)+S);T~yTF71BsKY;(jj!KZB`$kTF8_6y;#QT3m0vkNXdZ6Hx3U6w zksjf!eNZ1yLJ#smJOnPx3#6^bL^bq*IAhqnzAK+jUQI1m@!^vRv@?Q+J2mFj-!c(% z2aalG*l+aL;a#;}RrX4p8w9d16J4As6(ZqxiAeI*u^F_Xl+kfZ9KY667S55mKSG;_w z^=&8P-rPcyU*lw0k;V%IAEdg@5UKzSvq5HoJyf>$H6#SBW-p4Hpcp%>W$NXfka*$# zC|~{bLmB3uN+~Tb+FqnMippU&GOpw0Jl=%gQX3+^xMnItf*}W#BdM(!K~ZC65dRy* ztFP97dJ+&$u`SKSxQybc<&SJX#UuvI>&0I=3#vL?Yl`^r&h21SX>tZ1ZM?kUCs@#9 z?{@K@3no)3S26(q_68_7-RgkaC7flhvk{t(N2|o8MVkL7W3W@TSyZq8aeJ!RGywE$ zj1jaA!MOUJ`kmlz*_diRWv0Jnzx(7JkgJc|edkVS#Jso9#LN@!w=y`^#` z4Qy38!Lbl6(Vw()&2_1w+MwhpDF|3GS*Y$2iasA03dS4Jo2>#=Wo;hao9PgAz$C=0 z1ix=Oci!QVsvj(3E^vs`8AMou^L(H(Oo13Gc;wzAWyDU}jUyws)K@J>zD;@-y?-1W zqhH7f!82I71Y_4+HhL1-(2%Q6Z7!u)=)rM%0^XXJ%H1SznsD2N1I{vu!TgYMzu7-& zN;$hUm=7Dn;f%<{Eit30b^zfUqxT%*g4H2ce1B(GVN#+INrl(7%J^w@gnJZ z0o13d@sI{liTbSzOYAPh$ZBlstKz$ZQc+_cBG`?}~A1-`YXrL$=8GQ;tffm#TBvU@~-MECRt$ooeN!(-YT=#`B1 zEvGC9-dQ6|MZ<{_%=b0rb-^=mKTEaT1MKYZ+sf&#o_eh7^HF~dA&ZRY}?DAbp-c3qxCC)@eCP{=1ROcoO=e@rj=Js?ZNMkJLl8TI<@~Pz4w6_a*=Cs*&Q%pt9zkf&RkYf=d?asudhfSg( zneiO-?N{-0QJFC0X#KUHhG7sOJ6+yd_jWsNoR_6etUaq^S6bp|-s6mUSL zyf+U>p@u~sC$xFbC9&+AhSQt91I7bRwXI4z@DngKmGO;OpQQ3%H3c$C=#0O`~F|+*^Ieh z=kG3)okkvTwG&RhgrHzR4qBVjRcA z#%IuVsO?DrRIWh>0E(y7EhCpf|0w~jb?RJYYE`=`4x%XTZ|@zAoY!TQMuf{drkKyQ z&#_ev6P%?_9Y_zJtqANT$`Lf`f;Bbq5<|h;d(np_H_QBCLyI!7? 
zfkFzfyc*H4!=U9kljqq0aOzCg44d^^uIavaq5Hl+4W3D}D}=cb<7@y$95m*K<~lqFooQFsBLU61Ob}PKd5GImsvYhlcOUH7OuPeJoHQbSGqVnvgLi8$fYP z{^yVbYwb!dnx!QAM63B)<96oZyuU8$8=BCmHUF&iQPP&j)~a?9f3-63&W&j@!LMl6K`7) zwr(B`UTMR@$Cfs9543HFZR`jh$dHga#u|z;$?5VO7sJM^*(h_-VP-C(T$f6R$W@3@ zq01ABQk~Or>U266zxVfd_4$5pH=Eb{A2*x#yk5`8^Z9t(k29>avdZI2h!#O&UD60z zKo=RAf4p8J1=7;C`@y|ljuJ_fcjXYM+hM%O>hb%1{aZZVouAv^3#xV+pM~*U^ytlf z=NmpBvY7elp@DqC&@RIfHQtcDbG+r*hv$z2s;WK(!%MNSIBKaQ{#FJ61Z+-~Jp z{}waEJLagW1BOBB_)@O|?Mwfq#HqDKhx4UiJ%!lSB#eBd{2i`Zk}y)Th(7dA!{U*H$GK91(mtebb#+Wf=`x$2 zsg)kTWBb>moA~2F!KE@at}Dsj)L~^|2+TxOpSI4)emeL2kvA>poY83I<0FUff&>Q{ z1saM=`;oXoELqBHRGqB!jZnOOZLk9>XF<3vLwPiFq*Cl3VVEV+fKlIWc z#uY_o5HqTGd~Xc?d2gA6tA?G6waVx0#KD-lp7Z@wA%C3qBvYGOI&mH4-1!v{|yMKF2- z;MIbEoC&lp%xB4tR}^PtHlqsY#qHDB96@Tp+ZEHiWn)HQ1L*Fhof|pl5*sexJdIeF zKZ|xcvv}-P(r3D6>162zlP7oC>sMsn$L!voSYQ7_4Chq_YNax4NiN#TmJn~GUDNn_ z{HbHvI}GMsWCn*dP_jeT`w}f2krxK~-ZDL8vrb(6#iQ4aJR6F&i0wf;=0^7_D8N>h zV=PRTS&Y4+v5KTB(*ByL@_QDJn`uI(yMh;^D1ZQ&E1W zVtY^DGuKM@p(|5OG~3{P`HU#7gUclohc}ITUhoV#^RmOPz)Dvv7wX-x-s4D>TG<;C z3&2dWhT+Ao2=QG+&ibwzcl2KQyk5EgBk6CicRrpCQ!7CHmQmKW(m}p{+@Fggx*_0s zt6=0@a6Ihp58Bcg;m=fBZxjQE7K?&Y!wL=qwLtJ&Nm$92zS&c8-g=T<~ z58Fgy3mq{7k{Cgx`&L(zI51IC%}HWaVrj$tB!D`FQt+QicZ%BfbogF9f5uKF2;a&I zluIHcLTjfi``$g$Ye|Td3fYNFge?8)cbVhA>wn%gY9x!;@(yICOlT-ew4g<7&$~bM z&^oNd={ymy3+T3`V_qU9OV1w&NBTbt*B!;{^ebWYr}s8t zv&`Vy1tmi#%cb6cXVwfXD_pK}fmfruQ4Q4+G?lJi>kkz>>f7X*g1m+>j|yWgyV5## zjY}-dl381Sdh+eXJ{;FWN-r$jxk}ADdgtHI=UlejC3UnZ7$@zR;GBg9}uV85lV?kSzVgw`5)d$f# zfA*fW*~#<1I`tBAjSF`xb!hrr#pN19*pLFi=uL-Y*T;X9D54?=7cD9S#w`hs<5}<( z^IRJ&gKc8#=EC$1whHF15;%`C-fBGnrVS?@`B~)ik%c!cl>`YSsD(cmdpNnDR5v|K z!@-Yex|l(m>7}6Z3yws5xAKAj5ep)r=a8I>yVD$TL`d7^*!-^sPi(V|U-$xupwBU` z`)d;)E;$zbcJK(#X=CDa?8+}We5l~bC@P?;^x%UMc#a?mMkRHby#&m%5hi&yB4ErJ z2h*?1Oaf-u>Uz`?RyF@GkUPw z(QY<}gGhD#dD->q+zPHCkRW?XC`x;~$= zvcon0Q?RXlUFopl;|%!Iyn~u>>-0i`k5(2j*|KH>3QUg!-MpYHWoHgw$?=4?DsI|6 
z8sFu%Z(#11=4O^6A~zMYQe>f0V=9nEkf}u`oW}8qX-u&h9Lh+sms9fJ5Q8hllf@2XHxw`GVDXswX?%LwKS&_T_RrMP6e*J)NU%9H=FKw zwBKUQ?Pm6=h2L8Hv}!k+cVT<=1+$`I@RD;4iU|P4)fl>Hqw|Plcgc^F}%m5#iUbRqjY#- zj7VC=QQh2g4{JE&kjI6!oyiZR0KdRk8SliE-!D>GFZL3YIQkegpuKqmHbl~N# zYfgKPeb@!+pHBY2$^j_iKgt0aP!2Mqe|JDNHg^DDJy)9R+grP*gtGF|(kC;~ipp9* z>=FEA%edwiv}4c=eoA+o9Sne%zEVE4*goPuG;W@}miq)nBtn9uy^S2pp zM+gwlK|)n93%Hu$7{m0UIQld9d0llfHrNIS7ic3cloa~(GU4D(Zbk2q7A(~&nWUHQhWM_0)Rx>k4O?j=WD z^rlr4_8rBt?BB^}pWlA^Eq7`5sMXu{_>#0Z;~D(>UwYwklx{!F`xLCDv-D5-mu&80 zKA)R5xg$E!wgF@Hmq~LB2k%ho02I4W3@w8?=^B^+ET9T~8g8Q4eUN-4>;0Xf_czY( zK)FtjFPTgFiOH2>H_FOSuJWK~hVnCSXq8WHX}3j!rhrf5H7nho@SG7b@=2?H_Pf#- zBwPJ%kHe+qlnEy@4E^SG9_gamF?3rc)^S-@zD(P#>!9vMCV&k|s%S6ndQyuI%Q1IC zcaiz}!_$9zPI5nU?~Dr8eG2C%Co!t7)?|ZIF#9kkN4L%Y_WUuw_w92->pc-G@A{<(SDvljOiya$F3mpdF?6!jER9Q^rv< z?Zll{_{@Fbec;aT4k~}gY=1X%Vq(XyW{G!1TENN2DKT9C#`5+scFk}Vb-l9K+~UWnE?PYeUpyT1m72WW>Cu$21+X>d z-DeoC`HuI3W)4c4J;J!idmfV>Dpe$CS7zc5v=0R_uTIAONW^}PEx29=uzDY%daVyk zUH7a`|NpBFq3*n$igLJ=dH>dsH_jp$D)jcXnGq0$4seJq-=0x;7uA4HEFLU(r;(wl zVl$3voC>x-@a6L>&8PI&?c7%AzmHAVq%?TUyNnhav@$6EiIMCm{5X}2vGdvgAbCBz zd1bi_L38{!RUU+EfxQr$eDbD&u1uQ2x#X@1 zvAI_IfWA5BZ6g{s^qyd**Al;`d*5#trY8Q*^H-Z}e0u))^qAexq2%rm%}vPz0EAfJ zfA#nF!^-fFZ6Cg#yMJ@z-2E+K&9MX(w|XK*->>ID*uS68eE9yl8%Az3%~|Y9Fe1L= zRx~MC_6gpgf37K-9-AL3y#A#6&ttCP*PO-?{9dmUlYJ4x$Z)meJZ3m0yGYF(#YbC{ z{R(j<(qP&)fMKPw%j)E)Lmpr_o<5!-_9tv~#91!-)_kVHs-K{8l^wY+QL6M4e_JBN&&(pc#5TP82-Q#W z27kS>%Nny%Upn>J2sE9X46V^K+uv9sgNg+b}k&{H3>fUztWKGj16YU~J5c zbHVK%zxGG9W2}k@;(P^K&7o zv>yR;7C9{lrs;kO4U|{L%*uG;;}l3`I{Rk}M25R&yq5k7d8F{rkN)Aq!Uf$>gFEvK zv8dwJ)MRjX&(rlf3l^9zHP;#*E>VAXg@@(mt5bn&ss2@rnMWg0=o_zCqm~ z1yh3=ahA39F%7isqfddi6E~uYNGRvH3-jBfYC;JYUNoX15c7NpC!UgqBkcyL_Pp}y zNccNlIU6d&4o@Y?dYBEVSQoh5^5nPYD<3_YazCKbS?gj3ynTKz2AY_E*zxzVw|`&z zt^)CN*YVJ+y!uDkjGuSz@vi4S-5Fc0I;3%L3bfML91~Y)l0?tKZ0S0u%w5(G?U;>Y zaUE%2dY^<-yv@CvQTBQdUoSAvLyX2rxZ~$8Rd1BH)mYQW-}zlICHpm&r%bhV=Qor0 zp71Eijt7Hn4AV1KlYDc~^g1m=oAwlUH4sY{gM(>Opah)-p!YptKUA94>FJvWL{l%L 
z6G~G?T4XrML`b~`8NNgyp$phyMKI0#H!DwOE6$-C<|2ihtvQVG;6#^@d+Ai}uP1lb z?teFW{PhEzw-4aJvIa|!R2-Q9y`A2&th_0uCVK@ZSR47BxL?*jLPQogR1;_9FleO7 zW2SH(^n$MfxD1YuYWDT&_sSttDJ>2NTd-LbY&gd|CoR%7qpAW3VzzgvP1+q~mClS{ zg=;fMaf;Y&Qfg!8Y)POoEg#B!oJmD7@N%E{@f4XTxuNr2hP8oNGOu$eBJ;wnQJjys zB9OX9EJLLhK&cYZn#Yj1kz-Yt18TWL*<3AHl6Kdaf)kDOK+g92v+s>X)_5lLc6(~w z^qol>68S*w@c!BXby)U{^2=E`88wn(70lz|*E}+jcxH8Q0^$p0IUO}pe%_GDU>YUp z&Nd-6Apw(8u291-94HA;(?)YMC=ZE>3~Kcxw<>UAinmU5RY0kaISjE|a<>^Y$*em( zhn5jTD1ZT|cV*^b9f{5SP)$5p8^qe~tYrp{6@{cXf`+6yxuq35;{t@$-{9BghmHzVcpP-pprH(wfKT9lM@Coj!Jt^D zrBCm=$w!}`-cAEd`ti{?Tidkhi-s0yX1Cadl0vLMR>=j|wG-hULJupE0bbT9z#s~X z%I~X4W6MTGV=HA`^N-l90noUkDlk`BdLKr6#?D^wy7&rF#G6fm{PJR^bypV1Istyb zyMkRIGK^k{H#Oze=la%X^D4$XTqXFrG+%Yn?nxDvepy~CKMcCD8Uyg>ijUu)*tlfz zi}HvDr`5?rtC|2NR$-gA5dVVI2c(_!tX3RcKEyL%0xF zGG*^spHlg1RLZt*LsOLHLoXQktsMg)ADI|#S5L}&Rytxtv-$k&;~smiE&P4 z$FzTOo@SkExvR0LH?O>}JvrA^8EXG$PN>@@5^1nB7b8RJ&on9}dNveE;IBj8fnpro zs59>i3Nv_9VSr2piL4K21jC%d!PpfBUyn8(nxmCC%{o9aMc%{AQK)K{xRJvRLJp&N|$ zkf+DbmhN8jOED#8vW^RGSvwa$IOSyImD2}#0D}Fcp676!{aGfUC-)}hJ>I|B6JXY zqwm&br?;aX^lSTYGkj`m@?e@C2;tT`Zvi~?F~`60)~|VPS>}kaR5~+!@3=vaQH1%7 zQO+D#==CzR9p-1-9#sXhh6Dqa@)yGOxW|H|()^kfARU6~6X?G%H6@luqY9GqL;SOy zah8Z6g8RjyEN5%5H^CD&ZG^{v>B0myX+m+69+g4GBEsa|uq{@#({gIF)9JwUSzb?q zYtLB5dRzrmP8uXC&>*Pm_4WUHlnx0r+J7*7BAnSIuS0^uXn$-`*7X;~1dHz|Vt=7W z^K?5MuEX&I0D%&cO`6aYHG)Jqo7rI|8~jq45}P0IIl<6HygI{p2BQHw7`MvpaRUPA z#It-xv41rSPrD->g{@clKKv3vMj?KSHdOk`5ReO1gvuTC_)v`Ta0U{JpJgS;sKK@# zZ&Pm16M30PuaN!!Z+zj@e?$mZK!j-Cx1574uU+ox=;-S1X}vm;C-}bb{_O>c@M6gm zoU6&bUOB2@<;gjOqdyL=(AMut-LIAKT^?o#si$R=?v3=fF1B;rLPs0D>@i#Nf$iYn z-~`)_-kp-ByxFeuvn4GWgrMX>S1w+EMwDx0J<6pOf|E0-A&Dd6j1;I`#NkOLhrA!N z8hB(`grP4T>lq$O5A+4&uuNxFoDAldVUj1snv$00!i0*mJ}hlrKM?4zmfI;0{`b>H zG}abOCc}2Q78Vvd@n9$synoUh?nA=Fr=^8bU^y^QA;#&jDMV?428L>DaCPF}l0zQ? 
zTv86aBzn|jW?{9;T$VzNtPmx0WI>^q_|``%_{b4Je(UH3(G{`of$lS5;N-)l;k&7H zZHzo`Mom1JhoFXKb4;o)|d# zm^(K%&oHq4khrISG|4E?PiyCIGBZfce)5EO?ZVJN^ss`BpGB&}@oru$?jngb-1G3+ zhpo*|&OmRE`f93zM2kxidvl7Mag)8Jjk`kj+FegM!UF5qB`z12YJy(r5%F87R&?(f zLN+`>t6dief+{6PM)1(*|mcGQ!GGp_jKoA=uKLl`2!_`ZI zwseEc?(GvN6W2u06XU@ev7F>^cl!9w6E$MmJwV{Jd6=i{{WE5GS^k_OjIu0z_2vjl zzUTX*HVf^-<~skf9C4P9Np2KU7eewPU1_WFzhQ1l=+b>D3p^UPikxL>#%C8_tcG8SRRSelUYv9x!|17>Oc z8D*>av(allPk%L;4jD-X0_P>O12u9k(kiLq5@;}G_Rty|Xpo$+4(2J7@{q&9I=4M! zvD4?(eWc;kwdiPT=C;*<*=NY5o{bi3HUVZ{>nnW1XgX*!77Yd=g^~dhFyG>|KMDre zYH;k_g=iox8IK$ZIe{Qc++qaQCN@ZviEM4Y%;TU%)>sXCb-iU5HedLI`P-Cw8Uph7 z@SCP7U)@sHneVnQsS-*K>j_&K?{0NnTuu2^^QQ)6Gxdj4kIi+25T{QJ%dQh_)_TScN?9o!Tps+0;x}AKi6gxIH50a)v zcjPGJ%Q1*frAb|_(j7fGK|$s5sfx_Sis`1z@(2SiL6+`!%y^u^hbKXNz*tLPcy-)T z*1?10l7#MA?2IbQSCfP5FcKCHPxA{epP%^cjd@mD+eT||lZpV!!tB{0x2;K$0&aV5 zW?5VhXhKZCc!i-bahS|tB~Z&Jxz<4=Yv4=ecf^IoNjEs_KaD=WIfURvq|j%L6aMx4 z=NY=TTqtYA)$c5~Dv+KC%tCFdsd*tZx1I6Hl&&&xaD09?j$!UFVUV7e>Fy#_5UUkL z;P9I4(?-OAaP|7R2ja925yNJA(5%K}vg?{hm*@4tzOBIdx^x%R+xqpp&!^sI-HAw6 zKj2H-my+N%%p;a8Mfy!GP*!OBg+fH`#XK13v^IMj7wj6+-Nf+fS|g^$h1r2X|1gvg zmmTluCx3i)``}h4rFKPK4gN6u_|e-gRoN|aunG2Up|mCqV#Oe!>?U@8OY>OM1oJq7|ANE|AC^G1dRA^Lr2TcyymQ*f~Q z*DVu6SG@M!%HJu_ilLw9RG5NL(V5PSYPv?TpwdaMP;Uwa8G_;jVV2=hpp9Iw&IYe* zs9Fa!HJObx9wb&SFuke+ecVC+{DD1OeXnbOz`I==Yo_lcqvC&9c;sV@cL{tKlik3`ak=TwGQnJ|Q#7 zp|gg#1T%XqIBv?27=f3Upt4DU8-(cGD>EvqBu}W*Y%Q$;pi6M3PMh;Av!-1i{=DQh zIZv5Yka)AJ^|pQkDvQTof{rfq_{_N42DrbQCk=8#UuYjpx-+Isef0Rx{O)_1>S=-8 z@9~vuzZ)O9_-+DcEdhT>d(+s=9@c`vbt7?FL7aGkf!975vrtKqqZZj-YE}hnbYdMh zxzM*F-22;sqa%S6v7k|+9>`s2 zTJ!yd8z-)69zAG94}n9!33_^C-`d+Z52uh++p;H(PzS4*)>%W|*-MD= z*5@1P=tS^-aZn$|^A-}k!EAyJ9UKXc%ZZaysZ>bQZ;YlfTe5;;nUhZ16ZT(9L0l-1 z&ZN~-G&~gT=^>~37vciphh$J(a-jPMs{m#`a~!Dq3SnLevTtnTzqs zM-Hv0Jf#|l6D^4m)QiFG%igZ40SaSDJt<0w0*L2Ou+@=!&f^C9;O2L6d zQAub!rm$UUZY>qE*&bHR7fIlkSP+Ql3MH|Fom^s8wfzck5D)_<Fp!#KsDc)$5w>4n8R|wx7b|-QbcrZ!S4gT&mbz+CW~D}2 zP!Le2Dm(QjyKSgPrnyghM7gYJi^8I!!VV8z?_$%QS4n2x*Fj)2I68>IVu<~Gy>NzO 
zkwV>^TmXQ20%?2)1m(m(fkf_nfBmHo1zt34ejYcZhb$P59z=32Y_n`;@wG`MHsAmV z$beFni)cAyW2j`vopr!cyhU5`g6NRM0_ts#h4j{w%G?*e0e4Hw z?*-f>92R5up7Pp8*CPI8 zu1^9hkbWf*^t(7Wd@w1^dEIjT-J$p=zx_R#kz|!D_I?l9A*1bv3`wI*w9*J#-kU&~ zje%!w57%}*rlHj_9RFz*y!YJh zY0yuKv8jm5TYZ7yHgx@BJ*0X zrP?aBU0af&dSs)COTu@`Ll$|~`5c=?E1ciskr&~BJ>_^JI?T(3kYtYkXf)0PY~lhNLpsuFa%8qBTg|)6Z8!1w&laf zfu+Z@kCVy=xIpC!L+>{vQP~xH%n`0ww*RsWKo9fkFgQPrl*w>uE6<#e`iT7pCj?*- zl+@coxZuq6fe!H*b)8M5{7mFXZS{2Kdii~eVQ3tv2E=l5O8e5)p?&eDfh)X+0Y-Wt zO1#^XC{VcBAkjh~TQX@)N`eBiSg7>A`Kvbb<0d4JC>(yi_blm)jR|`y$~}ZX;WSx4 zr5%T=r-LP@j0Zg=3HwolLqvdcYYdqTtzA6}W(rGIlVWJvoXmvmn`?n3pJ}a zMTk{ugWY1g0f^O9Qet!}uvi>B+73FOhv4P^Y=xHI}$9$eOt6+c_=B^FoM+8k+Rzu0IDDj#{M10yd-2kH^?_m>!G^?(2L0GKA zO#n1VPCIG%4sMK_ARfS;&P*w0s;oemYUp6sFw|%NeRB|bb@6NtIsD@&0mjM!=Rje`@;)WJ#Dl-({a<%R}rk`8aS4PIv40+Emo{pJ_ja0}8QX$}eDpJ;ge)N4Hjtor!2nBKEcROMMKS-B#$p;J8;sz>g-R(3NGRLZF! zOM*wTeC%zjE&87Q=nvZkxgqx#mb>TKJnFj~D#KQURH~N>pXj{K`wRNp6xP8~l>prH zrol&$`3hj#7hl3WLbkFo{sPwDTn9cfou{wC_DLn-#aI->_IS(|(fiUCqyLi%2~z$? 
zL7)H%!Yf$)kEkPZ+0ksh3t;FwsVVfF> z*%5c+0+xPd2y=iC^(;2W7f!2yxvKIIC8)tE$CQkl{A%iCRZ zjZ&1U#7MH+EWj?SZ0hJ8s!hUgbqxfpB(PlhS&S5BEQQg>*#^m6H_JVl_%l zk^ui;EI>lvDFoiob7ibHC97QciyKI{H988}#p7^(&dI4zbQvp&TEQn&W0j`XkZJWsc}XwJKaLLAw_78C)(E+xBv_?o4zZoKC~% zO-b930PCR~D!>2X06a&Nr2QE5ZMviaaT zm~nzoPM@gkYJ)ZZ7uM6GY#ySc2?+!tn_U+A3r<( zabjg_M6cbRt2W1eNOJL{IIdb;{G>3T_nzszs zG2;Fj+h;Y^w)dw>ea|zf zZ&#Zb22a~$$4_j6RQ(*S~b zDFVQz#x=at7LdGWWH>XW4SVJ!kjD%B9nILd3}?EbDJJ=ljK8Q`$3ZrrsX)SMs!<)l z+}VCe`^##qcHR^h9)G*BjFlgf=n4(=s_w!MN>R4Iy#6;9tY_n(*DG`w&R`kmciE(J zkavi#k4dut2eFRJvx@w$Cu)_+upuOAB1@PXrA<--!W08+mV*)6j0`fs!-4U&g(^07 z+2}X#o}_L2a3bB(O`fZ7!qe*r8l1m|vVRWM8D30@*O2z&w~v4^=K8cz)f7huxhCIM zwW^wRP^3~73LY|yMinzTz&G2ab@5ae2t@TLloZUq2I^nYr1@&_Tosz5?`jKqNy0C;HDw8@gFSEgTLP<+ERMU|4L1Gf5fcJTr4k z<=qsq(dCA7A>+QxF{JTYbKCmaeE6(x-r2MYJnDeq^PhRogHBOC*DSpK9n&;5ls*@v z1xi(;S%mULf^OHBNuS7ozT=j5OgdDbZbv0S!0|yFZ8cBCX zw~F8S_^W(k0_Wxky3KfFEd1ehkyB*g*C(C-<3j8f2kG76JS3)DxxWH2JV+V7I3;n* zcRPW6?YR<6Xq+6DFtMKy6^MQR{u00Ka(UAXrjFrXTr%a_m;ShVUy^aK(-Lv)m-F-X zC&Q|EJ!_#3AlPhs@bahht9lrw1bG>=9MPCW^Um7w4i#M+E4`)-qH^L{Ys8QOUU}g| zV*Ft$Lzx;Zv(Rdv9;iJX)O(d!DN>CBv9V2>r!D~>FS=JfXLvoo9kKTQg6)@}%~Quz z)yKBWk1#Bj`be5Yq0z)4&@!e1=D$P|-qle;NCmni2XR2W$(;pc0dPZcSPjYdXF>~) zjsVdp3cP;mcdLGIe?DVV61F77{uHfCklef%xE0T(YxTdeT@#o1>*%$5bRDM)JN%@< z+pj66ZbL0WX-(2(v*vIV9KAPm35YS&m}u;X-zvA)0*AoNl+rlQGKj=86!g#a!leV9 z#BTRfD~GRsJ{5cS*DKNGF_pK{yB~dZPN>%+r5j5Q>)S$KX@mSy;~}-VNl#3G7>DxJ zr1nLdp?(IU_TOI(|2a0tMWSwgJjwHbkJ8lXmt%HxhUDbdgh-Pm7feNADBPW z4cdqn;pmR;2o>Ba zx)?o@L0$7;0_5z*X7}+G_Q6}5$4_-X`YHT>UUqs{YH58awz7qAQr>8>c3KETBs+XQ zL_+B2P_P+DHcp5?Os+PeQieH;(_YA@p?}1U;$nuqKjO0qA@J%mjx|jzSuM#U4y<*@ zYNWDTvSoCxORheMPYc~NX$E^(Z@3{(o z;Ux8OMT_$-a>Gn2GSzNbh_sF3reD>sT2D>)B?MO#kqY~tVV|L0t~bd69VZsJfM5MlF5mpB_T${?R~ zXpuvao1zOKpiG8L=ltE%GbwxL{`m#VG}&qK)~|Cf`|IEi8^_Sn9AUi5yOmQ8a5)C8 zHxD5bgcnwlDo^eNx~+g<+Fq`N%-pfrR$=r-9PSV-OVLWkk5T3R^#r?RbEp95Hg9sk zRtk_%=5DGNsT!4Eo0?e0T}RGZZJw(Lef7R|EUFHYi(jICHZEPBDG=uBJgvSF^5o3g 
z-vhofMg!VaQmvRP&y)!#-aBVssqg>VsqHcUb#-P5zqYT+Kn0AAcqs{S75~nQxKpZ-Wtn0&FGv?DZ%FGo93%Qy%UUXq6i_`q~fS z4HE8dvre9oNoGld(~J&A@WI#2K&`~c3nc#Lw$ShXm8>*aI?%U&>TOjpDs&JI=a_zh z--ccF#gcIhRh;&BoLhyFU=tZ8DjVk{^g4`E>iHSb7_m4rT3Z=Ztuxy$&+fU<;j#Bw zo}X{vL}Fb+1RQ~p4~J&$g@qL)m3Fpkym7P$nGq9ST9$B7<~kEMW;33bOppcfk^&ME z)d~g1t~UwZpdU>hnok}CtOSKXBt&y{$Vd-ucx>Xoj-99cFefLSv^PV*82%{wy62Ly z*-KLck^Vx7l(?l>pp{OsAkCb_F~k^qjPi~Zr^+p}sv3OpeKKWs9ZyE(RDk16Wjm@x0_e6#8VY>w`jTK*Vw%v!Rl0wT6}|s48Ea&$72gWxx)>SAqWO zeqZJ3bDATDnhquHVRZf9)~gcoeex%T7qf7s`q7{Vw={W#hlB%fXjqso37cTNajc34rkr1y>9I}-eEp}GZVER zTc(W9q_##L`M2o^d8n)7ft0Hr6az?JFSV^w6DH5c(w-fy`n09@fV#gP(a*m!trKu( zl}|OjIWrDmH9x~GZzZ?%oi;+B|GdfVgWa~1vr%V$x1etMu=NctMe{{zd6@ih>_(dRaYL+- z{zaGy!c;wOVtTnR%4k-n{Rr097!@<8Yc`X3x#X5CR4sQ=xVgzCsRevlT-(J}L2pP0 zOwmPi6(_xxEw)J}f;0xoau1jv>u8A4$@!WZ&*+PtOQ5{QX|}x`=%0O?j}~@*nxam> z>3YWg=d$sRyU}M7(Mcy_J`nZ{hVa;f;?wqlCakRM+C@a3wsyRajLo7=G%;kNyf0Ew zCYZ43X-dQ`M%pjYO@>oTBUdxAz48?M&8#!8P4D@>sYjlBy;L!tOostkDo12h_TOPO-tTBKcy^#pX5BF?W( znmT0Y*jZMY5pHFm(1 z&Lxa04UuxsZv5U?4@mkEzGB_SSb}a5Mvs4Y5M(xlE^hG4TZR?!T;fZd*($nHLr3t& zEa>qMC0S#`+D<>(Qkm!~mMhB@JTz3X0cOwLkyd79qbI+IWD z`*swT*J- zt6C+Z9nbewd(rL<)Q*RK$|4$U25l(2cNuyYa{MktchIa(g znzQ0aD{^25TV1LIkI7LN6o>jwt+k9z*Dgs7Z;r?2=@P?tj2+u^m~s2OmlD&9H=Mm9 zeo8uO0{r0UlGGl{$-ar(2t<80Hmm3;dS{pAnwO+Y$xv{zR>Uy)t_sIb_w(#vkZx)XaJLKS+ zL(mGih+m zahYt$%gaml1^pb?;X{8tO0Xn@o$zud-j&2&>SDmM>gkymSgs+|%*@5O2}_Gtt|)+1 zC4(^@bNol3gSbVj{k5|=aWxm27bsWLGENwS{%VW9`=VgdQNL14H?GzU&B71Bb(Nto z>J>&jY`hRphog;`vfEMx<^bLYd0xLHDaofiD?yOWj|Ws%{44rh+eqZ12_h%`3k$5< zACfVgVU151r}+41&9ZizRW%b;Untoa2pLTs6%R~vs9a^|VX~hW&(N%a3?ea0MI3@o zLOvF|uTgib$1WS-+<`!g1y2yg^lv-34c$RRVc+|Y#-u)=ykPl22! 
zdb!SK^Z@urs{y)Ri9XX%Z{`#@q^g}JMB3MlBi+VUmDWCnvT{_Qk&MBf%QHCFTWJAfMN1$_>d_^`*XV zQ$7>j{AgoIWdFlTsGGk3OV?yue~R9v5r=EN&380g2Ke_C-)?Dm@~YV|=#1{juEg(W zkpxL|;A;P##?QQ>j+cP{J8#~BJYZ=~ok8@?p+QJSHRn-H;4 z5s<)lIT=z8)nqKrnyy}w4#N~pCXLL7nFvY2i|a;zo~*C}f{SliKm)Znql3ipDBuOr zs5~k~IX8fIO_%vyUs@ggv;1By_k`a1z4y14x)=OZ8~P<{yRM%(_h;PcwfA<%oTulX z969kQyNf_4S(*Z$#%C9kqsJUC-Dp0Lf6)4z;zC)=%cjeVhgR>Bn6@1!@y6jHj=#BQ zHP$g^7(2I|2Z|k!&3pnuT*$NnlSE34jSXy$LlHy#QT8)F{+*rvb@-B6^S+9OV=L48 z!m>o%2Z8Ls!9YdiSoAp2xWeBz)BW3P_a!IJwbT%6R*0n2))Okqaa7728Go zmFp4xRP~5{gSpc_pf!i`31E)|*mt6U-!uSac=z(l0mjeizZWe2_luapl4WMuZ>T#U zgt|9n;x4j~ms5H_@wWk#Im8pA%I1`#y(DY%g0U886K7FFqIp@3Wq@z4wC*j~vfuMl z*T#u&`;LxXYRg!ag56AJQJ9auONl2R@3|kS)A7kT^(DGK_y%b}L%Y(w6+b@EhM3Aej zGKVILc2+LYAE)*vEk`{O<B%1|ay*LOLuB#LrwvV?x6GDRdci4S?Win$Oh7O1o^EXh{JD0D(PIvGBWnduv4`lWN&3&hI#6jaNYsJoL=lEP65a~Vd3Sgx-aYl#kDVdHIu4hCtk8rkTIE}v zzHkvJoGwHW5MOs9FqgkR_dYyUbma}9u0$4c*lxY{-Fu$ZxWDT0=$5?M_=^%9Vz2Yw z;)o3L7IUq7B0TZbjfTHb9*^!xv1?tE)9rso)CO(8sJ*K1^v93US`Q6DyBifH$Gz!i zw9X|PxVfgA2$QzgY=4(gQ|G~Q1m>=yK>`6ptI+cmg97KzAQ-CncxV46Q=1>z@Z35` z2GMPTw;tDG1;sa-psAWmU$6bUbt#j<+NK#f6}Q9k*r%w4)io^!y5Q!+ACm1tt_JX-prPJIK{Z1tqRtI@~fH5;WV+UWZwzFH3BjZt)gmD@Oy+07vq`i$a7?_|?! 
zldQ(-v9N~D*^GSA1^!_*5Si>HDn``BagdVzJuQXQ|7>F$K=mfGL&w0vY`SF!2a z-r6%)Z+f@Ks}eq1pu#7*(Agp57yL8I1ZTKsj+D;7@4jim2;To`h9OaO0q+O%AJ2sV>Rz{ z!+~EgoV7VSS;Bmu|3%fC#wDHp|NgjdNT{elYM_XFfm^86u&JPEic4zdDB^O4}?j|#4;n*I|d zjjIe-bY{7Te%h+-d$#&4-SV&R+7R>4krQ+{Gnp?U;NzP&6muZ}vl2}2&vQ+&MR`7i z^#JhQm8meW@(>TWcH`(v0P*2W7S2@pGL7s^0ED^BmHq2j+y$&o@$vj5%u~q?N)Tdi z`1tXVG)j=G5|Oj5Hjuld4C2%ctfMTE?ljL0oD?>!MB0qeVxgX||HHlqO6Se|IU94@ zADjweSIitfm6TStV0jHxBXTZJlV2Zt58tOnirP2zW+gl}{}r#>oE z+kxvHdVh0+PI2@o6uV`0Pw&ag0=+^DR)|Fe0yOZQ_@^U>4N(H} zmV58*D>FUg@oltKcqfzzGbfqu==StW{;C)g$x;1*9_M< z4(55+wl!MCxn2~AmEY}4L?W#UH#pe-$&caM-xUbN&V=W2hfL7w80AE;JCA>a)4ZLf zVH51R=exQ!E1{XEqFc{SDwk0V4<98Jo$cO=uFHcoS*b3a^z^IzrTVvh)ux|*ySLS) zzL&04?)6*h2AX5{W!GXuH|>a2yjmqe2aTZ|ww050u79PpUW&=0hEU zy6{}9wwx`+dq6+-d~oQA4bXgNUPPF`?@!>pJH2 z8WE*PIG5pyeDkH}z^04J+{3G14_}su|C$oU`N32x@wa8@8H>Np@SlbcR&4s;lP2cUUx&jcrcYQtogrK&^goV|W4KD)B$OFlt1V;*YgsU!@*QJ)x zxX64)Ik2{g;IbJs+%JzG$uUs!1TX<5*m^6$rAym`iEo29PUBuuhd;k(#~jS4BF90f zsNDrKma++U+4j!QSC%ROfdTK|uM_3-BNb0`PJ$M_zfN{KgSAg;CfLV33jDq1I7b8z z0Jx5jc2{?htPMoeW?jJj+e?xDvWU-%q|NasPqtdDgB`QBYS^z9)+<87!b(H7t|vz=Zf~6a`E&?k z*#vEFd^F)Jx;9xC@0TS2B==gcu`LI{fGZ6!yB%#4603Y+lTBa6hD6T2Gg*tkLQBR!6xg20ZreQv)~AD0;E z5I%cq|5qIbF>X)Q$VXK)#8tmqYi7dR&^G=ccE7vF6#_uXeEg5c;q!Ha_2wPYQOLRJ z<)7`^SR*ZHXQN5w^=5*7SREXorNhHGpP}H$QM*J zYb04ZDEfV8n&-v%1|56zIaMvv_HPpwe+=B(&+XHx1V@_9Sd^+`V`UFNm7`P+|NHOv z(U=KTJa4nS%jthxF8}>+hTyI|wTuzK|3X{p8#~(S8syE_n_F93ZZ>vZZ*6F9Ywo!6 z8*9F`(j=jYY;pYb6`yD0tl*1&>naDV#A}k~B%vOl$et9naWCClFSnwEDrAZnd1N7T zZxx7HDR*vd_E; zQX4l%7NcQ#kZdVUZJ&54?Vu2}Ah;px-#)o@$B9~)GJ0V4#R6U4EVQ?}r6Rbqcu$wA z_ezE}?eIvT56N~;j<}W-eS_+#d*~ac!AUMasjK&(Jti2toIFp-r=a(8g12HG94ak& zkDc<&W7T+lu6|{b{PflbfOI3el!us`PVFwRgBmH~G=XhT7Lipo1GbcEK+)By< zCP9kHHj3ww4F{Angg~pbf|l~#JKdSN!Id!hKrFfz9k1+DYaU_p@sk?G2)yoUsFq-J zT6%U&it2UhCBM71(aJJ>a8$Iq9=nhO-a>FE&^)cBDnK+Q`aHt+T7*4Js=C7grr6)(@^E5Jld(&{ts1(M{H#cYNym;(;Ma z!_{&7s5f4xmdI2zwJ2&G!84yuh4#}=Ze?5F`KHC&3T%NT-R(K#Dv 
z6Ho!F=hPRafc6%!o~odgEk)7yQ=f7fmYFx!!Lnz}XRS!K&F+?)U&HpXT*~To&6PO9 zP3_LO&p7{+OqI;ZC=Zi2xZhP{&D<2u(o(ySQCIz34W40KCDYxR-F3vWGp;hQ!3vF<^9o0(+H{~nc>vN21eZWKrNHv1*e)%uFO-cTIjIy^KQS2WmtufmQU z9yknBG=UPhiDqb+K56)YHeot`NxOD8-P)OE81xxgI&Z>n4XqL7l806jUSa0440&gj z8ZFe@*H?yZm6hcEMJbk$p>F4Nc$cuqx;@pUSBR7NU zE7kDA36!^v29+`fsZh9HE0@W6@wSf!$ZXHjJRnSp>PVn1qhNeky^}v+!w1@a@QLBBH zeVr*5j;T9<-8VOkS(e+rY+rE?KkVGeYr;2JWt%9emDK(tvQ2^a#M4s`g}G123S{xc z?lh-nIXz4Q1d%3R)uBwr)_&0pNuBm+LNGhN&FB9zZJ2{9(aar@@Y>v@N9ugff{)YH zZH6S;2H+9-Ns|JPXX5Q2Ft@$DNilhV@uRqXsJSAyX%F+IcuDW$qlGlTbw7l|yX{*c zs!?|Ot>D3}J>O->t&kJ8wYPs-8At_SSSN{8As}Y_stG{#yz042aGr1HAjbpK(lbxl zBeQm&%!ylA4GU{-Zbqq~UXg4WbE-+skx!^`?qjCCgLMvo4}GkGdGYwhL2{ z*PYo3yLvC$VrR6nk0K%9v#GE0t?dNQKGsfi01~L){Zq=+JkX#0Dg8F0_>zRV+DW$Am8qAG%AMrO877to>`Kby zLMvdqwqdD(uR55x?cf=#86)`3&qxsz0ahWSbcZ)AyA@ zE$KCu27Hh4Ci^v@roW0&f|eE6UM7<`!hFk5iOtcTZqh`v`hpZrT6p|ia-n+@RuLDq z0Ay7-(i&cqdxQzjXUu`Is8*J7yaL^^Q`@MhsK4}C^atlDBPd;%7S*PcK!~!ErEb<2 z?Nb?b?0qI)JhN1ymu|ZApSiY6M(lqUfmEok&lDSY2mlyPa|T3f{4j+}u3FW$|MIeO z`sZZkxl;c4r-BonMi~cAh56Qn+LheSsf79i&8x|l1~)rk338bv+OPwAnO`*~o`((x za_rW`ZMFOoAPk5m%|06)n4*4e*{(h&zX#4b5HYJuoiXsun1P_&o(2B`u?r0Fu4VqI zBeV1P^mVdukUle6IA+#xeRTVwtlTZv#oj&3RWgqo9b6DC9D9+9ZHx6( ziV9(c&isax$(~`pmQ~ct8xclM-koLJ(}6K(v~gXOH<7im2Lw=iQ&sDp?3soT$Fai$ z`%-=)o>*%845C+&Pt(WW*OZJN&E9gtwEukOxbe))hRkks5M$5)-m4Zjl>PnBLL%|S zf7s3d!76SRz&BDN{6cdwKTMiqL#5QrRVhgi$B%rzxn$J)VCAp1`+AF7vK^QW_Kyl01N1$Ack1ctT(q6~^U zKYs?QAVPd_0`+HAC3@b2+*LfhjN$C}ttBsCjz&$xcYm?CqtPjCYG<-5dPcM)Kn`dM z3c#DC0KNdO_-;0aL$*D0xS=t%9_-m{JVg3Bewg6m0%bfnK@0PoYiv_N4cFp;GLU7O zjV*Q%YM1Sr#F!ieDJJKc;x}@i_`=RSni0#hR3vGW$`#A+N%?Pve)n;!mwUdv_$~wW zEdoqwz5P({Vx?Jvq;$r`ABlIk5HNnt`svSKFjTry^!&whIAt?u&zF@mHns9Ho!mbB z$@-u>B*Zj;%yU4u#ET?;C^c+`vnJq2VWgl(<5Xs~^XH*|1{_D9sowJng8j7`h!uS{ z3VG!kTJeB`G@(LxI=r^N{B+f&Mif3^fnjWPH#(#~*VGVn1w1b~;aIW)^kx(mGmrChiZCq%qZ1#@oHyI0wK1dnV*kn&)X0fnlC0VYth#yil zc|Dz+d+*umY=@^C}!{+<&aZF&nCuI7iMrji-m&%P7?IAQl}`pbG-ZE0C^sh`U6JxGgfvX4PJI;C2(w!E5b 
zOC+ysPhd3O(sB^LoJMJy21TK&#oG|tbekQY?|T)<=BW1BP#YNG69=y}f6D0u!82pu zCI1w4F2mmG*~I$y4mrr^sR zhzwxS)2}AVjPq-U&d!O>-a4pK5;_8#J>9AG>GPXA1Xdt4+U$mVe4QF`Q*`$5jh_d2 zWLq015)`C8^zWB{4|OC^M4d;X>a0W=$+gb#F;u`Q4oD*e#adPo|4x3T8mt8XcA)=H zDZySx86*6EhKSwDpMIS`TSHr#n_HW%-Rzu+6!I54yKb~j33<=&bltw63Gcp4A#=yw zKUDd=M;jW*+NX?I$}D;zcp<1?Qvltzs$<}56{o6e>%OCcH*IR4NPg485Eh4Xk+Ie= zXA|>f8u2qE@ttU=F6i!M%uti@ncJUF^*mqk|2h+Rw(@D*=4gqr56`QnxNttpF0dWt zM#sR~{xLXgsyWlj#4l&rp-*}Q{I)E3GhcW6jQz$MtCp1l<=Fw$d-n-W=AzC3*lSDx zo)c&IKc+IVOHFx1bHIR}qyD5M$j@=1{^gYUoc)!1NzHUWR%&%V0;pOBE>Y0k1KB>IE+D)35pLX8K54@2!pFo%KYP9Utg(4hh4ZuV`P0c@dHqeW z5|7!21sgMsh1tRfmhFZPZm90Yd2kRWWtpX{_BGUYRn^0zlqHWnHQD_Wt;q@AyK)wd z;xepWKtK4j)bZE)S>1zsJj}mFIhGHGwj+|4sSdtAL_|rP`N)41w=;O;3gynWK4`KkW1_NhbxR zezoB8`r6)Ba_Klq?e9-j>bw}?Si}NQrqAwpep^7MUyb_8zy8MYD`9$<$Q_o`Rj)G! zRXQK-02YhOJg9xj&la~*` z72Cc=?+Z}66KexKHAAp2=7kqQ6*Zb`OiYTW?cE%$gAb^YbLMTo1O+crGq1wF&MjU* zXuOW_jQlX#Re(ziFICg{3EF{pS9^W;$(74bV_y-fpPZOV`TAzf_+%_!BM&HX=YWRf zob{<`&Ci4PAKTr<$rW8{eM>c}R}Q31HbA4-^il=?cZDe-U%CfR5H@AScMy_mDJUsa zUsi;8(p3yWA>(UgB36o-{<0WE&L)4;#zRHQqt6Bn*ediP^dIH+H@MW~8qH*-&%~Fr z(B7IY#%bIjLC+`EjtKYQN>7AjFZtL%|785Q2Sn7r4E@3jY2D}Ql;%nHtSk-Io%W*d zhGH+)8;CS)N?je~7q%HIk*i z=D(Wy%O4kIinvfi9tH;XF?sdSv4n_xGKXIo5{f;WJ*Yoqx-mLVi(q{en9a0%9wYla(HS_gaMw^}!p_7MhOzt~%tXUjGiKuD0A;o2ITYJ6H_ z^yu@mZ=XNBoVow!0CQi19`DO%cU&$9F6Xq3ffqQ~jtzSO3F7+)U>&uuw#$R}u<^*% zig7LV~c5X;P?h=BZDGb^ulQVkn|{de&-up z@!!{;d+g+iF@j-!DH-MPtB!Z+He{hcb>`~O zLhabyU>v{ig@7p%@2>)Y==C!^k$Sa#72-&ru5P~u`Q_rHX^$9Cf-w6fem&Gh_Kcx6Ckz+5$L8mwM`6%K>{sEJQVcA;{RiZ)_RYJ!qi z@FcKdVd1oYtlQnaBiNFeF$DeT_ehvI2|5gK*d%!w_a*yjwX!Rj%fWMo@NrKsaO|Na zhsH}whkvbb4^j$tLGjZQy_q{=f>s|=3X!lR%lJa-{p4XNk?daMn;F&}|3(?R>dtRk za*}`gSaIzkq*37woISVdaG+L?a^;_^9oty^7@;`qZV`PTIevML|2JIT#lO zvju3*DIzMXYgkK9D7m4-6H9Q+uU*$S-(m9_ZyBb`qp_kaK862%Y+>c5;dhc^>ayFJ z(R=^yI~|&jUzzN@!k7Ib#GLle3_{;lJ&Mn0V0pWUK_VYd6PNn_psgMyU3Ay#DS$Gh zh~2;>mWwC(Xng=o`AqaGi=8m5hs6ue+kW^#Uac`3Qplo1jcN*w z*5jU>eKCJ^kdsN2lsPA%j)0W{KSnN3y)EKJAJ(V}!n9n9$bP^Umv*GszngC)DWPfE 
zKAg?Pu^CVq)audBCW&0?=f&BI_;$jM1WlmWR3rvf#hTftnal9gRK?@bcW{P{A(@+ZS=ZyX~IVL1}S`(H~&;N}mdV z!vk2@+P6KSABfAq!qTf72aoz6Dp(p~j{3Kw9E*g0VTJNg0>A;u<>(ao8V(@1_I!2l z$^5=Hgo(QCdX>62-tUk|sZpkAo_X~VspWg&N_N?3>!H7BClnJg2BJORPbSXU1>QZ~ z5M}(&#t&;7e3^V385UaHBfs#n4}FaGgfC*M4^*NG&zuflS;mEaKtWqvcO^^KKHdLJ zoO2s_cPKb3mD+tqQp*4Y`l@`%gt07!Cg<@9 z5OT^3m0_rZb;+od4nkq*fzGv1!NsnmMazb#hUA8_@xyWFd<9#=noLx*bl626 zq-IA83=6=9h6&D8S@P)cFG(XyORS`kwuNB~RmcOvoL$J!5JhRco2O61MP`t2Vm)0$ zgNnYCAC0C~#u@>!KZ~6lqr?xB;^Sz=!yDQvq#%l_GP*+RJwoy8fI8I|TL zjtv#!o0Z{=Tv&%#jhGYddT=91FfoL$mq zr8rVT`9U8kjS={q^#?6u1>jFjHZZSuOpH;7!hMWgdBmBfX`~SQX}Ps_l_gq_JqzH# zX~Y7`iKvU-P#Z>xMUCJ^376`d%}~L$SQCAr1$KODdsE%i1{4(swx( z%d&w*m&VNs)2`qyAcygx{Mn*mq`Z`c<)tGXY}KztDvqL5>oJv^#=f7-dDiaP*1y&>B@GB#cJ7xeb=iCNvJmbEiWV~cVaex$|- zp2yy}a{B8Ympke{HCqbfnGC2=UfLErQONgEi60P#HlTX@1;aUu!Tpc4?B55Sh%N&c99chG~o|BZy+@%NEZn|oa$ob#;mVp+4qW9CYIj!8{+=6QnDg2U@O~x zaLtgpo41=LTk=fO>ju(gk0QrU7HC7A29lv#RZS-;dUJ0JE~#i8x|KM7td2mlYGv*8 zdls3qRT}dt|I>>Igozp;;fvqnG_&;1uUYF~t{YBl>33TIR!&*gHQPAU0j_nn{d=p# zhTfdjPS?HZxBdIiJ&``X>h{c0RhXR-&Ah^yI{_F?N3EVpb}$Kkk(^KL+@hFyP0a)r zmX@HHb7HM>^rHK^1`Cnc4n6W)Wt_cL6UQ}-uI3z)_yIb-d~-UmfD)@!$nEi@rIf3t zsYMqKuHwTz!#lUu<~3`!12H7C(1BLhHC$Jf_p;QPZOfS1diL77YVVo-fs6RT&b&ZQ zy>kAPIYL&6`9DEJ$bW+doHA&5Tge@VUb}s#>w3?fd%ctAe<^sj5?3OrSQ{AZ@7lK^ z*?HIfwoGL#;qLPj+p_A5uTQlmIxdmbeDZ4h_sj7o>nDo?{0;>F#<#O@g9 zr`O)0wgJ?QV(CMK$@g^a3d@WqvAErUk-OuRzB{b?fzTu%Y*@;qcUtZx z{NFrQk}nHdh}F3N*kkeSU!lZgcIN%e1J7fAi@otSM5ku<*|(KLXMLF(GCWe0V)jto z%40vuTHT-pK^jEgfK~~nAhuH`M%9QQ0hn0^CB`)MArEWo%-0xYy@EHyZ)d8@2jw>t zpRWc~3p?XWYA#1OJiPh-v&_8gfav!0J$1(8!ujjLn@aGpnb*+1V=7=k;<4-$PwHG- z9xL7xv?h)=*k5=o@#U>r(+@4tmrQ?*@Yh@Kg}ojB$@!Q{uP$IkEM0;B>2$fZ_M7v} zbFY%7&DP-O?Mt50U26aIo-okR6&5iqKUc(ZbS$jw`12|SFTh;FMQ%}cyb8fJWhk$) zV)qink`P#($AmVp;#>Mr`pZQdOfwsjs4HjeNa1xbzVge-Ao?1(Y7jE_x1n|&b7si5 zqQOeV$y`f;dRgAn^JL&e?0Fj9cA$uf(A8xo%_2MOIIPjBCth1U1q*vm-u`k`DbD36 z2s_^D_?vDhCxZdOfcvnb!v#A|7gc0Z4t`hy4&8ciVb9RGm%R0BG`ZSRhKQw^45Y2= 
zBlQ!_fa)3^nnTG9u8vQQ6~Q$q){x{S4fPl$YYlPc<$2|-C3YoD4%xSJK$c_tUzMly(y3l+2Mr)j-Wy%Zvl>YeGb66r#W+HcpdbG~g;qL?^r?`{f2t3d*>DYX17t4~W1U&`mRK1Hp{tdvW4C@&ZP=y$-6YV-$F1sO+guqUnyd}e zKJIn6*XL_Xc6j+FZX82vm}Nu51idQRTqSLs(=S(jF^$m5`6Pdw;Txp~S-1zzES+c*y zwtuY*!tHYRu0wZ z%XjYIGD(0YK4}wy?Y^xKtxg`^wT~LLShaP%b5XNq*S}vL1qw|vYwfIfo-jvz15JHO9XiIls}7t?W$$E2RQqT@LW7X*hfRn zf@X6&g#w7~Zy9<3itRMJ`=HHtNNzwj4PAEqo|Ux_sq~Pd3v83u3*rylx0M zK3HVA%VS-SUSOLlf!aF-TL%j>j5Q3mo=P$vH!wrepBw3P+7h~*x#vg z5$b2lI?zLX8g+YFPxSvj{E<(TIP1!SXePBm!XaTx9cJVjIfaP>khS}Zkkz&h#Bwto z0{U(;5NlwYm~IDIj|Z~2SXnMjhPozh03_v7E1K;{^@_kkS|W;0vgP{CIbJE?+rj%l zuMDSwt-z{_>JgHE&TA>$g~+JS64RDvO0(-jvkCl%BE@nc(Zf{DxgI@$iSP26i&UIp zrp^GRfgyHUlTX*&qhQP74JbuTqa4SK2Q|d=jW>7z$m1;<_;z^fjdD=+-MOi4y5T}<6~v#Ai;B~p2|x}fYh9}rqk>0= z@uP*&mc(45)b?PD@gM5hl<`3^Du=+B**li3oo|;5XvlY&w)^nQzT!EQZ&yT8hmNKi zfb29dz>ne-N=|NnamT}l9qPjY6e(kqY|A+EM{W=_K>Ig^ypXx;#1hB{9R>xnFO;rnZPXIMu|3t2j8ux+oO4L*gH1f;xlc5(1 z%1$xJ%3OdTfKo%j0s*BQGe{m=H}d8ZmV#*TLVjL94kQ%I+qlsv5gKdX)@N((d6|{O z9u}0=$5$tRrRU15l>BVMt{d5a@3;Kby(@(s#>LL>aO^pLyl@Ovs}6`-B;qfh@(Jmw zWEppXXV6z>=XWMWO}t)=HRE`L9z~GG-0sy@lF+FyFchvmJJ(nTLS79n02!?k26dDp za5-Et#-c+;)JgLvpbWQpGv@PEEveeom85f%Y+-N`J7r_AXgtAXDV42BEa&*#xHK88 zVQ!H2hQtVv7ZAvhP5uWxE0$QJ}e8mnpV-6c{;Al1j zsr6KwIUq?j(jc*~{(+fnz3(hsuC!-z#30FZr)T^IMlp&@%%B~HdeVXi<^^3!SZAY* zW)`r0#VL007rc3BFuJr}|AWa*i!;?b(=$k=Q*i}HW7Tbk4!=M3=U@R~wz2l3ICxSg zM|H^1AP%fqYV8xyi0Cw|YP1CfF=SR{cLrt*%#@U4^Mq~zYMt-t+Ck;8hT|Hc5c+XC zUYI$9F&_c(xNJn(BrrfFof>?i@_^%AOO&Z0!W%~9`uzd3+yJ4KJWYJmq?%3kc(YTm}SG8%fxl zE6F(lyjS_!rM2hQnxG^56;1#rs4{i~a7V}}lMb~ZV=|9C&^KsUJ7~nq&vNoUC7(lw zAoB$o&KbhZ8owNM*zhz|uP?`PT8LtYs3CqtXS;_4G?nSi(&}m<4tFus{JwU;9;eX1 z6vVpyw*f%Ld-*{4Ji(DMv|5UT=a-Jwv??c(rM4+T;M!uEBTJxg^}Vuz(6WXl&_kg{ z#vE1QCXVKLxn60-urITQpvYdsU{&QDPAeo7xKk+@wNtm#UU&5w36)f%OrNq`BogJG zY)$+qy-(<(Bs2u=<1_*dh902uThw%Cl`$c}%whPYN~!VM?|`$+I&UP{Qs_~<{Wwb7 zpbi=jq*JJ}t<;<_vI(ji70d@ABaEoygP5^ow$Jo>5jp*IQoQZxQ0huSBym`mH!!ob zw8?)X0GiV~7-n^AiyKVm3`&wFrwzq>mF(r@y5u^nI+Us+sU|Oh)4!2>5rRHIZUHCNe4M7E}l-%}1D%lYNNz 
zhN5rcyx1|;BdJePgJP2Dd}JgCH5?(nlPx<9F)J>CeQM)E6!V~f5s&~KLg&P#Dx<+k z{x~WW8)!TL0A!i^cge#t{`qu$W34Bm-MY2??WIib9XAAKPL>+C*6l(Q{8p~1@pLP} z`8prfABbz?l&H2eelnPB$-S4hlM~9?_W_kN4@)BL%a_>;hqH}cuvhh&@q|_ETHhqf zWdKJlBtqs~^Y@sL>z-MIyUT#klte})V~EjC&cn<%>ZUxHv9nj>ycS+Gw947~s$=uk zLfZ329a}%dTWE+%^DdApfn6@4ymXBkL%!TO9Hggo-@6pbo-5$WC=|;~`a**VlSl;r3}iK7mG-!+u;VP;;ojoj+X||IU8@ z%Jzo`K3M>uSw*Vo=(T-mvy(fj{C>>Vp}@OVR7(5a|A5aep%e;ma!sU8GiQT{7%0T2vhLQrAp@HZ!qJ4ws4K-V!aye0r<&@29~OJ((jwP(R0ZcCLc@kX$aOnUNHMfcQBJJ{VCPpKt5}_%t(Es4&$JbqP7(y16wf29lnze9eUtsvtAkY2oYn$J$x%J#jM*dkKm8_cAlS;Xj~; zdOw%!_Usumd&s_yzhHat7e~U5ZF|`}g9DV8+p&yB$^FNmUuZ(Pf1|CsBEs0 z8|X>73>x3ko2&O6d%5-$@G@1n!c(fE^*XobZM;z-hpGJU(Ta2b4JbU60Yy^yTB~XM zjoX9WzxMRq?(V$Z-qA50eX(ri*YSb-+?VrhK>oVl&&2OxIVe->O~Z| z@!s+w-+Gc}{je*HgNzE`bSk8P+5Qe^;3r?dRkqP4E01w&?OWe#YUl|Fy;s4r|VA!eJ#=z zKWaoky)DcE_i;r;<;U=}1^L!mgqb(8OA(p0O}3myBWlO(vAS`EpKh49_OZ{v7fg_G zU*|HEeZ6%B2gP8rl980G zjVwc1k)~1Rz8$sHhPw4%&;NUmRfG%Of6V-y6nfeTv01cc?> ze0*&P0e1_mpQZPt?tb5U`{z7L;U-hc>Ak(=XWavw8^dS|^T`iqbua143TOVT|Ihb|#ZW|@ zrKHSYFf8XZu;u}Urn|3`G&EB_1G&}M3&$oYX zP73Sr5iOjv3xvr*3?{ih`O$+j^)7!vw}-&o)}U@~fiPd|D3k>13!sqDQ|F*TTdII% ziL!+3pWVXfBd{n6FqonX@^J1RUop~)eUSTJG{TQRs7NlHw6H1z8TYCDXeeU9&%SiY z3jXKFn;Y+c3WF#%^sdepzd06i{^TwO1Ec-m%s?}fa*e4Awr%Dr^`T&%CJNzQOPGv9 z0c#1w2d)FvE?A{-&SwlNNgQ2_V^C0KnEol*{(7U~vt7-y7mxPQ>XLL*VHd~EgcR+H3ivsMeAhQ_lFvzIfBY|1YQMa-_ zZ5ZyOOdOkg{>*ZE<=OstsV3d?S`=X4;-}Jk_dlH&T;2cDeFc_!+BfFsZyt<~4ac{C za3lf;#g)I8ItLrJTSms-9B4$CaLOH9Pr6$=^HS{2m=gPCSx5G7RDFx#1AIPhjU@g1;=IhWXo42b3z!KeC*A_jk5yP~2`+u~v&UKmB!g z)C93GpHVLY=Vs0am1#4(Rc#; z0S5el`rkc#VAu0tT~T4Pp#VrqnFBb4+C6f#jmdC{3k zk)7y%(8y@FcjWLT>~i>Fb)i5{dr7y*TH&gw8GtsWah7sq0h4AADHEIO1|Q6;MO`Ep zP{7pA*OunjiTd&N*v-%orKE)xwOLbY9U)EFv&;0`dvyQ+Z+Z2EaB986e%x}*A7)*P z0ZrlB7JyH7c0F@uBo9-uiA=Gzxv0D|{qV1b%9PI1DkzG>nR*|U`QPOMl6fhyIe1J} z4Qn@fhg-f2C{g{5?ltzCEu3Cf4@<1~bjdKYG9Ru{GcODr9*w1qWci{6Piu*@a~Mz+ zHv2!vvP6-`fYMRdWarPMGrMRqDRu}|%bArc8!VF0*#s4v;lQNW;Yuh(V~Q?U`uE=im@%4*JR)@0C29*&hM<<#Dy-P 
zG%hVuESta7{&(_3HOxd=_YJ9~hxz$tGaRoa3m=B0t^|_MA_w#(s&XS-&+3{{U^x;b z$?zRcDGXacQk~V!8{)6kxydDi&{%G71#j^!=yOz3)!(1COD_63%X%VoVAcCyib<88 zy2;Q6#Du>NYA({@z!N2V^5Nar8h1=KA`d*)Q&O<({ufnm9+uSJw|(O*4upsXrX`3t z7a%HPwH;7_(8MXVGEp2btu(cHZ)e4j7}UUNJ0Q*)YJ-(+=UG$HO3QYom6p}jWV_w2 z9lGDFOFj!YyH+*=kGi}r%66}Gzk}*X%_ipZ^^?!gma7n%rQma2o+^V zzbCanFi_dM)^T+Gv|l0FM1tLNb0dtnJ5nT<5z*|qiBY2%d_XjfQNE@x$ugW0;pKsC$C}=%p4^?8 zzjIFIWRFwN{Ul=O{#}nAYn6vkH_zqPbvYMqn1!7`Uv7iETiX+$eb#Qf=eWs5lqdC; zU@$^N)5z+|`m5!=@wZ+;j^<3u@?QY};_>H7zuwSlH0`{uvG1{?-IEgr{%$eHx0KvY z40F|OXGn;11c548oPa(;bG*z$IV_CHzM>r-Z3@-+{r7von1w7fFyk!SE{aQFwLB81W|MKzdxbw+s2jkC`@cre3UVyE{ZT3TGmRHVn?QUQD9E4=2 z4ojdYs#0`$Q0B&&iqj05r#*s93@~u zR>(kYUO}I66GORGBTs~E${bc5R~rwrm90}?fwo4df4hf zC7vJBycAvay80Ac3~}lKT{plhD%Z`)+PH(eK$+(1gv2~IY212CD-1X|IeF{N9iUC0 ze!u(vPz}qJ%UyW1;`a8)``0l>PAggWT#4G3pRJ1z$2~rODyTf9cDVZGbn8F8DPuLx zANjNCd{v}H4+dadn{Ism|N9ZSnVM$yp)YD(T>aqbQPe>#TVz5!x`r3$5f-EF=&Y=z zBw|{OCg2a@1;eCZ;QHjF?@^~BD#qh!AqQShD?0d zSB{uZL>{tJITjPCmBTL4H!0L-qf}KX&qQ~rlE!qfNiti-SEA^T#2>^@WsC0CH|9Vg zdOm;+*hf}g-vcpY1_V?NE|msP#zrYXb7M>xaY4bsj@meX4?)S)RD+1&L=#1lO<29o zL<6&uT82YHG!;0hRe@<9m}c$#BGyI6gE;7lw(p}mM-G-ils;jc%bWAYo*pXZ1N-~m zo;G0X>8faev;A#t*V`)Vs>0XqJW zqJ5B}Vbc~{@e^a1KTWNH)5^e%UQb@MNq&Vn?sQ0sM5e#!0$u~O0qv3c9>9+S&+y=%28v9D8G ziyskzbfO+gnp%@elZYsPQFp`lJMU|E zsUYT3J#i+`ah_7h*}a6M59M##L(lZ=&wJH&JImF&5QQ=cHIJDaMEKsajDPxWOza@E z8*jkEWRWHJX*iZetxgq-au6XdX&7xw`h+B+)9ymMJ8V!`6BpKSFiG*zuufj8))kKe z;#Kch9Nn@ANyzEcX?qj#h4}_?FthbUN#9jPk zs!c9gkUaLOs=9R}-4Skv)0SR(YNj1u>}5DEoIV(uXr*XY>xGiMNBD|s)|yKyG*?4fFDl#j>BqN zj~;eq7+(MK-v>_S;)f-)o%XpOX(lBKdEHHBnvX&a*D%1jO2Dl`1;_VyW$e~~6QxFD zADA@z`3N2h=Dan^&=uu5aSnh(7)mn$t{ZAR>z)%yy_Zc+xq*hqH9~vsH&eSuWXET9 zaGv|(!~YDswcyL@7X??#DcE~wX~@W@vCn5@**G?YcoRl`d=6JvXdU$M_3Lh{HLvZ6 zmTThpl-w92=E$Kd(JGj0#Mz{{|M5b!{YPpbDx`+VEu>Ib|DB=kW?8+ornXkv*-|ZO zQt%vFuU&6@{6%Z4!O7+WBhETse!b9sf{wj@Hnn`<#5*UqHo<#jfK`yWHn0G#3al=M zT~t$8P1&_4c0vQdkyO)c35Gc83VHv4cGL)NQ^>ZmmhuisG3n|z(w3${xD>ByUm)+c 
zeVM_8Rsmg&LedL~3ArA8%)=c@-S^?shBdm|^ZJ|M)uqc$)^dxXw{05}k3Myp#KNQY9 z*poh8?4WU393kA0i^2EHY`(oW@DKucC*!S-K! z5rl;`za^lOCX0-abZd@$BbwLEx&egBQxXLjFzk5uz~#-F*u~-3c?^6Lu&SuoFVLy2y)D@&QuDfNhe?>Uq{F!+pIIoef47i zboFE#=2Bq&EHoltNuv(2Ymk6dBmv3E7OSTBHHx>sp0Xt(AF@)hDxwErIUJoc=p24!uCMukkntk^?_srom{|6J;1B(BpT!plDSRAS)-c0!P#IGvv zQFtnTB(4Oec4lL~E!)_4UaY?H~fnj17?mRc&^-;01 z(Zs&{E>R@%RU@e|gI*VZ@XtfH#TRCiZS~YaO|?9!QUwQ_pB)}LH?cHN+>Y7B2l&_JT9Nxgxdk@<$6KtOv#$HbfgjMH69Up1sAgnwiaF`lP z+c(U>yYac!F3*phdYjlt6Z(3TQj*GQE&F&Wb08m-)*OX!&*isV5FM@|_OlniZUa+o zuOlyW37Mmr@?W?D2W3j5Gc5J0PBwm{TN|() zCbsddnz?f{=e?TOivoSiu6JRK^LAG}PLEmH59W9Pcrc$7FrAZ5a<_`nE`q*Bp{NL; zoPxt9T12f&6vY^zE+P$gjirlz18(#`x5=QK?VhXefxmnHWtu{>cr!`j_&>4C@xKHc zIMlRAAZ=L(WCwGUbLnwwtG5oGdZ%{kOE~dpq6Z0U7*I%aC?uDH6!^S;4BR_01`CS9 zsPsnNIH%a0cnJO-Y}w&nCH)z{?U}x^XRzk{Cw`Hh-nbtk`?3J;d*J~TGr0C74r*dz zh8;u&z)sF~2R6D(9fV@+o6!d;=+5{x$pDX!ppw9L@qOQSRfLgay!20kXxRiAUC`meV;SOL5CTU5PF zNuial6EcuDJ^4044(EQ0v}1t4YehVXce;1T=|3lHC_=oQ8pJN$GWcA!f8{e2FS!=t z>#bqkGzkPwFp zuMUAe6`@8a3OD)RMvIdyOM%r3(eAgIqFX01#(b3WFny>gN^Tmi9V|cdeX&l_bBrh9lz$m&Df=k=|Ic%Zp z(q#h;t@S5NrOl3J=ze5?Iod6s$I2Dt&iE{wSXpJyUqF@6AB~@=vxxiM>e4N``x^Wi zbD%Ngukg*YahfJkrRiy}Iy=*^Nt6M8uaFWS3oF16@5K-w&%Vs+?Zdv`|7+g!nF8C@ z({%aifDs_y5J zXZ{pOk-;x?7Lo);ZdXw`#g-7dj?%_~SH6uCt+d0hNtaNqrGY9w{mEKq?@y>7lG93o zL>&yR)SVa^25G1|)v>m7YylKD=~}N00HouG&ZzwGc8>Nq)whDLok$to@w`ZPS?!XX zvr~@PhlCgk4%{U~H{oFFHdxNm>>z0b63ONcYO=`;+hd-!)$1a0+Bn!Jl6Eq^2yEVH zQabivEzZ)@h(50c)f`(5>sRCXDu6ppYn=h3ElyvPn$7>_sYtE|W2P{8>)AQu%M!EY zJtp0w2TyI@(aZrr&d#~F&+Rd>8unNJ@X6ZX+$-1i0zc{zz|t7m73ZQ5kGxJD0Ho8y z8*a`AYj%C3i*FLM-52VvxmPV;8lPW4*m?{C?4AbcQoMVVP|B)oJ_)MlRx>bYDZ4Sy zP($}#LCj`Hh*0(c9#KexPLF5|$DEy3E=HLbnti6GFMr?Ab}3QuL*>_nv^ZPcw;YVU z3hB5m5wIv@Klk1*nbd85pEQxW=jHyjh$4>K7sHd50Ybv#hjtUdH{bz9@a??c7&XFa zf4ZEY1ORarcYE7TK=|kq*2zzEff)Ed13m}ncsmeXX9UkyWM%M`>H7NmtF9oe;?zKY z*L*f%JTzHn7=F z#=VpgMNvxe4>@l;7x5^z+%vnTg0mU?sRH`oRJTSo++|iapt<~-_jrf0-$rHRZ~5U$ z9TyQ-qcE8l*j3v`aRdqKs@JIXecu^XCQ7HNe44SZHB=Y 
zNoIO_L0|-=P?hlZG)%SN8NfS60Vb1o$&HZ0c|`>x4sFuBQiOv0%~8X)hPLJz2N>UH zb7S^HgrY8>M~Wi8^Yh($h5J@j(3k;1S~hF&^6`hLUpN1ZPD|S>Xc>|`>PoySbj%>7 z;`t!8m^U^Q7p`vKnG9tw<TsZv2Qk8DhCGeqCWD% zDL8YuBOvEJz$P#fkENZkRjtqFbYzqKMAp_@#H502nW>kIL7z}2!($RkN*s}*g7DCw z_hL8y*x@&vByw+ls6vH~QzllV?qR2@?>&!SnrnaBT_+v$mu#81!Y_jvC6 z;x8K5eNKG2RA@|KK2a`2M;Uq0|1&z|AT?zJY%ihPD( zR!pPW*?B?^JxF`gO}oo|Gp}GHm{L~v;Io#cYXB>iy7e&daJs!3U>>DL8yUrLiEtZ` zB5!7uR5ahX{lxC`7fK-Bl!{kMBLhy9b5DBk;l*cOWTX7R6mq_5@L<948<92wx|+aX27Sr^VxrB_9~^+uR&L@g$6d^Cr|5g;a6&_ zS7uFL__5!U%d2=(gGeTuWlQsyh}^f=3Cui4C_;v@_a5%G!N$v#bH%|{2Hw)Dzn!iX z6^Gp|Ag$-LnFT-WdmIvV2WoHphL+3BHvRr%^8*?&qUE{k(igh0RJz1!#>C5M{wjg> zbDsO}1J0SB8?tU{f(zMRvAmpPMpSjRW4Zz z{xjkj-}ImMP=h`U3mH`yBVfMwiY#u@Ei#*goeJQkl8c!G2{;oC>DWx#OwurqV^SuV z#`7&bZoO{g^*!(vI(C(op!mzH$7}{H212)7*sXfDWTAW6%>{ezu*7F|y}E3hWoqcH zTQVO1N+#e`w-UfK97MxzJ0Wit6TAu-= zsHD%R>^%+d)u<2j{8xk1|4s)KTpg;>tL&om0xL~~aEhEW9$o9Py$)Uu^xr9r!B@N? zFk$lf!5JMNAaV6P8+^Lbjx46ZU z?a`Eb4Q-b#-V07an3Fa3C@V~DfM4sWI!A#;l%0tw|2uV|qR5c&vG?(T)ZVVc;DPncGN+uR2BV!_%u=34&WZlhQ z?C%pQT7#M^TJC=fm0pgot@4l#k+!8@`1>U0Y{-Gj*Pr=m%$=Qk;ivR$9^zv|yClR3 zqS|d_Is;g(-_tx_oKB0Sphmjmd&ep`J54gGx2OZ273tjZwvwHM%V(3=pZb?;iym}y z*4J*3&OUVl_vFKLb$2-jbjT7JE~on3p?-<>eCBH6v^cGq%3A(GR|u|<_cA|i&H_$aSN2VE+Ooa5z$j1k(_j%QE7Bo ze%pWg6^_=VnSNDSLG_i40G4jLdQ`^nwyVUwR@U7Iv&{{HG!~5qQlnH%A(yCbNH<%z zq($y{fs2tFSB*i-5-O{N(Ak3t=)_DJTsmAzWlU-S5Dx)AiJdOxKSRHT?SpWV2vR)z%HZcnvr_x@-hu1tdK@)#ov1;MkvA|) z@i>XM(ov|6415MjVoIjzBsg!E`5@R}g0&(dQ!e!iNEG_ovx){uNa3}xe4L@FIB*WH zE;*iNKIpcZCKD#S+wbBIoqY4-k0-jz?I+*vs$U5Zcp4upM^-C|Vv?P+o>tvoZ%DaR zTUX(Y&g!!)O(K_~eWW>y+gzPa7?(q%&$mYQhrN9V`1KUEJbCA`Y*$s%qY6lU`My&d z+HuAgHLpd9xV>gH6n*UD5}kw&Kp5WRV#T%+pCDQT=#4fiP&e~vQ$v!#1i z2u1RE7uM#w#$Z=h&GGMlwk@M~L2q&(CqhTG+=+=1f|o_NUxQD+FhxiMh-e221c3AP zN>oC^E$RZZH2%$#ZAkP}eeGp;(RIo_F$)>u6dyX@K&xvCM6Rg$L9~kyR;AWs224m% zAxKRvG_ME-8x~{X^>Q}>jYbppVc=foByqW0ZhEphkRnKb_-*R;`YRQ-kUrzn`u32D zSxc5*DPNv2Oxuo#dKFQJLhkj{*w|{5ty1#zOC)ZL_Ph;mUT-dYcgXT*S?t1c`|>Z^ 
zLK|7rWzGVbGdzaTsc7)crMVc!9t}-NU0gO$^g+Uw%q-HM{Hh-GKf~1oTbDuCY$xmD zM%-tZu|B)NJe!~cgsAK*fef6+8|Bi6DKSK@i3@av>E#kX!p98`G9zf{0FM#SN>_Y# z^%8%!3qGQNg%u6M(+xYPGeMAg-j&}UUV@}VKh4cQeD1oRaZB}pL=%@QtO-ljmW_Co2yvj}<^ZBmMTiBi4g z2MT@3hc;EvCM9Gdvz)WyS}6I(WAO+F&+bLd_!n`-L8`8GDT1Od2hul1_`+H^-qNvU z0etI>MQPlJ$ODvi@>JWc;{GwRXA%C(o(uPm!S9ytV7s&nk zFiD$RZ$Yb80k(@4=^pzhYwYait5xh>;@8-rU3EFcPk7U&16Dyn&JM}|tS7M6jVFpT zhh&qGRMPL~7VSxN0lY^<<9Rev9Qg1xNu^MZ3> zL^%|=KHn1h*X3KTfCuu5kq2?Lle}_8)iXVC`^T2SANa){#*8eshj7n zy~%_bI8Ywd!(H<|*QtZV^mT+*VM>?;wD`M;4*Q_gz_vdrIsD7gfmhYh_YtC@XMw51 zowgn?q+cR{Xa2fJsFbKYbKR{+BXZypxuRk9M=ZS0((7DHo?qVgPEG{0cF#SJh5=uQ zeq95f!#|JOYSt8b;nfFcg9>zFh)Unq2<*p1k@W$#vuAdQ93Wn z8|hhZMN!UwWL_m{R;eW?)&z7BV~nH!jEy;S?90WWfDE{`>VUoBHq4qOnGZ9_Y2n}vui#X7d{a7uoj&o{1`j;g- z&L{iVbB{lSXBa^lNQqyFjDVn;`YR7?Oe(&5D=ny^p0diA1cgK)j5RbPK#i}j2dBer%9d>admseRTtxT)>}SX+L`SM zRxCTomdP_d+Q|w2*_UO33a9h9KiG!uv!NC;g&1>}n9aT}6sKs~9WDT|{kX^CmtvM< zc7T)eA7>+D+`k@w{N1#md?Zm|ad;Hm)>;)3jyVSu{m6F~l- zF<_t>=P-^+0=YJ2;|XrB(Tw=&iyf zk)E#~8o8R2k z9n4^!>t=r~{Cb3JSJ`MCaJT3qV6gxE&M-03(!=O5I5G^TU6lKTC+a5+g88FnVy?$S zuTrcD5kuo-1@r6o?fdrYuC3hNOuFO7$CQuYTMnKzMe7~I+|#H z>HFSY#&UITf(&4D`CgZa`jqR|Doo!S5O;dtE%|EZ+`b1Yg{LmV%>?)bgiuG<#6k^l z>n7kweVEzNlxOM((Z4l0ba+`DyAtg`;L;Rc91v}sn@~ET`jU>OPqWZFv@$7Lk(%AE zjxN5#)y1=fXjma@geG=`OVrV|paDNtUdNP`krbXw>M9H^^Jmq3sXK&g1g!<7ih3Ydk@R|?qmUKidpCyUw`weZTsrxa z1MWZj&9BS$rBy3U9a7``*~s}|N5ArypFb}Zq~IhA|BP||xc=^Pba?C^$F4872a|&E z6ZCPo5^|I8|2yKM{F`t>ltr(cEcFS}&y_r#Q3dpDO`qO_`F|RMww6LU_`k?V#bW_* zaPP$f@P?MQ=FS_PvbK)vvTL`lU7rvA?fktg#SY~5uZ#4DfJ612Eu(k2OX^RE710aN z>a5{X>jktcCr3?d5nKQrLu1EY${Sj6@R!MbC6-Hhp(so;^*^&p@d>CA*p2f*tVvve;yHT~^O=L{k;4AoDtz3gjYksi>wBe$qj zp$^i9VN&3~7rFJ@L78F7C4|!#N~RB*H<(ATvkrJHG~>f@a8(}h_lZ@Hok2T=64os5 zY6syUOt^Nah#g}bYp(zTA4LLBcZJn|s@DrQkqQ#OrB7vSI_0?td30 z?&TG-Dal`z^VKMVt0%3SV3wm%H5-QnAzlbVKfzaySee0(fz8V;!azz7TMvz}7VEWg zG6UKGy=zvN@OzU>iePf4YgG%6w^|oV`8jmo`R5;Pr+9HFH%m9KyCm;Kn5M9bbin26 z4pPd-;>uc9NZZG)w|bh=ujk^s$8?BAtXXEMQ9ar|D=7_%OqUIyK#DrMQrY36I|Z@f 
z;(@G2_(RY2=F=~Vop<{fefJ6P2}~`rg6nWC(8K*AGj6+t|D4D|4Bwi#gt}vMjeaMAWIt4Ye=LYtW~9UW`+?EIP#0 z=cFP^wH0cfv^RBf3~qVNl`p!g>^zr1EdWrMHZHE3SHNns72)Vh*pRFEUbvbx)%bOV*jvMvR`s4SB zt){suT2;jnFke`*9<9La*xDdZ-LE>%xi!kefimb>n?HJR=>g$IN9gNQXZU*bT00fo4l+Nc(lLkfuwGRga28 zwVRRLFK0!8QpN(50k^){--s24PflsSNDt*1@eG`%Ez8gq~q_hP`Me5$ZHufn6yZ(bY~Kv z@|3mAajp*bt1<3OJ>;m_;ew|l2z!KGI~QmEf!lOFLmvVmfl*-HXUSFPYeYPtGii7h zBOb)?tl-a!E<4>?ICF20S%v58v+XHevJ^kKg*zNuFD9sZI-KhNFR+Z4rNl~d*=6+rAZ4{6M>k+~2(K)7bDFIjG|Uh#O%^Vo0Z z-WIo+eC6gz;a^pmuR?BdNcw_PSKfLx`xM}?ps#J(S4Ia~C7%G!JWZ>dL!Z7HIbG0K zk%gUVU+XmJxjdV7V$&pV^laaHS?OU+Sj_L=Nb9hTbA2chkT16f+idkdA zM(|i)h}bd<5r|H-(F+_e2d&9>rf&u2a=KQ!h^>St5~1O2@aUWk|Bxuymp;+moznX7 z=O1DFxTks+|MZmT<}`ZMX9b!wsZ&#N?-n3N&Oxg;D?4ypY>X62!DI;juzGCs_T)SX94>CF%3=BNUt79R)}pMaF#bXs5rr7 z&4lNN!0wM(fkr~K{)(a%JJsJr5t1E>J@O6C_K*b>_`BFI zn#k-;OB0;UC`!tGQ}6+O8^5DQq?mbIJUN>Po~U z;&8^&cuC+o#tfnPjCuuvsRq^!E+Pj>6vjOijwT(3uaNCQ6>#q^h3g%?V!7V6pn$`b zz1ElCx^oiT2|dvpdTdEUb0m*iVQBH(*7;2$*z)3umTZ|xePPM^gW|L@hnohnA}l2P2K3vp#9x2`6fyTL8X&Rq;LNS#uRfM-QA#nq z^Tp_j;9nlU?E3rf`ad+zwJX^GfBE#Wrv10S&&_0J2+pxMxD_5GD-Oo5byLnT#;!)* z=@p}(x>(B{tsx>W3&d3=HKCP&L4+G|JUNRzyAy_kecP*|$7y2{E4vXgVMgCO=z%?@ zIk0k*ri7Q~DqEj#__BgQS0s8beVTu}`@nvuo0(A?K^OAh*4UYw^(e|F((u&-Of(__ zCOGl>`O@mL(+%xq3s*@$KTWMiH>CMt+1GY}MTx>H6nZw7ojkv%EI#13in>^lrT21Y zqoc#v-ZFj?;d#E5*$RON-eRErOcocx^pv|Qy12&b0dtdGQNxX6^D{aIarU%WZ76DM zVTOo6dLH;PU(Ar0g#DV^{pImF%C4wV1wTiQ4_JS1ED_??JgI-^ba`0#*!##g-a22p zy?W!(vLVZ2+WP8hPRmgE{`MwL+dUelVE*yvnxtqqio7d4HB*Mu$>qfmp#XyhFWRuu zlW@S}my0HrjyAXMxRjngw()TT1T8B*{HKY}Zl`;Gskf;wcutwR1^M`p#mG$IA*`6h zOgC4G>(hFWgZr(v|K>PXZq$puT}WUDHQy|wir`p>Ael-)*8NEWKq?&hd$t~FNE zn0>H74UJ6^2+6XO!|WI|oH!XnjKO)~7J2B|+3{6_8gs=r9o$1Qv~kYlqH*%IK^=aB zR9sNT#QpY#M%eDMyYZ!8>;}P}CBO{+z3=b(C+Xs*z}S@)#=Euor-#zx%yq%PwZ2VY zp@*?PW$}8*U}ejd^haEJ$`UP40>|mv)qwX}1O5@I6>W~#R&_aq^{Q%RU|bpQ?z^{> z-}^Dv(rJ1lFr=lFl_E_XHX83zp)g9(=yXv^+(Wn6X9x4jyMwPdX<@C@%aWCpE}l7G zq?=_5i6%0M_%}sL0AsT|v|qf=i}%7!7c*QRcmgu?-G}teJ?7o;im1wVf+1QRk%`Y8 
z!-#1yXd$8(EBzYT`LzaH=EZ2A(9=Jed?~7aWh2FS1Ro7U+Yk7u(4#nxN*MxV25PH2 z9?;l|f0gbV$9e^qnvLXl+%ZNStS~XR?@fg4UVyHS(h2 zi;pbFvZITT0EX=2GACKtFntpdS;ZSOP?T{dx%cJ| zMl1#EYYt#gifa_Iv1KtRdtL=KhZ37UOrdVxy7Sq%1B2+Bwo=$?-q2G@A%DB-$8@IL0wR8PS? zzci&ou}!ghzSQU97<8Bn5u7IrXrq=sdMwWDw1FE~a7aBbU%3}ir%@RgkU1b9&LE-89Xw)1Vb5+C$}E09su{{F(j#{&6|)daW$N*u&VB2ezluIG)BAtvDJ$ zU{ZfJxyEdkkd#-$+F84skc%4bKZH)wsjiCKtM2?NH{?{Ee?FbfL{jvSSO?yO-bIK$ zydKK-G@}&gwI;Y`qjeRRV;=fnZN|i?%8MId9JxRihxJr!>arZ!a19l}S=cyDbG_BR zlyz(4Us}HnD#B>Ge(&>Ah2K&Nxv4rLBTBs zCQbGD)Pq8Q;0@ni9j2iWoTz9IL8%gHIOyLLpUEWw{fY4#HBh_T8;DNi_wGS-S z2yU8Htd~xw&|RKcbur-PFBNLBlzfFHE*{2_qtSAs5tNNm)$1Gi>ZP(-#2l+?IH`dm zU76vRzVN9j-qc7WY~o~(i2=)5*{I5Ryrd$gnD%5|Bn zy|txvKAkVFtvvng5wz&kAt&PJyN{gN7Sld%XGT(M1Ab0coXoD#Y5-e8_RjODd@F4L zB@Lyjr-uzHg_gzW*IQM`xe+BotQfRXCGWy=O2o}M{yyen-futp*!^U~HY?E`3rE0eW%)&*p>n=k zJ1C4%W9LP15IL8+rQR(xScSc7}F@hEMPm_e;#i*HrsddLs$gTqo8yc z=}bKB8?HUMipirqwKSTNU^!~)&Vew^k$%&_8s*i)5p_AUZWPC1G2Uc+vdVuFzJfBB z!RP4F3J{;Geh`ty$JdBJtp{g=<}TPa?`vPOQ#moUsJxFVqw4}(!IqOG=hJGk<3rYm zdFp6F>lIUgG6X8*3v_TBTx3(l{MtCgYF$Z^Fs)jn2cISoj1Lax(&SfDcTj_oWECr>LsnyqQk_6EF+ewHQXX9hBikJ@=U*| z&8pUCs_w3I69kvRS4^i;6vaz`q?WCKw7-DVLvOu?3OK~of9V(9cvM(m08btDtR{?U zMZ4uhhPeAM0*iAP!&1|Pj2g2+*h)uMoUP_F(7UKjcV#Y`(fj|82sv)XLP^h>I)_u{EMY_0!$a!s}6v z2k=R`Vw6Y0ep^nBJHK*yc`kJs|m5Sl6udV(^{o2^NgG^C)JKYDSd1rkJDM z*@$CTS;FO91p=iZ$vAIO@d6;`CPrf>TIQ9Gt+RxYRE)O7^BOH*2GeoX$$S5_KiQRkN3CQ`&Ia-4<&I zG*(o?!XWNNrYR(?8a6#o>zPbdjq!vG9xj6E4v@@qIczdB&LV1faeXpm01vZoP>@1Y z?4vY3^hIPaZConsW+m>wFC*oQIfTb}NQ%$T9_$J!Ph3;cQQVndq609ZL_Ruj`Y%8G zyejV_D0b43r%yemj5D`ixa5ZrKC=WD)KRtF$m7q94r`BU<_S>ml%VX{J}sQ9rk3&$ zfUfEyjU(Ic%yy5SL)|njz(=C|Xe!99iCO~@lf5&@my)SRscQn#`o<@`|?1b7TX?tK(+#$~FZbDPIt-Z|^z z_3+fqoDO$Q15^JU*BTBSN9BXGfe}LhbPY@bwr9*I)t)B92DQylbhMFQ44J4h(B(iY zZuvgClvOh{Ym&Df7ok(d&4~f~N#L4bI!Mk)BiAgTRcXeY%DY@8(?Hx9!9JkE>07`v zM?Y%LhO>xgZQ*HAFbgc@5(U2Xxro`A$Hsqt-iRIE%*M-k^wPz#87#gSTeJ4avPTzC zNoeXl`Wt8eSqXUuj$NyDdvoM3iFd&##7Ea8@Q^utNT4+YoY|JnU-eY#_ag)H!De(V 
zq^U+GuN6?h$3V#0>MEiTx5|LWp#*`qaIC7=^@#2vodKF4smN_)ZpOG2 zRAh1n03qp+QS6cL`H2=v2pebH|)FKlru69YvAw*;{et5HM+Lvl|oy9Eiw ztkh83$6Zt;1T{b{S6s@=YHFJ9nfYG#@BBXJ{(SzB^S=J$ z=<$N<`Fg${58Vw1GyLT-SVMdJV?)%+yqP!uP}mLbwo8w@)4sY412SMQu!2$Cu?Zn~ zH(1f@L03+{&G?{v7&xr6p0{t^=E~U@J?qIOc*>vE2hMIYDn+-=(1t%(b7|h8LI>Lw z5Clohib;H+n^ZR#9^&(xz#9vw3%F`;klK2McEyfJP4Kt5Aj>?#-CpYswR_!uGo~f2 zb^ceCr?OH;$3`@Tr>G9~NIwt~+<*Ug7`aqENZl|!9}4;Ct*`#AAM6<<^h~Zvn2=53$hC-{sJ@r}J-wU1z%Wgk9uTHM#BZC?D-w7~B5_w$Z# zfTAM|Njc(2oR8i!XkZZ_pmC)=^%9gjpRYE3fiRxWXpjr5yj)lXM0LZU=(&gb%<&t4Yd0&;%t|yZIR+eRb4vxAF)5 z2XQ8o5d}Cse4%I)GFuJL^YM>FjC45`)|K#e*Dri{_3_D`e{T0&loF68$DZ%*Uc2C% zELq+rrp2CU@XMq$I_Sf1(A~Tk8}*h+0$d?@R6k3=M$@|pg)VHY1L|rO~Tr_ z4mcMk(T@2jdhQCc)cI(2`q;Ih+gVL}KWSy&(^+1O|7|lm{?U0Ujd8lkuQ`s50*xBk zqK6arm}h%Jlt$Ye6`T?@52M^AA~4-64F^b^I5t^W9H__;4v=_chJmTxSL!3rt(ESw z_X|Bi+M}<+bD1dx2{(k5C6ySFd$Z}4wi%84Q09&%J;guFQtPeFPYzksnv z?Mt57^zIokfq;@N?23fauq`V9E>{n04)jO=j;5)}UV(rLj={va6Lu z!{4ZdIl^LLEo)ccKfYhS>4Wy(#RaJQD`xOn@+MF76%LsTApwc)-@aqxPsvSYPf1+al@E}wTq$aD*3I#-liv_58Hxn;~k3h#im!1+&Kp05N zM6)`uLo$wR0xWs)@bCi^Tk9O%7Bas-Gqnc5l{?b)dI$|xM&r|_FD~|6vxVNBZVMPc zapkwze)Tm!V(G4FUZK^2pyj5g{OOA7XR*U0wW)$}BIt9)lyE(wrvPSFt=~23Ir?-n zU{Up_Ct}4h8S*j3y(G2cV!+7h%M*S%*oSC8W3?0~=jFtpdOU#uvq%aF0UPR}s0bfZ zpW1w*>v5pgxGHC(1TP9qPe0=Yg`spdrc+wsIfs$ur6sxx$ZgqZoO$3x5P~=s2jepK z>+OL1c0FxlyYcP3do7PQCS+s7;Hw|L`JQenp*?ayFs1pxtuJStm{TQQWy@n#r2Ji4x5yH+)t|U zY%v;@M1;!brhIYP*=v+)51m1Q7p>+KJ~BL`BW^G-T8phi>C;|lb@tyRPLe6Cf>1xV zPpHhNdO02V!SfxSv}O?XreM1EAa$)5E0`!42X&@190d9nU2L$Cu5nhK%;Z4gi*(Qy z!^}7~(bB+F(`eOMVK)wIs3LC5a-h`_apnlQp+e3aQIa2{hR4!HmEc(z0bi5>OGeq% z61PhkkW^sSCsZGI+xanvhmo6;CnjcCGGc9vdDh23)O=9%?JxUlaz@QK>PR?Ks0MOq zDP{jVJkoyX50!m(0YOvq5gJmUJNslRGWzu9T67*wkP&B30}Ie3CIt;GErl2a?g7N< z`HL)vzTeV4iW?aWx$Kfy&mI>7HqoVMm4W;8kQ)poXa+nnxIt95%NfI9sUclSe%2>z z&HrR5G7E&eGjo&3uBn8|Pzcie*D<;p+zafUem&t|F>ajU=yPPju z=fm{BNqT z4@ea{-~Mse{Bhr%TjLSua`Q?Rm!9A0yg8Gfoh=nB%3lnf{o}16*5%#r&v5nc`hL0Q zUNv$3>B)wY8(S_rl;GNfg3S6I=FFFAk4vP1YF1?)quMy99M$O;abWnZ0*;08D@TgM 
zsNHCFcwkj}qR$W+Sn_eezc!A*#=4vIdDgkO#PB7~fD)tmfTXW_=HiXPs~)>O4vH%3Y@t2HcTKf{4~$b=;%A9ZJuau_ zX|UdV99O(HUz>UVYnyzyO}ChpL$75s$90iLx`-sem1jya)Ay1>`~^r*u3XORl*_?j zb16PXf-238qv)7ux>JMC_5Ss9i6IBaen8cOKP^A z2#Q(p`y3A$Bf-qQcuKDwcR9FUoA-d~OhDelsN9MyDXj8Qcn-=xXeinZChq*9N?KUR zRZMB82T@YcpFhht&4YQ*gjK%!2<;;}`l`$4xBue0aZ&N(TS~a#{^~QGo2;+1{|-J7 zIdIl@@0-wE@}P6Hw}zFT07w(_@qsK=%@Ap^PKDrN07fNC_ImyT=RIl=ji{@fn-|4A ze0ZeV9$Slv{G5G(1t$8tOzxdBkk@A6gVtaj(m;6-w2uG2^+D2&2}WLR^9o7C-2aMs zIy56>lwVZ|;Ijdk+!+7mbZNO+N@GB_CBTy+*x5jD-6r&{a`oB*w_wU{cc-{qq_GKi zwr*Sj?Ong9W+9P*-)r>PdK|d>FHq;{--Y**?c{pVJ+N)tD|QMD>HF{thpQ=0TMf-H z(yeV-h*g+R>AG`)Po899tWml^ryE=uD>Z92?Bs7$m(OrY2+YN(SHEreyCt1G+kf|1 z&BEoyzfbWS#R_4y2JU%x*eYeK%k^60+05nHYX1>HEC8PgSN}8aJqWw*W^VREOT=`2 zq$~O!=ivkbLq)TJI0(p$1lRj?W9nP{UdvN2IM|6E@|$bFpPmR7B7(2|*1VjZW)h<_ zFi3+H`gr*oTUW0vh`ACgJ9E7mivrJTtaruOs|q_a6J@Fo&+enQiLo19%SrMGTH2}J z$E;PCU)MVoxN&Ir2gZamI9Lh})~%!NAE}mK;cv~kVE>ZT`nn2iHq_)LuQ{H4DHxVO zYp|QHw#dFv1?jI2@rXC~*Fz3=1gWn$Q%CqPTLPS+F-Gzb*DF@sqNC~i z=WZJBd%uEA?Z(VF*O5ZcrYg~% zR6LuqD9XLyw~76Ep^W>8(N=TNK9BpkW|cT#g2W}hKR98o_ErO=iTERVI%DTk&&+Zd zCOsT-i8?^rbBnSc+7uH4mJ9XTa~95{r7$;TABu*b90AOWlEcob)e^f0`X&DpFCb+uK-pd``U+pr=Au z5Xhu@*TXN4l;dTahuG=P&ll7RNWu}*OIUN1j#p67q3!_cw*Xme1&6?qh2WY9M-@uLp;oQDcdQ8B&0JW0-?Wy zWFo|4q=Q4KEmes<%SU)1bV5ZW?M1@A+`~>Fi)(H}Fw6hE+--M0oL-5}8A4BSll`>9 z{E#GLX+T2aF5{z;1crrG_dT}HH!E+%M?Wu+?08WvCl1JeM9Re3ut7Hv>FIau=<{DI6yh396bizP3RpxR{lV2{b$xHcUwEVvM9(_6xd3 z5*t(;0$j16J3uneYm)ir8hQa^H|HtFgs@i6%?b{T(tvly*wyQMoYS^Gq#m1Qlw%U; zz+ec<2O$E^?0F|OYx~Rj-x4r=o>gc^Xj`fxS1egQ)nrn^UkoDv16nZ(5#+BdhJD1- zErM{=Eu4BBeV<94oU&39fHY0QntNIPQia^Iz4*0tw9sghWRpez2(`l^4diG7v3-)7 zi|b2WMq#(RAg>2_n1Dcwd4y~C&hIyEMt6n>nN2W(5Q(7_NZO1%tJo zU|j1h7?-W1X&UO;g)Ge=s@dYvIXFATm;~w-pLac!L8QMW9)uW(!ZIZN znXqgRBlf7UvZ*Yjt|hk`U%E@)u{K>jfV^4y{dL{i^R-%NEhZ4JjGOyWCqMZ2j8&MI zK*V#}K8ji4&+sk$#zDt+!p3|0z(@3ktG0N2Fbp`e*Wf~2@ZPyT`hm0FGVoY*NL*)r zAGUT~m{j4DYXl;NfR23i>@NQvTL1HdwS6!7a~A^MzrGl{zT5fkB@)6H8=^zO6vP7d 
zIYvDhcFQCHtQsv?IL~)*h&LuAqd7(FeVJ&Avnbdr);%DLR};9x*+n*i#xkV4FP9V+ zWXaC$>?!B1RpGKU_SQ59S3jjaUb7@c2cE6oA`I|;qYc2+w+xUp_x5IKYy)8jh>I@Y zX(hkZS$H{tG6<2sCYd%ce@Khj?K1u=`E27Q%e|5Cye48jzEl6bmL&;#uPgS{Y=cYl z)xR8#|IbzXsCd`!#z8^9&yak8bvdyYw30?_)~wakQX%nXg-C}jeH#y)5iz4Q=}?%S zu3Hw7mtSWbKbqn2KyNtd_NO)PX9f+-dV3$wNO~GKsK!{VnOI9WZ#9EQ$@((=~g_V0b@P;nz8f9*TM^Tx&60td*;NXnC$$81&mrd zP*#bR+;8>&b7b=Uj|TTSPyf$Vny$CaI0Zt>3OG5cOW96^lp)qrX7ibKRHo-yBzFsOa~5kZ>0vk`#9@2 zEEDD8y;jvcVxDls3$%>Z~6%%m;p=1nepQHkJ zTLq%)MCe$9bq4wM=si&j^BTh4yp$H{z2E*={?k!F#vvlZOhG_JWwxXE%cbtqt!jyR zLCg_#5Rj4&%q7IcTQvBMq{8aqBf{X|j?8DTEBry2Y9X($!33f*L`F6QnkgH&Qz3@Dq%l}9^K<|VNdwr2 z6GDm_eb+H7y&$1AT){MO8K~3yG-NjEjvEy_MiM9AJm@~GeaisL_0R@g@9&pAnsfFX zVUblXlMc1EQzwm2Mx|@EH*ZdNv5ddb&+EsAu~xZ?kL$< za-N|cfj-vIj>&?llajbtM4fE7MhjHZ! z`=i0ZZ6^bvi7_$J0bMK?N?EZlCvRFisU-c zU2(jjuVXREPeQLj%+6)NtXrMAm0ARVvBpivMP*$l;sS0UCx`maJ3Jw6&LvYc_v8Ne__asNFnCK^iur8qZrqc>FuGJfG4lEu#O+?)Y@T7`#-Nfm^ z5#P}Mr+83PM6h>e2i!#teEw$bautXjg~?9Yhj#&H2}mkm2u#$uv8*+*Z~!LNbv4wG z+KFEM2EnHh8ray`VwHB4%rt4(&9N+rXMD;c>+RZli(PkW2?o_+s$;j(Q~ zp%Qd$080IVnxp&>1RS2PoBtb9;B8ES_IZ-DZ`J7pGM$aoab~v%Anamf_FyAhv<076 zmV`#5@n-qQh|+b=C=jP%88sy&Zi!!zZ@f%aiQoc-7l6u6Cs!i86*4fwon6!4f3LK? 
z1h2Z{9~+-A$<&r&GO)h;c}G0gso>$(*;ZGs9|(OrbSsH=;+kdRbg|JAi%{gy zTPC9(Or$4tR@NLB0HiyuUPQm+$+_ZTu|6GbV6`$OYDa{QjPX;$t_08|qd{QI6w?+X zondh-d~JZ<6`P=pj{`uc$bp0rB}hSgwuky;W8>y~(l3Y_>f&ou8#ngHlo8zzkR=ni zC$-)tu*x_N$@u6K`j{3Pqk)q(g3Jo=d&?eu-mLX=wpLN!3i{)N@V>rxfRG?>gX;(Gtb&C1D zvH7LAgWWZOZRbd<1j*b#o*Mn{?Tg1;Byy$qD-abb%S)@zQ()G9!bxW-A?!Ze=`rplU20hluAS4<8 z9_M(E8D$^3*V$33)C{i89gIHg0~+bi4#b9UjOydddr?WD}QPJlLVvyL|le)S==-ljYUd ztl*;@3q5B{`2gM(KS;aa-D;x$Kbc4Ne>0CCfLr66&8JAz1QEm$Px-N^K%#O zTx-73)_S|Ab9s00)+)DG2is4dT|+#$FFy;K#Qmfgn)Uhsnj(tnZYjjM#w@CqfJ6hD zN<@^Ws5rZ0K2wYv)uw`3GYcFUX9GR!si9+1S*SPeci>Wi*V8MHO)t!39ex~=mjuhs zOaGNQ zi2$lubX-B=?+B>hklI&%wo=g1(NC_xUp%;L>-Cn$r05T->fdSX1!{=w-Lr4ZUa{Bi zheZ9fZ(EOi-)-zRudnrQ|Mc%~s~&bA7s4dPAW`*T96BDVZZTMqoypF0DagUEWCxCn zO~lRct|9Vdn8&GhD`dZ!rSEgWMQwB$EQ%8^TnGZtO1L<^>KAut+F%!|%jjHc`xN@N zAQZYn`V!CW`q>Y<|MeQ$r-U&b;Tf#cpLf^9lV?aFdVj=|nOyi5|7I@M zedZyYk>SY-syRM&sQbO`12}A-t=F>~5h~@(;K$S3UH|t*XyGryT0a(egoS)vjXW5D z9@89=IVvuhIXO5?yhdb7qTNp#S)*v=)RI;}KehKz>$$l7STo$imhX zf@rhV1O(B~R?%vdRZ$f6Cn@c&uPV{u)zqX-LZn&}c;sNfTZJk+HDBKwz#+}^%?H(uP;+bTfeq- znVstXY1_?#Gj}d`cFG<`aY7{7&@oMuN2E}Ch2IW!5*CuR0TY_~-^!$_Zz&FkXtt)C zd{U{h)8~NpFTxp)&N74#FEqCL(;AjEw%z8u3a?p@IH`e@j!$zGZ#8`1);&9O#vgJ{ z_sPQjhvRXImGPoAl>9#D@1GmK$47P@YEPRrtYqXQ3EEOjOW*WbrPVdtmRuP2^E~=a z6T}N{Nh9O(bJYlyBA6{Op_!%GUj{Vsf4tng^Y>?eE+Gv3Z^S0NDExYF?&Q0)G$OJ4 zN^4=3Nv;zoDWu-d#>UBc)Ud|LUIMYD={W1y93S#AbYHf^0AmXx z&v2kIdH9ii5}8J~szk(38LsaErKjAoYmTimG5-DPH~HFTX}zr>FKXb1`S``ZcDyT4r(O@_?kTf{Jo(hH+8s~YU^ zsBhQ~tlO{5zastdzUsIsI#37lUrcIb5%`UDz~g2W_;_q?tE;$9s#Na$I#K@9-&cAs8n`?& zQ|sqX%r_T=SKEIsVcL3kmSAH(a{aH>!<5jGrh~p)yRE4&Qc|ynSRM!lY2DjWb=;-s zOHk%zBo6od@rB3k2N}9O*9RMM>j@1r=E@ZrYi=x|XaL*34)X^1G9b&*n&r~`AKwd} z-}&)_W?N(0(U(`*K5$^H4!bqlC!JQ1sTq{I?ybn4wQ}@ za&26bggIChE|ugKQTLE9mv?EJW5vI(Q-Tyr-XVL9UYFS7Z|m z{Ti1A27{dl4ipd%%IGfN7YbEWq=Rg}aSZUrher1iIW_0^WK&A&?a8_|+CwmMw%(`X^-2Ta27 z1}&PJ#gkP)gOl0m4}?1nEu-U#PwI8)|M(fq+=#|qTba3rp-Qa1+px4W}U06q= zp%kTEgc_(?DISk@JbN&FeO@oe>SXIGKjq)+;uN}3N1Jgaa%SK9qV`?lQ%!W#^IGvr 
z)CW5fR1IX}8cqq!vXK?`kZyT7P_9sN5-^t!R){K@Bh^yii%)QnWXz_3x68lQ6so?E zun$Z1%sO0V2ZlLoFgp$syUl-%B zFlBp7lU$B&Ilu2Jw3I{W^NRuGA>v{-qc&8m-xtKt*N-vxkJdoDvh134gPj~+f;Z;m z3Cp|*ZsF^(VHr2r_nAti%Bjp{k*A)Bl~aT2u^y<)SpO_JhplvpmQi8gl`823*hQ@f zFMI%us>RHC<0wO9Z4zKk%nj=Ee6&hD*3Qj ziLjK>Ji7qU3yvc|#GMPTUfc8>Ues|m6C!p7))XUE)lk8fYDCSau40NLTiq0m@*{|f z9b&k#Tm~!`<;Av$RdTtu{=?t_K&vNB3aWF7v9*S4OX+0SXC>?y{u9#WNpDHbdB0vp zWX3RwWA0V!W$UWsm}?RCTfscpC1bI(dv;-sfOK z5^Fbk)N3E9L0FQcC=gPqTl0$2mPWcaffhL+^R6^hplZ8no7g67=Qg^_l$I^pirdC> zLgPi1vZ;7AqGR_!LS%AYL`NUP&^TS1?j6#Hx&0jJ%CY+}*8pCope6)05yqFDMI9wK9r3uvuv|}HI9({1E^J!s?mVntH zWs;R+Vn#oQmMhGXfM?_!iGa0B!HadNWW@R+AgNBHAdsRlzO4CY)j`dc!7DOqXnV+C zqWe~HHeUuA$BsX{*-52^Hc4F6NenPs6-S}KbOj1cH-D_+k*RdJLna&B#_>8r$L^vI zWkbd|72F!O_8S*Gjxob{!U=LydpIbDwvN_gI9aDqMR1R7l#r`D?z%2xXlZ67X%;P1 z1IC|g114)Bxm{zlk}-zESkE5X#?}_EQWbq?X?>Kf;m~K)zVyZ{=5e4Q$b7_u`5t_V z`z-QS5!9l8+IQ-la>bdkrC7@uOo7RVv%R%A&jwx33POM`&D^5o3e zJZHnumFc8_B4+c-G^=?Kc1ohXBLGjFtb?EXFr_rv|G?RF&hzPm-{y`GjvKowvu5Uc zetTbZ_tSTu4+$^CE2$M7aDVfd#qJsPiTnLU`?zl5X_>b;^5zhC%tmtY>R z7@U|R`80q=cPZEJ1Js5_I^^Nh7S-tk`zqg8V>}()cTiXMe&gTWKUx`qL7h1oH--*g z*Gh-wQ%zfAjEwUEioQ;T$3#90Z9b?y;LI!<2-pn33UEG5VZ9+GDV*mKI3gh{*3-o8sn>2Ai*OvATTsxF|0rR87)GDL(PxKkJ&e5HxFh2~fK8g4! zw-|pve`0AS%CyJBJj$5&)CA;gimgvBvNCi99g*=Q5z$JvckOy`GRK3h>BpyGQZ{)HrOXZT$QuEq4Q=}GsG+sHJv?hj) zsD4*`aqT&KEruMSOslnCj{?yftROhVsYssN(2bxY+oa6Ci>ueC_bW@qsS&~rpIXQc z0#RdHpJX>1hF*bTpZ=c5EEiOl`VlCT66RL1Tqmd5v+v%&4(fycrrn*;a|tkO%24}3 z;?&*`F7b|!elXkSXp;#K#mR}o+NlnxKxQHu46vmn?NS|n@ z4xo&4+~1|kzjKg3*NQ6$;gxgLYgsa}3eXb7PL6+lue-L#D%HJ625u5-*068tc#>L? 
zCt=1Gl3Ur~@^>Djm5YyFlmxv|z4>RaJf*PP(u2^&(2Hu)buIvbYEu)^nYpu;pf!@N zcmjCnx<4=Pbqt7CAkFVfA~zV@+0%P@q{tHhH;0;F{v~=ma_DbWYEtBlns529qH_NvE+mblz z=eFJ(@){eg9Paawi6Mj5e&84unxVsH&j7U-yLT^$l^p}<+9u*Gt-!_mO)4KInQ&%y zPDbpv7E(WKb?eBE4UuR>$%8Gh=4rnx?5{P%9nByHtH)ImPJV{qXlc zk`l2wx9k!B6K(Ry0m>D5Pn^}S85SFAW+9J)rW4t}Pt(~JBP@%c7G`V%FtUjW1Zf?i znb+SYG>**}zt1}Szs}Fave8~bs1ofz{{SQMGP;!AiexY(2@VXY)sV6}E+|089BK7( zH^unpi5Q)epfD3H^^rJv)PAdQR54Gepe2=<8T%*s6|~plxHil8y?O9&WeSIU8{NW)IVMT@L%L>0`hB&iNL+s?u=#=HYVF9-rZE6Ue*kOP)T#F)1+e zlTqhQ&V;ZaDZzzPA&HiO0rOnm+daXaI*VCz18P&%lr%w` zvQJZBKEljwn5NnS)zdCgHu)+!CqL_YO^^;#qA9$xkz%^#OdjDZA+G~$I!hG`QzD_> z2&>HL!N@5`93B|es14m0_xefi=54mZ zUh&@;T05j2+#huI?(**o@1o!Qb>_p!5jM%JmpRUl+p%NIt>mv3c^dzdD|GxfSMUIG zh40HA+hI4_ZnoUL*V=WXt?61*OLIrfwMJ!Qb6v}F15ZUrn}lmjeda}WOhla;PIsBD zInW4JN^r$VI{n$<4oV#r?Xbx9#S4aZLH>d`CDFoG4%FA1y$Hn3c|IZDHa8$L?P=vX zg3x(iNqN!E&P~r!wyHm?GIGRwES4b|c!oCBZy&aA(EqpzwsZfuw zm6OwbFyjGgE;v7F6himCWcrzm7B`=m$;Us&%XZK|PCc2wY+qUFZQSnr1Tu2x)zc5> z0oV_Mm^)hheIE@4N_3_=EvW&JpQp;~0L*OTTkNQd?Yf87v+IQhxdmji)(#1ha^TCu zEycF0^(46zIvUM+q6xCR1Ft)3HP4{^`;PkVtPX}|xo-ZBr!XH*F2Cf=SY1iAI(pDF zJGsJHU=vN4kilJmF~HCUiBY!_u;~nzo6MTCuk$w2O80N=y=r;Vc9L`moONsV=h&F9 zpIhH2y@R&R@Ka3KznIvUt`X}p{t6jyaXDwB<(2e(?^nC_y053x!af+s45Lpmcj_1? 
zpX_zM_DMGMx5fh!3Le<(qT4>l6?0pfU4 z|B|+&G}QmwG;Wpj%ZaSAv1aA@HHTCe4YPX(kNWuC6yag&R=O;T4De7hJB%IJSaa-4 znr2Nq!j?OeWKljQUa@|9F>$#O&Lwm-fghV<>z=H2U05eDXks zPA@qkDf)qeBgkMa2EMyt)$EITmR5NAM&j<%`d%qC%IMr9NSt!n{;Z_;c*b5-avO)^7Sqx8gqb;@T6C_|;kUO_N%~f!nfd-hxtTRI( zN;V_rhpfnWv;r~?)cjGKs*V#!_?STT&q874b*57;sN{lb|4DT2RQQ#9e?jdfL;-#O zLwGgg6>Q#~K6G&R~}jv-@kh zW!Vf#T*X`*q1^}A(_;e;@ z2bx!A`#h`zf9F!lR}0&t$yZv%0az0Y_ucV|>6%h*wgL<$$NS!JJnnTQKlx&>KP*;P z^_Jfo`P%tHUWt*Rrb~XOy#}c29QYz^F?uB=^YL!2greV_JNky3G_Rf1n>T-V7`7c% z3dg{Me^=~bRu|*J102{jxU_%;b16oT64mHh zs@gQf(!vk_)Fz+1X<~Gq_zlUfEIyjnqS}-tKSYgHH6I^g5&oVpw{)Sln6Ltagi#;< zb=YcP-_4lKdfC=!kLnWibvv&$BgeHAwA&0_L5y7w$wjR&^iG|>XQ&2x3;NHE<21ob z$IRt?MtO#7RCl)Ory+(tku0d z>QYn>4uzOGu1|#i_vpGUDz)J2IK1>qiSDk%%$VZrX+Wsacv>jy z)TnkYvg@K@baT<1PlgeP%C1=juhbQ-2NaKzJj-8W>!PQ#`|4VJK-9kM8xdDxyq9Ny zEuPL;JDyY%rl$Bula#$Z(lc3W5*ckhWP zYd>z<@9!6-6B!F<`cLd`$jTysoDBh>Ilv3LbpZ33)49v=#IJH?c4#mvW=!nvkQ2@h zP&P4Pn(6IvD_@D<9wF9v1uj3_Wj&c$}A`FIFNka<<*%?qTOm>J?S8`LWG<9NPgYW0>LPTgA zKVC-C3l`)w|#e$ z?5_|zcNC@0&>*Bu`WbYS>v&l1tvgg&Ks}HGgDX+8x6D>kMSiaTJOiyB>Tv=x6 zv{*e8(DM;8fZI3EzT~j2qhUvX@TRpvzi*lT;_TL6nx<^TWBZGnn*B`cKqx>-yF zB)=Tyg36EBHa6IJzH6ZIKSkW*hh1-U@mhv6s zt40y;r`}8Zd-c^9+%LGQ6mLyKO}EuQNN~lP8}oSaa}iW)T_F7L6ntAS84(~>KxD5$4>(ki$%iMO~ zOpq>9;D!SE_J>1?7EmNd;y1CsSJ*&8!z_Li4ZK-k%r!n=9YeJpK-24Z#sqmuk_ACo zVv#0svM-vzp~7-shEtxd1l^si^1c2`qnCN{;e2msBeC-R?u!?!G+kR%OKBQ0pTn0n zshXvU&#h3Zv!BV4ATmrD(^*R>e@ikXYJuvvK*N6m-Sq?A_chNkh1w~u+x<`PlQ z9vwp5dm*FqBYG8LS7c6KCJkc%X4s`XQtrlS$o@8-!rv=V@wxz$p_*Rjm`ei*Auen4 zD4ED;?^12`9`kPX3JyoKwZFS*X`=D<12veQ5^kWUWA^(_j%WYvTo9mPhn%J7K(FU`%oTS{+BB_N&^LyO2D)^^SOg0hTCL zORpz_mJcfOZ$AKnxIg_|elsIY!$z{@0a;Ccp{324AS$%BAh&EKW92u2dIU7KaMF%M zH%(M}MEhFwo!L$yN-UnY#x(HAaf`gFTuJta)W7#>$GJ&I4iIaP4j zu8)#QHUQOZ0qqN4@R(7AxpvaFx%{uA)^^OR=6G32P?5m{(J)kpGNkhot7>9n92NNy zCi*5FE)0!u!fAYMUC9U0f`+ymkP#p5-Zsno;cs-=^gVHqQs=$T%GGM;;^f|x= zFc7=rZq@W5&&-a;Nmkvpr)wK<~SKxS)_8sd|Z;^oJ{56vI5wzhr;vY*F z+DlrT{^+0#ARmlhfP&INmT_=-ni6ZuS9v@fz31ZHR8{z7wEl8)bMta;AID`OGhuYw 
z_L506*_=t8lL82c$COgRI@X6*6a#z4q$*u$ZnahEMNs=X)1cg-dGe`^YApp(o?3Td zsCjU4?~Bu-ke}FALaav6&43N9ON7p`?Gd==2BV%u>WLM8f~I@>-W`r(alWpmGQyh@ z>H|*fMV{ArS*E_p5m>w(jNCOq1U``NyiwhAF7HuZ! z`O@YRxeWbqf%1&0cIM#89IAhael_3WDDljeu}8a}GRvEH@#vD2jiXqJE^}u3G#j08N@WS>F9etRIbByYdPaj;1JN0^G5b3yn>A4&%w_F&B3K>imx4>4WOYeONJ(1?LS96`HT6>#90YVzW_e6cx!xK(V zA+FU;xR`-}>Af{*mKEM7U!998$v$Y$w43ci`ai=OWY-x8VUc*MKh&j49IU+m$U@)t zYoMINRxv>8CRYx=mCYUotJV0NsTa9T4ndIdL@k7IUx?iH)xBnrpR-{N z79X<5yyzJ(QJG$&%4uR>tT0@R7-k5E-;#8FTs3PKbPHNBZ~}*B7x*mFZiGL5?NiCM zER86-viM$~pT;%SySQf#$IvF$Ce$k(0T`MHI%bc@vJzHcT_g~z# zf~s)@-1J}T zGSNzY0ciD>FZ4stI+*w$^gEl<_}RJJXk>ZphinS(*EgxkGoqlCHq60n%}~R+TK|YSQK?5BN;VEhWb$9HOjTp8`a9j`esou z10Gr1dPRTkN>)X0}Eg3gMZgkEPrs53Eqjb=R+(1K-1# zl)>>|)B!(%?wnuksniY0Hkmu4D$KmHJ?8Sg2XikGp`{{oxFz7TXNWpzVP~rQAua;8 zv_l6%ON5^Cqu6ag@N{`xZrm*WSSmSHhhWiA^~~PXRTMnKA)Nf-Ut{JvVrA{`VYT6d z)ETYC3$N=B7ijjNKSpoN+j@U}`p=Ko5F_ifYeU&HOUH0DTz&5bW=S&_lMx29&Cu-b zo6=ho;7b>4gsb z;0>=bTA}*cvcDp&mw1na<(=1)&p-8~`i<7SAwK;4{)8VcpYfG<@i=!#|DT&j8<#YC zml{_)M-LsZcE|shA1Y*lOfGyk77{y zhAwcl6q98%IS;I$xzLpUM>YqXcFY`0j`8&Um}GQid!A0|b~Jt(&s*oDPIAtsoY;RW z>e0hd{_PuE-@fqJIHCTG6k&dH7d-@j+chP9=-Hl*rL@sy*X`Ed8{V&yg zR7^|-V%XeD87>GrJEgPk8ZlGe+w249<8U$V#p1+Bzn9$JHz{W^Ir#)2-(Luha!`BW zaJx39Ki0+rJAR7suC)$Fs+?5#o_2jVitfpLufAiuK+}<~JIxZHyp%mgYM?Kd7g*1t zbW~xD_WvABolo6H=`ejezF)Ys3BxroH?+Nb&EX95?ddi7fLqwSNHbqINDRC5!r78) zm7i^-v*Kw{V@MO&n_(rTSVrljTc{4*Y@1mfQ_9Hj3IK~pwdV&{8K{g*O)qnbP!qJG zBlU#zPC+R<(6zh$+>eKvSJ{4pw7bXZo!xy|9_Ub!9loJP&x0|PhUrJ%oqV$s?FCDr z46|}g#2*hjSsN|C%Mbs6K8HGz5tH$klXKXAW*4jgsU~Pc_Q0)i9-nb4Admv8Me0e? 
z!zn3IbWnZkc0QY=LDcrSd3X1Vueoz7MJ7`0J`Nr*dURxxotf_b{?rCJ5>+JJQvG>Gi&G*UZCDVeah)03m8o6aLsRt}i>!p4 z?1^YHlT44TDFzQ*FYb!W8oGAL7w--SG!kX-UndtGU0#$w-d0!x^?(F1LOl2ntY~_o z#_56rI3*}t%w@~CWL_|^%*HXFVX!deULFz*7sh8SPK0?GZ3+3%99A!-79e7#l1Eod zvKYwK+I;40NSq*kkvPjD2v}i7Aixk7lT44xY}0vRkP(ko=*#IEW_xWq}RUIO$n_#pIy4 zFv8bXm5&{LU15(w&y*x_Q^R~#I#vMYTRC@)vzP<}_*}!H0ggo^eMW)q0F{ce6y$x# zMT$FI8=PAmqrRq{&8T@fA^+#Tk)93Wm&WI{NoFaMR{v*kyqpm{ZK&aa@J~wocu{hK zPBs<=moQIyeZ{H1Q^x0c1DphS-GIk2N6>N}@5NOoa4%-hz5BN=$h80de^kABRFZig z|BbjS5+WL!njj*s37R36qksyExwKi9xuBR@ ziY?lnR$5xqJ~Pcs&vie~`91gZ_y>MH$K&A~^}4>-=lg!YUaP}*GKNRq8hV%YI312+ zM%C5!X!yd2W|#m9OTVVFnNiQU=daG3NO`d)#;Yr9W$;{R>#+W4>NbiJV@0TJts8FC z${z{Rb-H#*UXtpIuPlK+yM1G-ioi6iqq@m5&5qDv`W0h7A#XnF=Yc{q>cCpUp+>tg zgJ*|6xEHzS6)#Qvczf>)+IREhFE6W2(i=QmICJ_XAYeiko8)Ahb+i-q&UpL(ckS}m zDH#e7#;{Nth&(dvpxm+=&|T@Ea7$pzPf~`@HjX84ViZ?00e~ZsEtK?uhjmWCh|Zkx z)|9mM4m1+0T{ToydRl$ITxB%)&%e#PTX)&}|K)da+4f|!6FlH^aUDlSv`&8iF4+ZP zGdlNp!=>l3;KR<0Ydew-_U=~1+{-G2*jm{`@NGPeCg{?G@j&fnkk85@?sh2mc~r6d z&!i0lo|Eo~y@%?WQ~!N1O(h?CUpL=zdr%8xkHu;g6&Jce)+M(Qrhhu?mvu}Kp&ZhB zD~d8D==4LSDbwsJN`-CiEkCDbb1Xf|@Bj&6!GJV3;PUVL@wHfL`JN{`ow@JxJaWI) z5vkRu7(5x@mRKWx1H$~tm`E`YS^6JWvDN(L?vJIC1D-A2 zJfP4<9OH`dliQ`D7vkOKBr9Q+qF~*m$-ZQ8ndZ3TerrS_l6p4oFfn!6gOF3d9rc~q zQgCzS^oirHWsugS@D}devF~kx_b;~`JV1&J%zF4omd1uE(6;YW|B0Ef?Tp4O?A>~} zw>u-+PuQWpvJyGa3S&T@JOf&z9yZqZ2&ND!Sxgusl`ZzGrmp>OxqSq_>A96s?8TRK zfhueCw8IcPxl~c?A`XdOHjEqKhw0yEVXEBit^eQbDu~g;hH_$d!e|T;9jj)4x zQkVX#JCA%#ZvEhzP`O=m5DMj{?Pxe?@;1cp)KLfpdzPwneD?mIf41r3aR}Z0|2isr z`|20Gue6uy#L?$*EoQlE>JR6iW%`4mt)Jj-K|r$7L3%7BJ;H5esHjo+=Ou-wb_sMl zs`61euPd^yu>@sN`r%Ncp5HUyi*mxko~c&`ADKBkEt$u%6#^xWM?>qZbxF0*thssA z71(7wdz6+!)(;SYTq{MrSHl$mj5{@dwxLW!QxO5!!ap2wJvtAH|A(=Gz9%X3BJ{5cyCgfrk9p@i@umC@Nzw<0&LuvEYn+eXa$f7GxVuLMrnMm<8( zJ=_4sKVg+k4#tLeHVb2Bj=P6jJJ+<*22v)y94g&QAra;V4!Id#w||ueh9>#HbIt1c z&&nKQ^XI|qbg7S;AD0Sj|ZKuZwv0ipI_pk16*cF%vV`{G%k|&7EoZGj&uD*G* 
z2h&E&&doE<#ZbVXnR8c)i`s&zqs>>#2P>CNoHmF-94`ji!0rSh%5gQBOmj%n?x>Wj2p!dNioW@-XbH9PhKDXQ4uW8mbQCS5)5Wzq|u z?L*DB%PhRzwR^XaEux}Y*a#a4+StFXw3G%=zPXXJU~vf*)xqIl`<4VKHFDJrUo(d{ zk5CEliX@BLG?Aa&fmS}cxUQV|DKme=4p^3Nm$cq%@v)h_CBikL*z~54Syf#i=But} zw#CBt2N{>_Qs0}w*76BCd!zvkN&!zKt92ZSCmv$H+Btn2WHw_~sid7!JxEk7)Pmkl z*nTqmKe-44Ltr-X|2R6xz~{G9`7g{TcAY$VoOf5DXzLi;U3FY^r|0~AfoS1IPyVIY zBQ9EnboR%0o*BQDf!rz?PuORtc%{N}f?~3E1Ef~(gAJ;S8i=q*1+5Ip)~IDdw80gI zp8{zb^(4PPS77Kc!;VNaXtZ^hXh(rAa@z6u)lw!$*@|yvvU9IQ8mNAG*6ej$FRfup z!dH1DBskiw_8)<)9lgeA_;in2X1jV|>Wz8AvE}g7Gtj-GQ^JxZ4K{uhxx?=KPrPr> z$OO;QQ7-q@^!!0p)Ty*q)jZMKOR4L0DJ@VW(;_rM6itX{p`bOn-oov7|9xrT!C)}$ znOD6sx7{(#9*+8{thqFx%-y%zO*K7bOl8}AXl<#+&$WBrIyV!qqXwb8c0B&&Twu}6 z?x4`qZ~yon4yWe1VT`GXF0q=Qaf~6q6T@1Tk-FK5X3F60c#Gq@L=13#x^zVnqO!xc zrpY}vx3FmBNO%%AE|ZUXbsX5H+LrIZQrnksgqo2A8`w*aQwR0*{aW0BjIpOebM=|ulSdi{^K#0HZ30+iXJ|Ogh{l@%h)Odx06(qQ3qkfY(W`7? z=h6ex{5%D19U=jT4Da>Yv`NIygV)@|o0xdhHOjilaS1j(4NbE>rkqVl!fiI-1xC<> z@>+6AxM;eSNl4`6BU8M+^ZUuJDpCkJh+m4?XthV+=rE;U3``?7Qm3>x6<({ZtZV@- zyEYFouNdOa168?1<9ua?F3xNN*qdi!V#tEzChFA$_10D{pcE80cc#gb8&bqH0;=w3 zZ*nC_BgdZeAN^`7CK5dOEhsgEFj>p<^~5k$xG#)Fddf80vn0))bBf_D$!BHOIQwBi zl{PoiTwlQWfMKV)X2h)qfIjjqT+}$Or&>nVSVsR+JMI5BdiP_Nn@rDb>h}P$Zlw&} zYf{aER`6t$hou^`HuMF4l5m{vp^b_PYDlm{6|lmS=W^g92_zBYA;w$*2QWA>Sd&tW zyAn9G9(QPR$ku7M@h@KYpx8sUrmBsFDRv%rEvlq&c@r6K86}uwbAVjp>?*Y?hqUTp z~rX7d8lFgzf>00cw3vK}g2#>s}v zNmtPIgY+S(|K}BF#*-&@8YyP8Eq!i>w^~N2B)~A-$TuyT?l*%{B^wwV@1c5DmoalM zlGzfg6e)u}ALj1njcFl70@Ex~o$#k)@8}aBiru|1f3ar-Wm;=<>H#g|lt}Ktqc?wU z;pTq#Aj7YI95B6#rsErH4EA+uqC#Jc@BgbS>AvDCl+?rXUGQ+8tNcP<3EmqV5Y@##oz!SF)MGPx&Gw3i zDuv~QBB;n z{NTa^dz@eP9En)aTaLqA`V{`o404a;bVgHjEfW?+6XrU9-7m*B}%A-y(!kuSZ5+;eAGNFURqK7+A1m%>ITM+`w z`u>q0?p9B~=|dA%R*0V@UvGza>zfMmr>0v)wI-uJ3Ho$tv23iBa!a>PH|+R>-O*b> zpI`lb>D6ypeWc$aL=j|p-T3#%+qX4A510n#X~Z8|*qstEb2n`-?ET7Cj@&YxX}q*a(G);fA(~}u%^UR5{PuBq z^G^fxdvQ*7kS6;}PiWMgg&zVXqDJYJwvEqcUJ8a6d}}=XYpEIV?i6C6^;a z`8j(28dJ3}%s8JDP?;CI3Eozv?Oz%RvqQ*xM=)x98g{jH1xd0ZXk=#>Mz9wvQ`7Iz zzp%Ea^hG- 
z2R9$Sco)AVt55c!9uV}4-+kcQXXcS$PbZWGE`|kOOYr{RT%nLoTD-B zq{^{HBG{^2Hu1xrDKsVPXv-0B2khjlmasJYT8tve98iZKl{pfL8L*X&wb`WtAGI?r zL4fp$0}~ZVeB!3~HDn1GOb38wI6qhz20->B9q}ks#pqIHZ3nC2gt0>)=ItF`NlO8{ z3sa|o?4pdP@jwnsl7L+8(aZ*nlFma?%$>l_)|9TgHBrYUj0C0bY!T3txap5z zIby8<=?0A*ZeEVp-tp~&R>4E*;QK>wn#l#l{S8>j8$raoDcmRav9+A_g0I(B*LLWQ zy(>6*%4cx2YLLa^_oK}@lua8gUYDt_Ty-s1N^!}*D(+e!)a(^*M+0BgI_LvV7}V{)Yw#8GPP&)U*0mx?Nq6&Axb0MyVHP9~;#}0G zViWtao(j`mzBx8cl*n>-*KV>u^LJ_dZ3k*BYevr4 z{G0!8NV{5_x!!jl9EZ|;EbJ1>G15qgEPM0d+S4E0-un&rFWM&Asrlv$eZOk{_3YoO znD0m5#1Gs&FQ~7I7++Wj8)O|Mq_X*NAK=_PR(_%);pTgl8MisP(g$6veZzkDon3;; ziWeRIusgDjE{o*J2_jfeyF9hR&sfvIMhKInLRfHixYPU8HD-CM@}`@@@%z!!mRoBt zb#FX!vypb!$;x)F5w$jORDR-wartxNMp9Ko$?gK|L3k6FVV~6Yp`kwdG6hxS%i5o8 zxU^*5E-1l%g}xvf{vf}Ljsk5*y#h8G5I^&eH^WdEW+`i~N1el=r3oPnQKC?m z#EB&MvDc`=jj#jJ98CTON`w2=$$_J@2Eckf0373 zA-W6of(J_`iR8&NAGd1{Q~hT{aJJUnhBEbpLL5-%7q>r=rbmu z>G%Kp<#L5>==g5TZs130ta+Teof4tzfVslRZrIu4WX?m;MLDEb>P-IZLMG!ZMxVn% za(Ji_s<#5aNu5fi82}$um`O85Y&oNrWsvJ-*`gkv_oIjE`JJrC+$=)P-0R5_LMS>` zDF84FnbcJ%#z-;v*0LVA003uB2b3aw<*FV?%c*|*flb{pALrLO#UM|Zf#s}2A6Y>_ z2bGESyfe`lEQ#Y~Z3ISth>^ey#whkS=pwmaWMYEoS!wDXzly>Zgv*7b19SJfb7%px z0Ke!anuAOsnCq-(wvR|0+^g|R>=oj3Nuk@XCLO+jXUB>c=5DH<;VT(5O$`J0+EMe@ zAEDZ5789+~dHti#<}7kw49v3*=klJsjS1K0g21Fm%VP#r-!s;^85Z z;mwZMK7CzrJi2T5>+Poz9R}~O?HCApdiS1JM8D1UC5fZj=9R|ss2(sZDXCcckP4JO zz5YXUEyCEQaON24q>D>9A)T$nWbu>2XIHrb3j~d;p2EhxpQwp&jB;bb8gh{3*Eg0k zU0S@2b)QFU+tKZ1R`xBUCL;HBFW9hF^MPsX;ni-^U3KNvhRFrEymofENXe~sEX>~DAy80l0F64k*+kL$|w5g71UoBK13V2MrQeX4(#74vsi_Rws_h1LyhO~T?-?vpUggv8{<~s$u=~t|`Msv1_>{y_WPeDVJl2lUaIEr92;?WYc zJB@_fd&b3JTaVZwDiKjl#k`3ezt~uR<;2_A{M%a4G$%D5JpIISr@TSYWVe;xMYmnL zoGbeAhs!19?9~W;Svkq+aoa&RAp(#Rl!FykT8iGgyW_qeO2A_HyFt&i=iL5D0 z(r=$O)^>)p&kq|Xy}EO_=|%-3i_~T<0~;iV*H`UIM%|?Whjo^=^_A#}s?M+pQ0<*9I?P@GeWmLCh#7E6(^wTy0K7UkQ4iCTz;#Eq z&o1yluTP_dK`(`y#h`5s>Ykpt366 zuwfeuA*?#6^W`3v>2kHYFc-z++frkNKJ(xI>9)2gz=Xt1{_De98V6Sb-=w`Jn`1zox9SI;{Lao%QR0&i$ z!&mS6^x`hg>`yjvl-rnzO!b?P0SGO#%CG|*WKw>dev<0WMH5v& 
z3BunB%y5WGH)A#1l3&D`K6XY=ba6>gwJL?HgCCsYMF(jj#nBLo)N?C67+hu1?x2SX z6B~oNOD;TJw^D4(yDdwp^2!eSZQnheJev!-?llxaPMrSd+)CIp$736gfbQfsLdr+s zkY6s(3g6ud-0}F?gOQBg!#f_O(!afIIlV7??fC_l=S+CZ9|_FUiErABL3&mjG+8~P zwD}$znk~Y?qG^MEP{fs?QIAamepyZD6oJv2)WAr>CMkyTS6j4F85T*tMl4GqBkf)E zWzB}xj1?kPVRSeZ4rP= zj3}88+hP0UwSK9G+q)dV04C5^Eat9gfUf`XoP2hmGaB?Q{Nes1A4baJ4n&*v)DXN;V2^8(jOY&QxAvaj+TNMO1hIX)o5EG3Ubx*%i61%2Djw|Kb?u>_HpQI#@YLHmlQBM^g8Zi?w^6v`_ju`;LnPmMVhSKd8m1z@@X)QsAeJ7gG}VNy z3pX`fs-;nL(9uZTxluR10|t)M13N#LV4-DYmAQj8j|1(j%6s1mcBoaEY?3!5Ng;yv zi2!X<=_u|hCcGW%D_=`q85G8!=Q;^f^SE8!7EOkwi3juk9S+27efd!D-6fn*di zCh?3|iIK9W+#JwAVLP=|Iw>Kb{9Kzr@y#MvUmnKFzIxU71QQ8jY&v-|Y4P$k%a(^` z|7b`x3t6yIkRa9j9Bn(k3x+sBr2w`*CaYYSbB}9jv_3ioFjB<1`u*SZKm{G) zm8u)hQRw*YC;OpMj`%;fZKsYNzx#cfqe;;Yvoyukb{h>X!_rG91#vpnDwJJPHfcp4BU@!UUL@us%?wsTgY`!(6&nxM4dzG7-zJgigLzdsM$8 z2F=c;rDaq_Z<>xI)rS$75Sif={q@PmlSq@19f^9@^%dwX;&R5C*jOc8^1r5J5O z&?Q5VbV*ZFrzisPX%w0qM+@}&yq5ag(t{M|NJ597Op5^XBbSY~F?f#ZgjR&Yrh|`JSp( zq+L$ayHg$|JAyTUiG?L!ZRxawQqj3ExS)I9ki}?tLshOenISPrb3UH8gHU$0xtW>w_imRd_TFm($GblU={ zSKc&7TRvzSJ8~RO<0a`Q+t|4|RDh|)X2Ja+I`gZSlP%=l#K?msc`R`J5dKIw>8)`1BlkH$tS(tnu-$1o+rl)?#I41zx~Tt z>BSK?jch1jpS3-)=a%oO$Mc4u)4L4I(rfpO1BADZSFTs7+4&G94qiv28N{L>&gMm|sSOc=(^#Hl|7bB9=O{A#Iulcg|tl9|6d zd+ujrhm(P5?1FE!E2A+7vnY_M0G;C^%5jzGuFi43^i#&v?Jv0c4XbH=4Zch)g`knP zJBSdDom!;ax31G!0_=!eU9uWbFw6U;DmI)|2UaDu9or&bp{FFt@22Uhb5beFVg@`g zW)}WE-AA!@;qBRx_!HWRSZ2y-x@-gVGi!y&uuaoD66gT(xePxVdt*oNjkC@Fd2YPn z9Q8l<_TQe{@3gY((C95Kjtvxj@=4;PBq$2Xz~C8rV&CnjB@)<)-Lhc;QorYN%Pe29 z)K75PnsJATrr-_8X>TMV8;xIAB$W{{1rE`_PC;gT!+Wz#3wW==};(jcm|6A_8+&$dY;hUsZ{f{eyT#tk29*i`` z7L^Fxl|VvdxNyj|d_BZoJ;V(M#aGBwhn!_uMbf zebr4q`FYpTJ;vwWQoQn_Wx;=)VS4lQ4zxmA_6VmmhN>Jj#!b-Z3Ly7dOSpo;TqzVn zM5A0dk1xz&b;$7CiR{SQp#6wEdAfhI64=+q{CPSzPpA*yeSkDRv*Y28^%L*OByNI* z)N}PgB9F_$fFyl`tg0HJj#WdaqaBi2Gmx|&rNlCXtEqGSklI#9J{k0qUuSOTV)eHS zqyq|;D*DWI`I@bO=q6@3Y+f`NSE7=hQ^z_#$GuU-pS`)vxWBrGu1z&ZXqw0mZ4Tdo zIs>oN3f0qalBBk*q-u?^(6dG891|SB)}v6n0W)&vjk9`TG&8&e8Gd$W5Lu|5bRhp? 
z)wiNku2Q}Jv@Q!{9e0z*3%9!_`=%wkFKP~oKURQ?2<|}ekKr7@;`-{-_ixiipl8oY zeaWtvWyZaH3$;WDGm-7lgX_!~bfsO=s36NGqOu#7B!f@3fPP7pFIaL~Ok?A3KcJXQ zzjE7jcH(1!(v_0#qOo~SOif13hu8fgpc6NY69W&TOKcDv$ZXME`AiVX5eF&dlYWyGe(Lxd~`88h~sgW8yaU*s+O&?1&PjmiBn}4 zV`E?whPx#O2g6r-Nq-@q-;1j}b`d1>C9AO!`8aYwl>kc<?lUqV#YKox*Jov z(mg6_L9n&vl?1XLR*MYL@()B;&e^CAJLpwFD-srPF0}i*L{cUVUjekM)sAhMq3HRC z)*dN`(|TsE?R=brU7yE37n`-)-mm3Ed4u=v#gC%&|8m~n1kwjCb!Mi=p|M8;swh}; zZ6Y$2N7srV-D_TovHC%2y2^=fij6t!prX$i^f~|gqwvCmkpI)YNci7mf(lF~u2$!C zAqNM?2A&_U;a5E#?Cu?pE3KLR?Mz4S+17t28aDUN9RE6XqhyU(dQ(s)o{dp-pOT#a+XVDz1c zyv)Y0**`ZtoKn7-++#XmT=U5G#3(wC9cpmGZjSpHGv~O266I}`fivF;4q`Oc6XB5_ z^f-m1&k4p#6|1ld9mQ zL_TJ=wipKY_r6$5A0>MBm(bfs?&QR&mgi!P+aLo(C^Y%xqsON;3iDIFJI|f4K5_K? zPd=1R%#$Clw#jJ*vhb(Be0rqXAea$! zl@-o?=(NEN^!=rK8~Q!@-rIBMaAeD3>T8YD$hXkrKO*mk<$SmL>d)jKC?7l4^|s^k zKhW>!olY()U>wa6OBQY5{yn8yh!X)oU_40}<6m0k;2A)3x8));BBwQg3xE2X#!km!HQh+x{1G;(K`56G^E#>o2<6D-_N>+El+ z-a~Uo^iYL8xm8l!8kITDu_WMW4Sp%jEe-jW)$08DNr{$5a^T>Cc_oG_PAtSWA?CP@ zU~k6wvbFPby6bmz@t)@~wH2iKl_yWG{ocvg!bIDstJsaH5k`ZBlcqoNFMQ*q|7h{` zdUFU_Ni&Wr@I4-$O1%)3+>pP$UhoLQgR1=;+*}rs8ugbN-2FqjR*sPyz8pDx#w$Y0 z|Gtd;CG^In%n7jrx;U;1-!L0$v=vs%ivBMvLLX`yWYZu)@-E8jEw}XukXCh6hgVjV zL_ljqZ(!Fs@`f>j5QH3_#AC8Ots1&o>7K8P`)L;8MFNFYp82u=I|D^g9>2agHM z9X)YKDDFSUFVVYiI#ynEd{WMN7Nh!Qc_LI1Ti7D=O$; zIz`-=k~j3YOnZN^7-m=zS7cI2C2{i^_U=~h(^JnRD3MlMYrUuujyRi;Qk?rB)>}V( z;#E?EJtr)i?4y6h<7Q8U9dEZaY>;pC zsY2r9IhY?zf2qls6}5b#Hl10GScMw$tBD&l1TI}+DCy&gp~YmmAlUM( zB)GSt*ty58x~9swI{~e&11h+j8+K>c{-^0%Rd@6Lzw0)FVJBZ;Ix-i_freJ?+9EZ3 zWvGD)0)h-BCG-kjqDMs_IMB96Yjk+@&~^bosY-W-ZJIl2&y=fP8c_87e%FqX<09hKpZ%?AR>#*PAi zf7v0RK-a0es+)pM(ln^u z$2^S7XnVNNIIOKv`CYTz+W#A4bCVA#Y>aD>&^bT3*DK0^Ii7LnWPCG?ep~(hXiR5n zPG7F+gO$n+;=(}Qw)@O-{1P#}18aS9ZbdsVkaLi2wZpTkN5m|TC?MUnGeNTryP|28`{MtyRWxZQzIE(cN&GfJdTP|kd4AK(e z6nI0AG@n2ZI;9_E;H)V)4&-4kPTXlQ>)AM&@#OlRkJzU`^LC#VZig-U^^S~p%eH~J z&-Y?~mi`kTFmSs@miKEgn@J1unWI%(hGd({QZyJ*+P*P-+1>*{B3C^-YMVC z-`$i)N11^NeF26BE&V|xR%$(a_3p<_n5~(PBF2f8$LW9WrTQZbsQ3y04?d8Hy7*p@ 
zZ-5e(Q<0A;@)eqZ$-`Q~2$r2$O@}%$bDYo-TP>|QOE2$y<6}}E;a&|zju+i2u%ULD zN>W+K@Ir<{lN{b}k)n(o7hgO0v;DY>Wb$S6Hz%9?4E7eA!;|pgp_^~E+eF_gC@EIO zP}-zny^x${>aLxg{bI?)9q-vA6m`GaLtC5Utpze$G2hx0Hfw!0pT0l8Ic1(3;D4ll z(Qa@-oe$0O^cBk|Bil|kYQn~i?Hnt#du;G1V>_J>G0%w2HjtbtFC|(Vb;{mEUfld( z5A9R$TsPd|R~4=m#rcRYx2KGA;rQrw2QUA2uSO;HP?f5~yleeXP*Q@t81p2Aj4OTe zHe?@(X#4HAbH}g6&aS^)+%@&)$*%K-w4)4fzwEh^h`X`GJ--Cm)5{$ejUT3+*d1mZ z`8?rUatV~eaj&d_NX^StFwi$^kaG(r<3@Y{{oXH4+pvBkFxW7NmB?bTFzJ>7;O%^s z1aLjqbS@z9#`+7M;f?oXJ{N{oc&x*jr|>MsIP*j#)3^F$NlL^4d@Nng#INW8P{gf2OFXgRyuM=6~qG?RY9n5qzj2|wGt~J`2>uPJxIyTdM zh=nffk5}YgZC8FauBagcMx@YgcB2({ zG!e{mM=}3>H*p=S>1kD`pnrUdIv1+GJBQe2_515Hj|k=Zw{lB$B>Jeso1Yv?P6sF~ zN|UgTL)#&gGX2WgRKUzeaII#(V(CQDU!GrSy(kU^>f^Fy-U9D>wJm)7oWkWn>$h$L%N?$7>mAU$7LO{A|dXw z15(EOt73uw+RLj1*_?f!)~~Z&n;vncp{t_U;@hcrLzK;jX3lh->`K%Ol?Ya1A~6B~ zO|31v*XL=+HimUkTf?rvDvE)^%!Gs)lra_4)R`l}O0hdGyfs!`Km9jWVJvjytHEbq zxgPYd^)}}I$uDi}_t-N*gtZFEAXNC!@vVHqF>OJ~|ln+2nd1fE}?WXr0zrXJ|x*PFw0_Ad-!%*}?6q6+sL%qliyAX*(Rb`#-wBK}nIu;J4 zsxLZwyDt4sDUeUUIdHy5$cHDW`LVJHXEn^_Ya#uNc zFtq}6a05C{FWmJ*SKoH``p&+HUjMIWHpKcQ{d#U=<~JbQ_PuV~T~i@n zV^$MHD_>wNV$883F-8yM^EzkF1uOYjXr$}@zhz!24AIt$M%B^Z)9NO@fT})Ph|;os z+3T-v6lTJU)oJBDKp9x=SKVn*CS7gLu@Mc-mB@ZZ*wI*vK=-U<-++1Dqrl3+}-zAO2L*Z1HNBR4sIGMI^gw2EmPtcR~HX1-M9en z2@M*eD^_mn>ciCemP&k@a)s!QxBz=gyW?owz&AxXE*VW2F$% z4@7EYI#~#jS~+$)wKNa&2|Mi>4(+&LCmPPo9;&i(=Y)=A2Be*S@`j8UUlQ!q#*7i~ z;vVcQGenT0X8zeXzOD7h(_94C*vT8;R%razGnn^fYUa$1zmGjWe8oi#>kZ!={uXs(i@nKmyyo~qdf6J}w0;lZ}!f*@I?HWXG>7kB8 z0zt-S7b;znx@2^$@JU5 zMd0MoG}Qx6AVF*kv-9f%zi6+4eLAF99y6k5$=Nw24bq3xk)|$M8@K6)7i<~q>T>i1 z`FiMC<^XpL7OE!|dmz%8M$jhOjnd0&MEM;xR(Z>DJ`BK$E+#Doe9OF%p}5WX z7;iq9vJL^F3QaFo!g`@zr`*t{)VEh0hI_@-huqa_+!2~f-9zUX`1#+=3b$4rHDC@B z%_aXl@qHg{qZ>^7$Gh8!{>=G3&9^+!4D_#k+FUw$bzdID>i7RG&ueiC&E9wHs%d?l zsRe? 
z4Y?CdZpCU$_v=m=ZWlAP^^O2x!R)PDC0$ zI*DIrOR#u0BcQ=i6qL08)ze`eYn(%8RMiCAOl2q*&pZ0F3CEZ3{=SThub>8l~4@BuEBN8k@N0#zu~_k-GHuE|S$lNDEMl+Rgd;)5(8!Rok^H zJ=74&hpF2qivH118HenfpFYZn1XO&ufeMy?_^Va$0oIB0#IpH{b=SAQ{L4OVas>tF zMsN8>VcH$>U$y`dy|oG#@F39XKzipo{mi?yfThWn*7>ikQ3vx|a<)RKiH!wz`z?n- zRaCN^Xk%fRE$>CClya#+)h=ZjxQ8@I7dj#4_E(AW8!0U5_HTL`|G$b-+o*) zK%Q^-AqPYwS>Dw5LUwZ-bJjG;!^lGs3oREvQMsWkvs=?DAAEbmS>G7nu3tU+pnvW& zN^SfycOQx-giECX?IJhx#a<0KBKw`@FYT*tTN*}5k=>nn2OFTDZ5^1i#}{h~9+Q?jsIK(R zZ7(U`tj0NZdr_SIJ@VTXAHjQ0qc9tWtfIi6>nRR?4%jXT=6(vOIaLgW!95BDneaVBsEbq)=@X{If z5UpCrX)l_uI2s#13#+(oaT66ZvvT{_Bf-yi4C@z0j;GYoH63rBJg7d$3%%a-D@v*(dv2EIEtQQWf%gJ70Bx30UvSGlyLXw@CWJ55ljb*l*- zsmum}d`T`K>x$UR&nr5orY;Aj+HjupY!w9eC2N_BcImE`E0HVq64}dffvy=M+?1{Q)6)h#4(zmk(F_i9@Xm7op5>o z$!rj2=bZtyaS87G%CKc~_tS0PQdaiJI`fB{zZ7s|e~rnWAfK3@?z-2s&(27P#Aq>D zo}K>Wa`}f88YQjap|6hX6fS6AFE<~OlHdC9TpUvL}mCR!>{>w<{{%X9*C8iu!W7bC~XVp`ij zm!&_JT zFqJ8`Mz%%X7Pl_C32r4XGtQ+(`$x7ukSM5R9N!;^(nxDmYmAign*u&TO6VU;5FbcI zCU3^s&Gx*pS)J3@om?yzz5;%?ZTo%xK(0)E|Er~t`7T_r=n>+7K^+uG&p^D``pW87 z;};CcK??PIl%ErKY-Y0h!oA8R>dT@dus4kRJ1i~VEbVCk1LkAxZ-(&_lJT}Uo$&4p zRsC)U(EIu!tuYyA(5N(iP#r|80+>DZXqDAU;9$f~T1Z0(3%`)y#IWz9*jo{p z4LJs+p!SH_uo~B~dDj*g3NbP3ALml~BnNjZHymWyje<8_dLK3H_apDM&F4nMm2`3b zVm^ep|E{bMqYmPxnvOT3TI-7I!*wi&;?EV`NqQ`r=mFraAlcRTC3l*BzyJ3aUl1Ch z6Y2~ga?N`Vct?_h3?!guS_&qEy6sE4AM(x3y^Yzj^FP4ob~LH>3# z;WO&tKWsm8xcl~HVxtvJXw-Ny|6-Br7Qt1b?g#Gct5 zeJa4Dw(^`d=Ixi>@x#|HR8!u@#vOJ(3LduWQ-tcRAGP>b?))v*#dpEh9wk>K*-`|W zAOWLgs3_j)S`7fb(oF_flB*pD-D5E7{u6Uy?>Xmbiq%>hTWxdy;WHMBu0{w17ADAA z2*Z~;6HxEvlMYTVE2UF4Ni9k9F($PtH*8w6B-(ui=5RmbKLIgUyTN4KV;x#X{?3wI zYS(=a;p@ma^C(vOwv#31xze$fcaHe0+{3k}c0sj|O?-|xq`fvS*P9R|D2@*{p z1VS?ngiwZ%00{=93s{B@K`dZI#WK_oAjA|(LQzTxh@c{fATrXMfDjY`%Q)D;0UY~p zbkt{`|NnZPbIzO1y|29EO76Yy^4pwu12jF)u8=FY?I$pYAQ!3}My6IOIr!bJHf31c;_w~L#R6X; z9O^ruQJ|h|;qQpSVh|0MYk$p2_)a1@jCvzK#k58~5}oNbH?%fx%qHmc?`QPt{1@@GtSpezSNm*x zN@%zj{Busujl7}Iup#FI_+gpmOyWEVNWcFonNV$t{BvJ>K&}@uY#e3DfMD_TLO-yohQ4M*}FuJ~2!lWN)F~uVIJV(!JZXvBTP!DC4 
zSl_`4H7$ytPaRAJ&d_pchJIk>F->q*P_=c4#T;r)WV9+=$wS|}c!&D9G{&Dcc7l_U09mmoc z7N_&gT0?fv_7n=lkBU3prEw-Y zyS3X^`usix6BtYs+M&RYJ3OcvG&sz13PhR=3v9-vXnRObR(1Xx=2)Q*&s!ch8ia13 z2&SkM@}B`3*8R@7>x|L3({*?npmhlEIXixTQq3&7F5`;?Ya zav%`nUq1He#-f4^VBNJtP9N?y-dlMksO$}CJkCOv#KZdshf(;x7D75wUteD{Hk_Nm z)b@(8#-Jq-ag13~QkCv=bjfObYZu%AGbE$V0VferIK=QQbl&QE3Al2Blv}6ntLA;+ zOXiG-KDG4w-UFjae$~APySq;jGnOxoOeK6dek44~Dxx2$HOAiMaJoSjqKDGYvImWr zH3SCJio45aFS`e;2Pk6pZ-32E^1A2Vb3;8V&RZLFu7YtbyxM1P+4}eisp`QIR%h4* z|BQ@l7>qmy{O|%AucM>6s=H9j*9-lyM+18HS+hw4&aaHF^Miu?5E{PQ){{1h-p@YH zy&V+r@i17i>tVG5dj{7ob2Z<>kt;66%<>#7zHCeC5Rh(1ZvD-RCQk9bckm9ymGedh zFcuEyM+|3X(5bT7q3D(9Mt4j_BNse*=+wbdR8iWjK=$z?-+7WDRdM zSS#ldvMYRUb=E+fNls?EP$BYUOlHV)qJyoOX=CVq>_qP_C{d zx5K&9W#h{2kXE+VOJ%M8i(jQCD@O2nfs|j=^0iN zn}>I89=E(>+bG^vW}NMiXAoL*d-k#>Jp5@_`09 z>-R?i?w)06oF}#0H1Pi|icgf5Vu=^9p5s=AhFIRX8oX#k3M)GvL-(S@(9L3JtgWF! zhTNZQT9u*bM5I*nvCGzR+X2c2Cs86Vi?MUOWfH*spzH3D5?FMboqyBh&RjQjbwyNG+}S(6sdCEStd(JXEE zjLNR(Y|LDH*;;Z=yZF9TWbtL@uL~hBoOZQGW4%`b>sCEB_i3HjTT&#Ni2955MhIQn zBML`m*gs*f(msLq-(tZ;v<3s@Zau3DKQ(BT@==LygTQb7b)9Py>xQ&`!_2o+XPDuY z^*!ya1DTY?g!DviNd=u7gLih)IWFVE?zlhF)a~s>F}msK%|!#(c0Im9Fr$nSr^3yj zLk0q~+PWGjL(^wTpuGe`ToHsx;p^kQBe(rcC8Lrky-dC;YME zFU++LXl@}`8ariW(Y#9~Fnn5lhA3Nxs{${% z&tv?Frv3$bS?x?i&yQZ`sfaYr?9t@CtBqFI0&GjcCh6-mG9$Rvn6E<7(-9x&XK%|& zS1uiyRr=O(<>RS7iShG~Af}8N>o*$8VQ3(%Nb@K;7z8?Z0^1&2e=~Xeo5d3q`nH(N z;?H$_ceU0PDdCdL#-$3xx8f98slV%=^8~O*{;^Xg=t3gH)HB?}0AX)_$<=8{#yIE+ z(B^d+R>I+ly3;L=;s-Cx4j$hGv1=#=u0E)m4^~+Z?rIh0!qTQUpZqbMP(q)pn?eZP z9)CMr)jPNuMK^!{n6X%J-n;$R8=w7mc)yG%*n2o?-d_om63Tq^jghjR99q6K)ik%*o`eeW(l2s05R?4SYWNbA zAB`J_abzdUsmnY?j1#>MBd7@`3ZreO zt&EECjKl*R^BmcPfi=r!b84sFq z_hxfHWp&6N0Y~1y{I7sO2Nsv;y^>Xw+rd7HQHGPk0h6hDFX~9(E-Qf@jINm|BLzR0 z>-Y5XdYp}HIqAk5=X--!_{%i2Vy`?T1OTVxtsz+l36EmvdxxfAb0qjda&2YvXX(52 zA$Ouz?2R%EgIpW$vous6H}>V1MoYDt$0L?nvZLGgh?PTB)DYkkUAHUxDmjm_-WA_Y zCVQJwk_S3BNf1x{Q$yEkcisHpxATahU+{684hxW{D};bM3;dVYIy@mX4m~*4A#7^2)u{MIAS6;U#oustoK1wq65y4S 
zoiVT`M|2=T9@01;^@9B}s@-b9JfPKYK7)YrUVc>?H6V0GSja+nks2ElyY^p?2%j2! z^6R~~%%t`6cXIAf>KXr70jyG+dLnqJXp*GkVQL7V; zS{Dk|w7h4>*3%sbrzBSbRnJa2yfzOhZXuLS(eJsyfZ9I^P!=A8AHhfmnMgT&+=%9z zW1vXYH$0@`BIL2xopq%O3*SqNz2aCquT7DAO%&697_h?`twsfW9>()DDum-Mzohmapq;o0WtO>5(7P_%*e%0yQHmiqe z2LfB8C$g`-_3(%*^;$OZH0-9}yW~Q6rQ8F3)$74&%jB(<&pxJ4y{UFG3#5jV`+pnu)_`rXKgNnZGNr&pA8>pc70l;dQ`0W?75BPU!c!<9yc zjWkIH=CQll^6I8qY&=Go$en5yS8Ylv>{J82(D*k<)HdgBv=y-Kk|$m&0b`d)2@)w2 zgQ$<=q!|{z@b}Czlud^cU=~u?qw>|}${FbcZP$Qt(cjFi@e#4yF-ih9lf`>|1cBff0r@yCtQe_k!S zU3*j(&`T#jD2D~wLn=5-Ld!o_PZc!v4KPI z6&x?@Tl0}&^sk{yS*5nLnfRKL0XB?dN+vbnpEE~GVEe zrKE!OS4HMkCSQKOy$X9Illl=`UPrXFB)|VR;OFV{l#H?mzwBhS@8OKEZ(cAKmS$DI za2DOoIq{HnH7Qw}Wn%TjkhL@0I6kS%C+PLM-P_a}Y>*gtXbh`=DMq*+dp{r9hrjJL10wJqN8 zxeyRyavGm*mwC`OqfP9o88#|w$xKL5!7`85MkrJ4ibBy~e8t^HDHG{zJHIAKL(j7e z)I;FQ*}BwE`l6YLQ>XfjQH^`Gn*7>#NxpX{ZYzmMe4qO5)7!ASm&<03Jym}AHsL8c z|FCAqTYtVb7zp z=x&fQsE<)`UXcK1?Sl@r)+ka3o`eb(A+b4zG(U$c(oG2N^_y^YId3U+wb*q={nGW0 zdle#w*a+MGzx^iFF)G1$HAnSXR1 zKb3y0dNr;*KkDP}tiRrqU@aBA_P<4V;CbW*G1`4pAd$d!K@`kN?39sza4GUA#RF!= zS2w6O3{zzq4y266FyV5sU+&tz=K5m^{E^d3t_Ocrq|f@!*JUQYQ3~xjGkN;ah*F^U zK|)P3PZf(wsbmv;93gQ8-5DcD|3~`aD|J;jPj9JC|57bW8SV4^=VZOcVAcV0D7ll` zB*tIM&;flOTVhR%vx&4{2)|X=EV#j)sUIym;1<25lYZKHbnIW9o_T(sQrt|G>EJme z7sGj{x)Oj?HH_cuW3H*;r=9(V_aMgOURE^YVDvd_pw20sy7F`Mq(`Sx#Av8XM>MQ;%M-73@#V9yAqniJ`SuHDm zoSn)zkOKM_gO>e~msd)Jb_0IXiU_x(kHPIgrE`ut^-&r@_j!a=v_p&ErAQq|^D$K= z;q;?ZuR1#7I1}s)aw1H`tR5v@4CAtxT3$>rYWV&?_oZg1jF_GNWXMD`Gj?1MZ^qFT z14YbL(}@j;1h1*Bc7HZU`}#LwhS;%YcLNt~zX(<%S+fI$aC{7~x`ZTpXG_J6F8QUmLtTunSLri3@3Jl~onz=UGWM!{L$ z0bc4EAp>6wT^A20M z2BTo6nOY+&Q$1%*bTqnwtvdY2vENS@5JEzNs;;knc7NS_B5#E-ejR4cfKMGf>4O3d z!w_j)^TiRFGnPKj^cp>tj|aY$ml&Khl67LDymrVrv`J-|WS_rJaL(?doNp23(P^Y z0Cc+p;NAu_g@NL|$pdGfq?g%dp7L$0?(V)Cf7l0(X3)v5K!x*2ATf7S2=Lff2bY3s zmc)4}zwWy8hr_L-8KFs^(RIrY6>o3)dZ}64s>AEdwBL*FHj-fd;n%#8^%7LwVrvIh zP1ie}MU())P+W4;d`f|oJ1o3)tHJFN+Q(Do1{!we5$9(} zh)&krzxNw`en!x%GDt+kN~&}DK2VnP;@^LqR?2EO0P;ej;z%q_1w=dgCLq%cuC0K} 
z&wr<--;G&zaTaulV?WBN8pJ3|xBpA3eDmJ|hc6&-&?^2mh68YkuAYTGzvbTV?YZ95 zeW|0ZWh(td?z#C3i-S=cl#}NU6+HrQLQd`ZHE7^7_rocSkz${$kkJgtZaicAstrrqI^0?(_Klccjsu zvmPBG7n?5-5%Zs_z=>PBzsox`+my=3b0K$+V!n(<<0$pYF8Bbu7Gxl_WLIHPoFy%8 zGk%I4D+#C{4I*|)OJ+j{dTx8Fma5;hwkVPl{He_WHY$}u+{nkCDtA48(%NL*b7xqk z4>4e}7nG3s!m|kU49!X9tGzbfu@x-=8G=BhC>9y6CN+%J(}=S^0sl-LW7r@-#vcol zr7bElBRHlc&D?UH%~t? z(PLnnk?rMrps|oZ9|qh67VjmnPy+jMN10IVTx+noX8s^e&j-z|7sH5+;AKXE6yPQU zTYLT=LdHBxAqy)^#D2^yxdHnr1pzOmrmj*PKI{SKUzJar3cRQtgfw|OcK58*=47ileBAfg5&n(uDZ7JzZOUueQ^08O!PC-mys$jgcSuOA3{U?@GHr~? z1Jdw}3#AZR9vM+k7Xl$T6#~8rtLNi_@;ugXpyzzw7WeF^CUAm7H!6HpKZ}Wny~NUA zC^9GLTL=2_{(J3N7^=yxhkN6e!b$CkhSu-u-`tA4k{4&A%61|kh;R!(??(40N>-p~ ztTI!jNHTnQ6RH#b%H}UM&<_)>;EPwjHmL?=5Pz#fep+k!&F@+KPU3{@o`$%-r6o-= zR!fJfgM|_cKb}>dkW|QEi|*A1l1$ur)T*TcobH3oWettV=Z}FsvlBkgZVr`;gXke= zcV9U#exw8>iH9ldj9;OWhM_bEf1al?VYn_+PYtJJw%csgyTvh1b35!z?L)+vGXLGu z3oZ6}P)*fV*d)x6T&g=i!A|z^FTit^yLFlxl<-oFV`r^P=dCXVnaUvJZum9dfiSjq z!nt&{!|LlnM~$9%NiXi(gHa0&3>~+QRuS)O{xb}5)f~TLU_fgCMN75SN9kAS(_-m4 z@djWoD;g7KiQLN!G`93nLrn~$)iwo!s*40tl zk1X1szZ;$J&!#c(Nd|V;TsOC03kSwcz-+a%3e*W$d=qDll=gJn=N*pH2L0>sAO0U| zf6V#PT2j&R7##`)n(a?=9?vpsJ>zr`m*Es^i3k5v565;R)9M|*T(Ns=iP)-EL7YI^ zzAU@a=1k1$wQ)Pvr|q^esL_;r_Nv?A{J8c#AVaM2#jgO9>fNP#-N}5)ew3D0oSjHM zP$WE4iAGfj(rvsxz4IFCdRQ`ub=@`k1)_QEW%Do^Q+kr zmAY_=C=BHr$Lws>g(r)XIZP0&#%ES;Up?B1n;zGz&jm}(WkFraD2Zg6q7MO?;a{4a zMf}%AZSh&6vfk|gXnV66SRZj-onor3Eh0G=16+17Ebb^5DqudPNYP-I7_NF1$!5aj zxcWqoUr5c!EIyz?oa}9jwVO&I^Ep5h-=R27ypy;~uYZnievuKJRanTGH;d4KYe@5% z$mM)*(HRjcPrzP{Z|13iOkoyNms)n;f7-fjHxcI}2?@K{+!yJ7;Z+%mMpMf-OtrgT zdHP7G5k*di21A^jN3B<4+y>?(xq)D1lfA1sgRMD20z+h|`nuHLs|#1W4nrWxs^(gg zJ-Can4BPe{5CFZ%fu2W0J*HK!xf&csqm6=iqN#oOdgC(w^({Yh@N1Lc*+S%)*B;Dlk%3goei9wq^L^e(XdBr-5cCSfueTwS6R;R=C>*{ML~I=NDJnZiHb_@%L;_wxiH=+sF~ zjDHigF8d?pKwCKIhFir5yCvUZrY&6n;e)(c|FLs$nO{@&S)J$P=b%&T3szPwIdPC{=aj#J| zi-hKPGskxmi|yj~;et7)Z})v65!d$C!wQ5IBHqwTMMU6@B>19ccnoyIe?i^KC@Jh! 
z=AK|YcSQy*Y`+96VHF!5Dtny+=f2 zn#q_JMd=_diB&yMkzaXSKVwg#Prl*u)CG;Y%MW{z_P3h)7R}yP%-`Oxb3jux>^<$# zc;i%Zg4`q-Mb|fR{8qZ~s;*3Wpw=WowaD56nTNe_sfFAs$b=w$T}Gi)O_~{=;szNC z0So5UK8DNQ3_US10J`q}$i)pdSJf{U(RWUFOab;Z7tV@c8X z$N?+vJoMrtQq5qE$vnS5+>C+q*REGTtYw#>6y+5jKJ~gt2dkMFUz81ca{9_8ojt2c zf>+Jgjx?>sOYG+igEBLTAD$K~{oqbmTAq(q$TvUC9iFkkRFt~qFsg3d^X+x12rO-e zNk^1=SVfb#3s~(~0QLkkR4&i8v^59Lsc2Op^2$k1HRQ`Q)0c}3oi!YfCw_DG-Z_T< zOXI$BM*ZKdkKlb$Q5sAOIgN`gZ6}rvgF3zQU`?aC#OKbm5G9<3!MDUO3-@+0-l8Y< z=gL&l@@^xvdis< z*Nxwyzc!-zCuq%V2xC!kn$06u*x(uNRk+R1YvK=NUBBD*AO5jqR-hY@rLTd zp5Cvwy=e%t;KX^a$F&tfFc$qu=ZojpX4#dY*IK?wA}F8cR73pKo@r$PKFw6c1g>l; z*>Bu|*-L)-P@vW`$zDIb<-T(*!T&^(9Q3<{LQBmz>Mj0qc=x^A#cA4{K%sg?wxRB(7v4 z!aym+51zzBmsSg`;iBuuA8tc#0Xfrleojh)Di2X!V>wS=?sKhHedCF-{r}j5 z&%XTJTJ^u~T#5|wHg_IhQ^z71m1O;>Bg?h)|KcH zu;-O6>}l+cO~cIA2s76puuC}SecjZ=?H-2Gg}{L49b8~sxO7I&9$>Qn&HqNk<;l(CAU*p+BL zeR`w!U0s1{JIVay>4%%6zL#RNWAg*sb0qPe6D6b;)2T&}**dqr9$|skFWf3aGHLGw zXNul?hZr%mw1|9j$8KxtIDNc zGZ7lCE~2Lrr`9#OXYUWZnfX`Ed#kCwzbnF37SuYfol9sN)XoQi=>@Cl!Rcf%)~?r} zklRAzOw{5%=kbo0Yceuj7Z_>%%Eq&&szpKOu_D7|DH8}_8`8FSxgD&==gu{Vu{r%-J*y{D zM-7I0Vf_+m0*Lv{Mhm)@_w^QlJdB}*2H9Q*C`*wxb@RZf_3n%>#ndo;1w=vwxCJ@= z*QK26>1(f@?)fl5xwaWp?*lH>0{Ak8G5f$pqGU5GQ+=}PQAt)k-%pU7mq4kFjl-bi zFSI8{Q%zm@_Trdv=-MKkhy~u*L(I&fnFlf#WBL?Jh2Ta4`l4lG2cXU97IF(;R2v?X zxwwGbw(=#`7j=A@r3Om3n8?Q}dE;K;AiR1l&dX~`sSDx5fan`&>a}y@DCkKLl&QIZ zgL5d$X{pCOCxL<)EvUpjl?{C$u4u3AI{1@gBNR87yQzC;Zg z2cp2N@F`vpa15Yxp7&PPL|A1H_Y3oeRrB@sMiA6NV_Ej*C@@~&m8bmL-V!A=L(H>H zrSZwcfJdw*$Affmt|}DMy6yy6dqnbVfgzp+Xra!waW*vs(VDZTjyOdrGvd0}@3fJ< z`v1yV4{C-)M(-_co{FCB^uN4m7G!N+8cIWCbdHy4Z@Dfo1r-1;5DqMkqzcY97#tqX zii$fX57ne9xHYR&eqdK|N_8!l12{a^jDn54H8N=c+o<_{;A`FI@zQI*q`mw-g zx*Nw6bwBqxJH+g$9(kj9lJ;{qh*vrhuRu{jI3_bOiIB0det*4lC|N(quTV2WbI76n z-&amvICHS~bpd{q6o-Uk8j>1+4lY-SjiZ)=urnV)46S6B(1JO1ocR9=76uSNruhG> zHR!ZEj!Ax!GGogF_yXgPA*3=fCq;f6v+&BL43? 
zotp8#6}aa}k_C*DjpLE^fG#g<2P>^+Nhp5%OUbU-L^6LAj+k6rjV3ix9&RO~ zzR5YW0uz|)+RRH?A*^^w8Qzr4%l+5nlb=hl=C{MoU)B25ljCM3Gd#wI?#X+{*Egv9}^BNsi2d!UJ2lN=Ht=de*6ZP0Q;0iuSdjFZxAl8Hf#avLghRLo$E43Wqd=b1f6XSdfjpb`|m5(d18825N$x zBd;1*pfWw=*t&enq6HOx_F(%}DjuFcwIu#c-58?u%);n+ZS`|%LNYKrSmr%n2kB;L5KFm#|9SZESHtdH zxF795M|bW#tM!;G^R!eW)c2cq;w`Wq5zQZc{ARLYIlLSdl+Vb?;W;GA^n5|Psug-1 z1+-KQBk7vCbaXJfYT6f@4^u{AF7ZB?za8Yw?@OH3ormD(<5yFY3K&Ua>}p7lIhH&q zV})=A7T=Jl!O}t+thP5`k~D#1a^xcVadLPGm}={EE8>iecm}kzaU=YP-S)T=fI-aC z3qaiZ#&%+uzF+NB$PXzt(>DsfWnA}F(!V6l6L;U*8v}b3PJqv5*kR0rKoH`EkC%6F z;Jd#*FiH68scg2422CBk{q7`ecV~ayPc2K#jc zO}^CkPHF-?=S=NGv390$6bHI$-1RJ?lCb1d{mcK1)7d#oOMw4}Y+8VVyS71woQX_Z zX?#3@P?kRc6HGB>q+4cd<@3_|YSUs~kOyRlow5u9p|N3c0$FHDcE~L?t*zo$%$2gE zXnZJ%=bgrFU!lMlp=d760?f}x+1pvubIMHD1+t<#Tu7%RIzbKqygG$^i!A173_uyh zGp-N#5p5ue8K$E$g(*l^C`hbtWqO!cTEvQCC;UreD%W7*HC}tCc@Cw&~h^cpzyq!tU8W!-%?^q>&ZNL<#wdT{D*6)lJ$i*)VBP)C3^zclHuB9 zH_f=Tf*|i)nrmqzvho?-3(eHjj}@9rq1s--$*~?=bBZxeK{JhN&bww*wWt7*51dgi z@uOkA)%0w4#7At7N(^HgWSV1XDV%_rQ*6LFWy#Zak{}ErI)a zX=#1ntR3Z6##ObQH?@8=8z5E&w;dryH2k{L?G3G<``Xwe$inJFog1cMyZJFFxDuNG zPi(z*L(T5o>U(I^uh1e!9E7hs(-a?Ejojr!$(XsMhKFuBlS~FHE(uU=;feN|LDr{# zTjz2-yj@VIEaz&%MX)J8{JJ5(e5L3?k8)!}VcHZr&T8jI%k>>uAgOh<*3KSVWAnq7 zHIV0Qs9%&+ZW&a5`@Z0ptNp)9%PPpVX^yPLe?SW-L_W{j7~((gz5Q4t(>r-k5&xeL zu3L->ugkauYrs-mFVMST==cJ!&Cu`XhqE<{Xy&?F1l>VAR79$}YlsEXO<8dc@xj*P z6p`K}Rwl9t?i}}4Fqp>`iHSfqG&s~@nyh}8P`L&m26=~25V36ApDk%#D)4(S z>|C-1hKbWzW^w|E5QML|f{Ye!1=;XQzraQ(rqHN3v5Yezlae0H9{UG0C)JT`477T` z$EGCBNT+`DJa5j37!wQd&75J@#W3I*5mk@@fFXjAQ+WN6wHUwux1 zG}N;-&Au(JXJAHxqtGFwAP*V-g@Hn*3D+$gFIy_8kzYdSDVRF!Dtp2sGE0UlC#m&l&Sdma2-ZM4*EQ| z=+x4H1gIXih?AA2B@&n%tTm=6>k#N@dFlbKOcLQqR{~W^f=ip9?c90ff!E=Rdxco< zLalShK7`x9$oS`s`J|}67SO_!u_}%7e!@~YdeRN;Q@LwKWn4>Wy?62YbRhHivYGDj z7XKg;BzO2E8e?V6)I`vzBl^$5wC8zxrB<&6{;^2T8IFW=e<{_&pEjkcbb3hIm;+Ps zQqK37NShm8Uvm&xLrac@hhAB?(V;V{aQ;fmOp&bjR$8dAI=yDPx95j4P)i(-le4^X zCa5@{FMu)UqsVy&v|fW>4U(yZv_V;79s1zSL-J{Dw-Q!+w>1 
zXSOJX1N0~}cJTn|^F9P2R|6e&ipnNkl#{1gSca4X%LVCMzC|bvNQ@H_n(nh6W=<^P zrHtlyQVoP^1bWI1uB!D1TNmXUV5)HCA5MuK@|XGh}$jJy^e`&Q%YK+^q46-XRr zz8x{iP1Zm(L7q3~`g}*&5`TD~v9ZaAC>Y6PUXV?Q%LEFc;U2avP~eR#slWR+}VFX%2=lAF0naJ5QLIUCL(7 z^T_atzy`|Ynnc)?E=HXA@xt1R_sw4#-m`b}sY)t`-2cWE2aS5hZ#Hg5$6&Yg(}9QQ z)Khoky2}a1pxM69w-U01jejlPO(S0;1P~NAelbMW*yDp`saH3Z`m14;0Yuo?8hT;2 z$Op7=(zGYKV`Zhz;pkAZTgkp_k!kM&O}b+qPuY3Ldl!vE;vXg;qQK92|5 zg~`%G#2LU*CcrnCH&Q=MhMsrXv=rLvpKZQ=tO=2+aaq(LDW5R7J=q3bi#(8X?2o;P z=LGvK^=@@2PRLIO^m+=Ekz&je`d+W4U^Nx=pJz;S&2wKaJahl2x0ocu&7myB_Twfm zP$0J&|98tV?*HHQ!JBA^G6iYvsWq5PLg35Tuup zWne_!q@z;`Fl5)709V9i%n+K$u%VY0vOIM1&^A5_$quKg9aBl|Oa9nlkG)N$8AhUs zK=qnKQCQ(i7$J{&1xW;kR6GPGg?=5yLF>#USWp=2xEdq6&JeB?UI_DzQ=)M34bLKK z1=HSuM;L^@s2eZdt#SQS0$pCUcI(QJIu7py(yD2;Mzzyn)~1C%gD2~swv^PaWFuTW6D7X(EcX@;$zTs?{27KGkbh#|AHDLb({b0eNMxCoSA0Ks>Xlu@XO>=jWx^Snc;s@0x-5f3rI>haSSg^#e;|2m;l2 zWD&7r3ZNc!iZI@*8Da(M`Y*JbyOV%ddpsoQ(7{%)GI>XEGe4o4KcBYG_m!nm&o3S6 zu0poTnqR!^@Lf@^!J*)bfWwu1XsrEu|E)j%hy)!Nf8!9<^ey}u zM*##53cnFL_6FH&a@k&mVKNS1>Ow<^Amw zuh=@{DpSsjal=zG(3?18XB)2mF^E@xC1P}Ks&j2|V3D~Ma1W)L4?%~3oYgX~!Df|A zy&2^kQFzo%W_d=^TXpbCN9i?$#>fDQqI;o|td7Hl4jW;coZV zp0qnDDFg({qWrh!zyNlcjHQn({Q{Z0YFiX=`o0(%RDA`qX6Wwij2_ zJ|+A{%eeQZSZ7Q6!ya7^h*XrYg0N4f*xRE80C z8Gb$}Oor~>rR0BBvAd;PlO`6|1pJ0H!k-h&4$s`u_cCE`!;9_-S*F8e~@O?Ps+_=md2+S-VEC`mA<}v`G{_M$o88K z3}I7m1uf(Crq17o_B=mHCf~Xb5#$PGDVD)^Hw_;e64? 
z;!m#GXn>f%7HCa?Q@bPMoZ#z!GzxO{PJIYxVm0zki$O}B*P=s6sI+YxA@@Bci;4>vfnan|_FV5i1q|DFoIpA`>9 z%4$=MeW!^Y9i+Q$vrEF-by=%c;ly1XM8sZ!@v9s?WOn~4p*6X1v?$yAnRS0?G|%Ou zEGtmoJ?NGHdEMvnWNCn$)iC$^dcpH-6k_>OiciaA{n+g!H!DJoAaJeYt$RfxuX7FO z?^oc2XkcnI=FS-9%L#rXY{hC&XHKOq*-&aaX&B-8A+6D{)w;hY6?CB+{Zde=oz!1%Z0c5w6wFtxcG83HgYR83WsJsMGdfS!FSN4e{n>pz2<0!9(HB_i?}a1= z0|BCs!8pRgt|8n7-Or8HoBJ|CG==4mc^-|J`7^ytE>{*17(#~cbi5ysg?fBHNUXRm z{aGaQhgdZd>3Ccj%=`+Dc!Al~CCQPRsF`VrrM{b3z&&~D^% z=Btt*fC`_-A88!!eoo=hTLrMds7_6kHzT$eLjsB-!x#NrU4C{GZ0E?Z%>|7gOcQ(Z|DF65m{8FRJ zgiGwH_gkMmPm-1EqF-}gP8`XaH+__tmJ=dJJ;>9^5+H@RZT_KlbCh9JtP$2!B#{ji zc{9Xxw4@n2nkKNu#9Y`@pe{Cksf5?1dWNBLm1zBod+o=m$C51X7 zG!lu^w3f;Mk#1Rx1g;Hw+8r5pzEU#P2ipAbC1~z{-KEj+^*n-YyylT{+t=0%3lOx? zI<_;fzlasZI|jFXRqrOoK4&*an~@Qt((=wYPGaZ=S_KiRWCnGfxZ-tnu5c3ffb#tC zvnNjH4vL&u$8F*`9nS&W@A`-FL;K$^JZ)Q2sh1$7GNH1~ADQ3m%}qX<;;mgvTrKxp zmN!z4kRuANRA=;Ejvz9}(pFQ+u0&TL+ef_$`W?2Mb)L*yY7LvQHRN8oZL{6&o=9_Gv>F;ZYsDL<+9mxTFNinpOL0@_YAE z>l+POJ-dBa(Bk)c>; zQVPAEUO8-LJw5=Jjyrep91EFb1!NEhqb?eBdcQaJlN~?ScaRMF8k!b=)kwE?e$B4% zX>{$@MC_jN(2Ir7I^2{~f0VRUrtnN#_hw+;(nv1?p41dRi2X}xjCv5w)KQD-&&>Cq zzt5Y>;G(2hq#9@?kb1XH{lenfQAPfBSk_n44&HD;f!LvR1a!aYR18xeMeuPcW5S6_ zSq2UGe2N<#VjeSGUR-yrb`fprS~!#{pNna&qsGf}(^ARVU|ohC(;N4no~fH_l|{Lh zBB5?bw*Iru$BkJ;cj<45ufoFE%GullGTj{!t2W0e!B1uQ56vA{`n3|Y$0D3<)mp

E&=OAb$Uoua>DucEenwsKiOQna`G1~TPvLY z)14MICzdS@?T0*x6a~;foI!%fD^cufZ}XBgQBkM=c^{i}7t450c~0M1CJ(eQhvZir z+@WsbiBJPQZHlZ9H3$+Hc7MM@K3i$&=cwl>l5avns6OBe!y85qjHoRcLE<~dEto|5`#HHD~GPd&x%Q5 zO8xPzksnTb&~-a#<_}m6jzL3e;%CE4IV4pkPmf9rkmD*J$VXX(P$K#_(FvhXa2!)qcNF<#aQ)On z$NT?>syB~HGXLNIZ(LE)5YWJ+01bb1k(^5f@A=O>LVR7gR7b1Z}{y zTyRaxy=-}prlGjy(xO&QrInV}v`kI2G~fGu&hPvAe1GS7IQRXJ=RPd2`+i-|>v~*V zv2DGYe)M$#b=j8WeC&IYe&N&@cBo9$%-&bi(tb2IjVrY}@K0`@kol%5gx?3vUPct08rBk40Wvf7-5z zJ~o&QmigNIg|{7?{`SL9JN1{g^!V<&lxz2d8h#@92+5-J`s4U|+1bO*ilD2O1C~bGQ1p6Ds8UU1{&O*+ z$T!x3+hh*pR`qjeZ^Q%-sE2XxxYw)~QuT~C>TYVei>FB8xp=fa*R+=e0dh_*cyoM^{r+i;DseEwaE+_MiGwei%iFvIln$1sR< zKs~r$n6FpWKGw0QZaEmd`?D}^xtp1r@V4x}g{kHIDW4|6FmVt-U@QPh7(`1?j0XHReXsiVF!0a_Rc@JywR_*B%~QR z^g&$h4%A9A>F*X!n?DQ#=sQc|`Ov3TrB3%oNANw~Yxt;2h0e-w#QLG)n*|u%Ufuo2 zm(};?Sq^WL~3>*u~xsN&E!A(j)T_?rw zwv8ORUW6Q8*ur{`XEnE0PJjD&_07FmZbH$rz9Dn(_4nCG;_X0hlT*2&6}G@@4gSWh zOzE`8Ah?^Iu>f}bfob^3ewdfr3fK1CWB%h86z3qycQMMO{?aT-4POOsLc18gSEJrXV0>r6aABXSDEiaMRL*9ot8udzCo;lG?Pl(pXM!j+ZUa0kYlm0&mA?1H11Yb}> z`0e!+8-4j=ds|CO-Ia!_tLm1{&X)Gpw$`g%mz&oQxh4KPq1ts89rj1|X4%pE->Y0& zEGbRD>HmsS+7#FosdvYywE$o6Rlw`FU?WQ%tPiv!$WA3Jr z>ujiArM~^kGaUATZU7Ee-aPlJv91CFPFv zIKG;5(2v5I8TpSu>w31^Z;dd{#H8jd(R3LJcU*z-$2yh3_jpy`E*;62 z&rSbyj7B+Tu}__R@DN%W3}OU|fD&%-YnMs4{G@R=s?ZhrNfpj%9aFogNLY|onO@&=YMAg*jL`A>>u{Cul?ziI(B1z^|`tU@3M&gzv{!3Wp2i2D4Y#m~bq3!x8=7utu&Z9hkAAgr3MUlfQ+q)(XtOehaZc#07{j&YCBD=7maOAa z&dP=HJ@wBjUma(DQPcswS#_&)SZQ>q5;< zy3z2}st-IQJ@j5ZA7@oaLioXQWH?LxSpE(LQon%0vZhGW$=6pTLRCwiQ4=2RU4+aqTtN8gshi=k{G){=3x84V(yW^`Ij@N z5d;kIyz_=xP~ARGX7-R*qT6|=Sgu=|%qSBMUM*LvQc>^xqcC*B`1A!_4=sxf60E!RFp}aQZZ>*Vu|mpZ0lqn?z3;Cp0;6{lqW4T0RH+twc}DsWJ9h5?oCmhu_nrJAo!KA2`$wr(1{BaO z3*8ZVVAv1lZYgfa!NDXXqOS+~WvdZ;iz+Ye{sd*!8G8XEVMjaOzIMTPurYoOkg@+5 zyJ-h{#^w9KWsASR`LR$$!PZ*=+~|=HLEiU+FXBF{t$;s?@^xt_Lz{|^97_9-H{JFu zx}M@M{h-?!-GntQ>`oqIBOb&X0Jp4nKp#^M5;>OyLw)0J-ud?_dP(`#O1On7x;8+5t0a%SC$DZxqflX# zW=&1LBXfY=xCuqzbA;YIMhe4xc74m5B=0(8h>q1374AW54AEUtss8zhY2SRe5!FjK6*{Ogh+N=j_nCM6f@_d@yGWUbZ5^!ZKJQY3 
zhe4^5dOz4Dd9q6MIk>hdC=U>m$f3k04jOECE0Jxqyo^Iuf0C5Qk;di`6soj;Wn#d2 zPYc&D&tM{@9IOsw6C)$be-pC59DwqyJnO=V5i84kOWFo*@Vu=%so~FGhEs zNAHcMpfjOHvGV(kt;WAA=1(g&0}-?yr| zZi>SRcd%a$Qx6_9Z)1zxlh$eAeXK&kFwL1sbFdfZ7de_sp7BkLKM6xg7dG=LDwSqb zjmcA*>2^d7CC%AW$^!XlQDYWC{>#_^%jlq9HqTpEw+4>Hnq0=GzvZsRwCrbctW76{ zfVXYVp@NXVosYVTl}oo4(Zvt_t#fw5*5Y{Ozklb?+K_yYT)5 zOpo2!z{KA?Z~ti#>HSViD6#rG@amZmT=HVk=koZep;m~VF{)7CsDMahTNKg}9YOkB z&QX0|aB7klH^iHUm6>S+!D+n<34IyPMx`d@f?nFroVH0c`p@NCDKWPBi@!f@ZV{mv zm_1dfG_R*;{H+S~7CI%CB;($|VTZRj-7Mw!DF+hv?f$)p&>S6MD6uq7CwT7agFYRt z0Bz$x+2)l{k1f-}!FhQ@#5AHF@dnvyvo-Ny=Rl+APra3)={ET&2x|7NlXpNk#b_hY zM}g224F%LpRZ|RVRWuRNliB9X7CgS`b0ttc?k`#7Cd7Mk1)oiRNkPF@J z-6!)gtcBWIf332+RDL;0r=$qUJazZ}T$udr_IND8#9ej%!Rb4D>@HtB^wUHhPlo!J zQFiyp6VlE~H-SN<5$6r}2w0@YXo^a^plaj}nstQaNM>1ic#s%_Y1o3w74?!gA8`ph zNH#Xg3ff=z8!v2&QVMY7gkXxyk18Z-2m}HDNF;55bA&3*nVnshRbkwvn7NeEG)@hs zjsnxLw?vc7LnUBqpBc8(if~)qmF}|#iui5yv$uBqK?wgruXtaVc?{PV`oXRo|%9W(>n0zNh z`lt|5VM$)wF)OV|vnIt*1w#U~$%stMEVCiy2e$t_v0ai~b#e$DN5s~4- z6?X*mcwI80`f9vv%$jcgxN0 zmt|)UZtnalPs~Uj*|K)`#-7i?lV%hlA{ZA}K;OXq)L7z}mc@I+^oS-6HRev;A_Q;J z-=j#OYREB>P6|XSR}^LTV4?RfqlFyq?YbW^p*{wA(N-B>>vhecLhk&z@9yTu)~k25 za(a)wmA64!laF*Cu_moL$0g`y{-8K^^~FBzXa&h0uFtoT(9;j8{xUpuz86YzRu{i% z$cyf#HfsGqt%+H!(3#J-67OTHpW2XHm3JT3S)GB_ z(u`u-8<2_HLTX^BY7Zi1lKf5#R+sn(sCY%P#lDP9c`^fgy$GwR<+M2}kt+Tu3(x*! 
z=DiKEd_PcupANFZ4a-O^RUI`!6CQOQzpekXH^BOsd($&iaph_#GP}*gE6*tZVBhz> zu+@DY=6qnz(Z1$qumH1*W;G=`7O9?J{z)sGh7EfCG=z( z{~4Ypt^#TLbzBp&*`37?$2!PV=)7HkumtQU+?9_uM0$2+XYcCNR0&*jnp$DEHz>ZE z5VR3Bm3IS!b}$CD|9NfSwr?u5q@|~><9`oB`rVr}E@4+6>Yb2h)8bg(n3qFq7kGTz z;y_w%wJ`}J=M7c~2k|j3W)-ki0V87t?LexL+i#z?5m1xMq*LL~#-FAPRT14DzSXu^ z6u>m!Ub<9;;UO!iY2_004zA0_nj!`sBS@K`Y$tff zdCz(GWC)G+Pyc;cwBC2t^owcVrewD(yWn_y*CXvj&>X*#Jc2lK`0G!<>+TQvT6M1y z0CIn{(u42Vn7hA6hndffj+7Qy{`@p%aEDmZNGSzjfmRLLN`SEWWs43l|2pz|NVRfo zxN)}OUe}R&HswNTNXMjzc;{4qD99{wHjJiGyulS%!qy$E;8?N9k?3-MEVvG%MkPykQI9Xvg z9m_4rkEh!9wpd@X3|i4WS=2n+h{x7h?ESjwpk{|^ZeMOCAl9kBk+RlG!-Bj_X@_A#=y3s=c zk<+*bC35axaRkIE&}y~G=2XnFo#J~L>*lNy4l?M04r|y;y1L;JR0~f>R73~MBMQzf^mLCzh)?I{ z&-8TPeDt5eInFgrdaT!PcFiVV!`Cf8yndw_`dyU%Two+W1`FX1-!e91FMQ~wAkXD` z=y`?GUbUyT4U>$_to_iM77}dORN2N*4t~P zp)nJ0yp~J{?5}wqX0f^D^e}5ffSpdQ=sf%-X6^T%XMMb$6DI2}T0J=sx4|O!Ec?RA z#3NnaN92ts1DgD^jTKVr>=LV%9UI!fld4!yOnzvw^7=%CIpo4EvAib9DR#*jQ@s#K z3z^I6=j zQ#HDqJB6BBUH4i2HMZ^@yESl2!n5^VrZ&*@Z6S@)Vsjvyou5^TmiXvo;|OavT2fom-0@@wdJS7&PgU9-_ ziX#n6!hM78Hd?P#1sHB@R(?gSPF@axn*J5~XzjJbV#Zm_Py&je>V^xIlKIMfKYP#v30f%uhLdS zbh9g^`*Mfcb5$MA%Qx;?AL*oDrXDGi>Q}nB0Gr(GtR? z)6?p5x1PusSw$-0GPAISgElg>DFSJ0M^v))B_EveVzMu~(7V5vZOtr8Pa`Q(3Rs)y zI|GDPr~=}+JI<%r-_!Nnsw_Z}l7Tj`chSZ02FO^=!!0d_+vV%>%k#{NAKv|;nY;VZ zV#STYDK>bs4RYu4tuNp)rlT-?4jXJvvk%*-5(FAqfb2xt;dKN5c)WDiF46(998|GZDFR3QBi@duL!6IVNzMJzP!qPk!Vl70LQOcnTwZYnW4 z$$-y@`{ZTvDK|HW8wj1kmmXud2LmhA^MbcGrM9ytlMiHeES~vgry=QhKW>w`;cS6n z^xkME%6TS@!{JH2bmKpAVL^2Im^zDmL>?@Afrj}a?eL2x?UHD8p@On7kuv2G3qxzT zlU$d$a#e-2%oy&8!V_Mae4MUQ=M{O{g$(JXJVEx8S8)rYD{8R+Yg}sJKg|p+W4kTJ z@)L4E9!|6+fVj>`d0fsbzv<1^*=={|fFF)%~0$o+W2m@oIM7)X+@p+>wOs>57HOM?{+P_C;suGI$Os37hd zbwe9b0^D88q#|)-&q-(r^=pacTF08Lgj}b+4+^}gD0^i zolo&X+{ucPp&bmh)aF512JsKd`JL;>Vn3R$Klgoe=jRV+s?tPP7G!&2z~4mA*&nol z-SJ1P)pe%8+P=%-a6M|uAcEivL*jTuD^gw+k5;Bd(SJ6Q4Ap@?eDA?(%0XE+tCGv7 zf>~7Oqe(8(V47Kd!{F? 
zi?hp1p~gjJyK3+Y&RGu!!fU?~ksb#2ru_+Z*iJ1?6K!oEadoL+?<`MqyRTIGw||pX zuLjH`8+(3PEx03$w)AGJg|L;i$XYx*S#I#;R&zhPyx8uFDt0xD<$Q);AO@YRy zIDyY+4_JFwNAuVv@M|&Vj_WB#l<0EpU73UbC*fqMWsl6m*I385bR<@rCqre{vICAv zsOk)azkQLOQox%Q8k-t+iM*+>SZllD0LJnJis^#_n@QcJlyx;y-8&a$Df6Ih>Xnld z;L>B*vt^kM-mVNzo=+2ijmp%t1YS_+=whmMRrFL{fz3(O%D2rOi*NY4Hx6n2gr?Kl znJSsI|N1FvCJIe5%ERuv72BfsZ&g?lZdq;FU-&c0GgHi1 z?;wW{Sa79#)QzDuQ?6w(82U#mMgyc+sxP9#3^NDDW9Xr22@bzgayIUuk32A0s?le! zT>UHl_qi23&7FZn^)Gr2MHK>%ROz8H^i+i(%LQwja{#8N- z=m#k?C&JSkOpBW=AcH#0*V)d*NeLBZr%riTvRgI{g!CnZ|lCGJtmH+MqCe4@?Z0G&MTQNJ9H9gN-vce8rl z5o1e`JeW+g9&8IPrbA9w|6VddGap&)2+-GfNXFmhhWNH5Y~Fn9u=$;^o|A)_s$WR2 z?pWYP278883nA8S#>d{txA}Z+1I;7B%k1Ebn}%JYHfB!f(uT-bX@yi(7bWX_VhZdM z`>MX3`=hh_^MOw$SK+19Pa8f6lb3&awRYb2Y)KO=!lUtm# ztLu=EiY63&557LSI!dpMS*RMAC=s$9xWRt{IF%IhiD0XJi)Oj=A^~>AQgdNMkR9V- z%&yJDFRH7DWjfAK0|FYy48=vdGI<-h_%TaUm26CNJn{1snC~6!Oa@;58+60pj)DkX zsU2fK*75a_9ncxnlj}9wM(Se@gW|^)soXbk>0;*Rk0mKjrY855e5Apqo8m-f*t)_Lb(u~Mp;C?02E+MjPEs@g zgf>R>>+?#>w(RWHT3UwnIt)!iv>&z9lDX}~ApBJTA~r{+3ghE6JYN{rMW;HZ-o>S3S|v?zw-hMH zsaAlrG%Ic@$eHr<&*0q(Q>r zIA8J{8&eING&YU~{i-Q2)D{gMujhfbL1_B*_{HHw6lVwkG(qlGnim}vpgpcm6>7yW z&5O>9o|9Snmsn^h6eCr&0f0zaN4=26buY4k`Y%ZXJ{s8k_af{j+F$SZ@5`ZUeu520 zM9em0j7SD-wM4B%RxScEz|ic@b<+$cTvQB|6ue+sR-z>ymd5BoiRR7#j?z~EXQCNa z!~stfLNd763lSM3p&XvGhm#Hy?W>oV6?T+UOnWs&6v_T;pALkL#}`-@ZS+RF1gwJz z-7}oWny)zN0dvO@Th7^#<7qFacWro2yT~3}fDU8)zjZzsbRWsvWn%yP&kq5kGD;v% z?8<~xt(E~rX+ZY%n5=APJOzix2Y@Nk>=-b8hcLB1tDj-5A7sdOLT%NnvN$(#-iCaH z{o%M?Hna%92SsEM@p7dS{Dix+l>qL2C`wNr_-w$7WyY}{vLz9Rh{RGFC6(bGiOn%) z*-b^um6Wsz5OdG(6*Meu0%UkW2SEaTJviC9#r@#@l*Re8Q{9@`ve0kaJH9S^l*1`? 
zPd6&KrYV)F}Yc!XHQ|K>gl?Q_ywgy43gKXtd72>J=2+5;qQY4-O0zGW@6lz zp7atIC;VSmyw(@#5U+YXBCOhyt4lk*oy`$gpY>(=7lF)xJg%K5n4Qf~x-$G9lRI>P z_&{%A+J2$bvQ<+qaijSg|G*vOtdDZe;6EItUr>P$zAy#I$erb@eu2owa1-^ush6RoF$?pP=u~HN53#L&c%DL zGjf=#PC-&c)Zio5rp!xREWKIW5mAPD@nYKjDc>l)4Bh?&=k6Lz z|EPc=ja(1RFX@b=-%4!Ub1j4MIcR5%iPNJGO|^%fg2P;&?Q4Uo$+0OwYR&Phf^GP8 zVlDuQ@uLjDJ#pZ#d>g_ap6Y}!3{4srL4)LjG?y6nJQ^4wEzIG03k}b$JSA?_RTY|* zXCwQ{JYj$3-TqjTs9iC+cPQ?Fx4ofvFP#=!F64Mxbz(K#HHC>lWh;xg;Eae#qhjkO z&0<0o=GW7gK2XSICYcYm9wMEq!DyuXV$7Dyb$`0IHtzG9_Qs}D9sAdNN#ocMlp9&S4*kmi;0-P^D%Kc zRR7$RC%b;}HB5Gw3i;KK_|7=$038dKftcnLX+s@1x#R&Qq55`hK(m{;cp+E~np8#u zM0xnt05s;TPl|OH0B?K|y8cV)&(*b_R0^!{c#6v&yM@62>C))`UuA*^E(0syd=6qH zITLqxi_U0T8roW}wyA5cwzpLepOMOEw$(?B+c%E*0@R)FXipgGfbI&p;s8D(|r2bX8s9<;$VX8pa+0%{h& zSFB`g$j(#Es#2)#(uK~AG5O127Zjf3KONCgQyf>`NK+J&2=)ty>QAD$LN6o5!u*U{(R>!+)V zI#$!1=7f<$a6(+Y5H@}14w-{6`fo3^-;62bM83Jn_wqAmP122S;&Jbt`F)qB4}XZ8 zHpaAHCj@cqQ#ZLaR8=*Zo4El*Lk>84!l(L{WNOcq=06YSELG{2HzSd0I%*>e4ZI`(vQ70^26Pd=3Pp+n;GZ7 zfBVZI6v`>L+7WGHZX(j%fdcYk=xW2S5k|rMMxc3g?Vg$n9O%P7}xCSfhfM z3{^|(%Egi!#_xs>-JkFzFHaZTq&YX-d~CP+<>9lIe+s9ijd}q40+({GW=yRdoFzh! 
z2ojSv0fYSwpx259wIRyIxs)m-AB>VFlozSu%6V~PqXC95ZM~{(TrG`!O+H1~_JEXQ zX4>PEH|#N^qr{Don%TMvSR!X#Ok`49u5hw>^?s0SLJQOPqazE5%u!BdRD3dPJiIPB zJ+7&gIO!H~9s=;NxBD+LPUi0;4o#8dQGda=Lda4(J$AVg&i2HK^*x9P10%AP{tL@C zXFSNi;KLr1O7}4nr-)mG1|$t$V;{fc{$PQ+N~o*yt#;NV809yhsi<`CRsGxRH5ZYG~Q4a4MiFNZx1_SjwHpn}F*=B(!}z zay?yNclL7eMv`fzkuX{fmdYrKItt^za8-HY$~wk7?@dFk{7Yg3R?jUIatp-_+Z0Uy z$?pOzB+|d!Wbg&_(qv7gE~YL98Tzat+$VQA?E0)1)yh8*+srVfz(b_g zN2J4`EBHSg)}3Ol@2Roqeg8OEayfKrgz&0c^W_6~mV+1D{AGd z9@8br1*zdwR#S~g1)Ta^B*2aKueb4Zkc7YLJ(#|~xoA;sSoG=deeXX)7#7KI{~+?K z?O&7B&ZlOU6Q}~Ql(?YOCRqUc!-wy5ZR2AO4h=t8x9|Twx!v4lzme>qVift>P8vj6 zo1)0;ZSZ1Iv7Q)CmktmLxOAJBN8u(Eil+MxJZ5YZrZsFIJe@lI4tLLc_hQ_C{*XiSKE$lAjT(PLO25U>^O(Zuh{KrnbB-Hw&L= z0)OZsbnhVKG)(oK_HpxZq2v>gbUc|Z%k`Z_ZPhWCEm?yDfu!vOXb zQ*1tqCJ*B#l*Q>Jvhey;Gc34sj{$YwvH-Nu23I=gZgsT`X8;4sTeLjJDB2d-aiZXY zJ7M3rV6R}^MQzBFC+5qkqy@s`-<-fp^-FTHf#gY?YCL>FJf9DeCAJlZXYwoA5yg6r zd^aoTVC-E=keIip1rKY99!LqMV_ei*T(Az?1{8ZH@+Nbq;Q>g+IS?_&oVoA;f-`Y8 zju_kv6+-_`Jp~jz*zs-j(4&YwDybwvI5i$B7w2i8_j8!#4~i9jtgQWDgFukT(RZrT zKQG0yrY!~x(r%m|DmYmCOKs;9Zu*}?uI(=(mk---JQiHBqkU}z(;O^jhT2m+G4HjU zaR_dXL!q--p+tn9kzO&gz|L1fD)Vp;5}q{>GYhymnEb>vE3Dy1s0oIMBTWmN8gy}t zM?D62>DWZ8{svSpnqbtHz|-^i0!&z{IyJ?+s1wsfteoPHe~{Yh_iQci%}+q{&s(vN z1JaTnb5lt`6yzBbTn-k%46C#wgy*wBzooNd*1^^L^q%hL@w+#?6I`A_|DHUXcWK}0 z>uY1p9deAJB)#{lKFpwvlxS4B1UDtXjfiC;Jt%griff%#9xct7n5ApwyFO235c<_v zyU5Kq0%W(Olj&55;V@mXAG^vPq&Q5Ol`Nq@&fYr9bkMt^t5LD#xPz0JDI;AhY&j4Z z%!Fh%F4*J;AY<~Qt>smHhd9QTz~1NY0yckkSZ+4~zAGKP-_B<VK6-?Bepax`IhY z_tU)mCBKe%hL6A_PCpdq$Dxfk=vHU8LdY62OD6IkGWUEz1Ig@sq{3p+Gi|*hAMQ9k z%&99b=JhMV)Y>kNfGVq8xQ;n7cJha&cD%nQG#7)g83@fnvv3=RaG?9_rf!*{c0j0I zG{b@b!U5+_t}Z++GT-UtloH%)X2%ddI`0Sm+(XNot?EYu`;8n7TK=AO7_9tuqJ7O_ z+Xi&!$N0%8cibAugG0~%7A(LlWa7IZLmo~JFDNJjv+82zB-@ShN`y0$)M)7$ z==!uKB%@?P8#-MUT1qRtIKRv+w#!2qn@m3q+YsQ@BM}Y-ql0Jw@*xx`FF(rR3QYKS zvfQC(%heD>oLv=lqY$&OJL3mE=u0rnBXuFwV#l1%S4a%mUkhSxkB=ZX^hmLF)7@B; zdCr>+R})`-+oSW&Gy%EluG1 zBJHP-Im3z5z?ti9Z`}_ey_28eMpm(hR{kt{EDBmjA4)w1zPq@IRTUjmjrrtS)`U( 
zcRgX|^pPh5o+coX7ZUr>eIiA^OjD%janj8X{<0`r{-6$W1sHdIM}u-zp`!r?pklX4 zdAF*$ptUz;N_Xa&xeK;IC1h%%hv_^G9Qcp8Zvji1^DHa0vZNfp)z3b~=?LoYgj;V8 zQ>C%+(LnxWxKp)AL~ffX#yPTr_+=(so(|8QR}Y%e%A?VBeP`~>MB@-Hc$XVD>)hxg z7Qe|KJ?&a~NpDJkhZru|I*NB?*ab(#{Zdid(1<+pguC<4eUDB>)J*};&OGw9$Xr$y zO-pE|{+gftd+?6;W8jVhl9|8vweIY&{k@(8)& z;3Cl$+aDX6tk%6mzmTLV(L13CRG~FsbmT) zjOdrODQ2}R0!BdLv@m*ZiP%!rk~XBCu8T3i!g6A)OVivNGv*!!azU^_cR@*DGguD2 z<_i=z)m8F#YKn*tuAouGl`H*9KuE> zhuTBbGpcdXWk&)lIMU}?58l$zP*5&}YBM0$O|(aVA>+QGAo_sOMsPD0^jK70Vf9o& zWFf5EPq^3Nxy0C1Ghc+xcq}U9!u@g4lAyf10_LJfV^m;`uj6s~s zA6@1=1re`d)i9UoDa@R)ID@mEIE%La6oHiKqwKrShpT5I`9lv`d_vV2h$$+J&L6O5 zFJy6vor`QFND{M5p;-+SFBIdTd?Rq2$}!&C$>H-|wYdWp7-^!jMUB!+q`ZeR=Sn5* zL0o`Sz(u>eC`Wn2&-RP&pr8_itm9u#9j zNI3zY008EA!X~fumieZ(-jXw!WtPqwKUy^#6~x?zlcNVDY@|AYXB<1YAR!V5h#pL~ zu+57{9(B0Xz~uH;C3rxqW~x<5>+54K#LXr8L8=k)?ZP%xFl{Gnm&SvUS^Lf@l|yTGalqDhhS`a(xbO}2=$#MujJ zbEa6*@nHXo_ktuVhwQN}m@+!3^2XdYXx_9Z}&y1UATs+_fU_IiaZv$#Z(QVqjpD~bE8k}=>aC{4+ZX;mF<>d9`_-eCMhw7z|yTZY7M(fLaopelQk49 zG*E7q;S)!z$zUY74|F~~Y`o=Wk%NuQJKqcon*;R2(%4dO`G>t-d3T}imYsG6XNHaH zlNXp{(~=QycBbuptNFF56Nxluw(jEr8_7V;WWqK)xLro?4Ac>Q5LYD-FNIA?V=YJ# z#rCd+0#A;GqWKg4LPu~uYT^EQSqayqTtvhx1meszT0jh+YLh562%&^d|Kk+(yYqNx znGPU8CC7gh5xZio?}Hopx}k=*=O53XIHl~yfP_w1_5L$s3;bu6)V53okz5hLFRMcK<07QS?FFi5B|JIfl9rBg z;6URUMWsQ;AYB#KCcS6i=~T18tdx*oOgg@#x`4gl0iEsSOe+0aIO7zxaFgX8nSGg} zSP^bwwhtLZ(dN;9?n;(@hSX)`m(gm96mj=y@{RJ5MJI?{{^O8ZqldBIy!F^mqwfle z7U(b+s*8OixAdO7-gBmPJk=exI@-DF?AWoORs-8EMWKKHWRw?_FM}AX1T&&;AHdZV zp3dLf)@ueY643+M5|3>%YZKOL) zKVoawdq!QTFHC(gbgbkgEGzk<>+pp!ZX2HJsjsq-u>9wh(uGD@8)T?Bff%^Wn}%O+ zi|~X|5F4Lo)|5Mv!rI72LXHVlIFyrJD2P|lVn`jy?&nKPz?pMgjb{o&f z>~kroee!iZ^w?m40agWT3EdB6=IfM22Qzn&hXJ51Z`3ueE+>w-U|pRRK=ys7j%+lA zh6BF%SJJ*7SVip8N&^)>&G_=P17EcJIv^1aMpH2d=YqgOl;dv-AKe)EH12VOvv2i9 zB6zgD*~IUnD0db8V0ZQs6?S@&j|rPgBkmRIpGeMr#lbv58A(}|!}jSe`wR|2i? 
zZ4Uk#x)Jq!9wKFJr9b`U>-;{THxu(Lih;I^v77b?K>;82etTza?@5kk5V=(=#B31Q zhHhFL@x!99bl?$dPy7bY-wAK?x3$Rsh)j69Vjj!Y7eLH;O+mV_8vRVP(PUA#e;`?u z9rf~Pu*ZBrym~rAIiwuwWDqr0tb9Rv3m(taxO&Dj9_Wp?&KK+BaxIX5bn?%vy&X^^ zz~~1&wf-sthS&~RQl4CWZKtcjluCi?@v+M*nkLQxtskEvPc9B`0-~pwqiPQ!X>&Vw z+akCk{fFaV8SC(FUqR`CoaFp_gvRia#_PxIcbzfq#_kw-1;?q&LK2D*=5i!}!x8NA zs%i*OCJ8xRasUUT~L`_awAIa!L^d;it>F~Ky4!i_6j7)bLac^0Q# zgaD(AUzQQcLk)RC-$^52|Mf=8m@7N{zB&YBJ1b5aZ+*ZYeGtFA->WI7JLH~uY#s+y zOEnJpI1I-Z0&sW{q6p8ymvVzVD8+Z~F!?n1kZH=)eZfjv`EsRIORlx{wG?y|hBWCg zVWNwk&GDHDhA(IL{V>Z&`RyVvzD_}lY!O*3VSTe_I^ef;G8(C;#v{c8I-)9Uy^GKV z#=v?uA^QXe49w}Slap<0ysG`Hi=~RxcTiydTl6}c|NQZ^kAEWhW&NjqSMMJ`+|0TA zac=ltrzY=(s)fB+Ir>-pFRC2@8(ap zMLTuFDsSp2zEn0=^{e}7pv;?2lKK}x)b=d#{rTyGxbs(>eW#YgK`uwt>KD;RSI5q* zp9Npj}rym)~x(JzHTO5$^ zC?SOu+H>;N)y;&8i7S&Ycnzt)kN%3lLJ5J}FzKP%h5TUfVa0R_IPmceDc>zh>8{P9di&Su?H?3-Hv@lL zPj>#`Lu;|fV2LSpI))H@YIb!ru}p>@du4nyXb~%?`6M>RX`J0h%U~T|G4i^CaABEo zUB9euAw0+vM}8F}4Wb(u#ZJq}8){gbc5GZ6b%1>VU-Ph}8#~!8UEt-ZqLXa1LXM3%N8hJBMx9 ztG3oOKgUkahA2M%r{3+|noVJM(MQh~VCydI zb5%p)a82myZ2deBD<^AvVrBvsCWM++o4_!I@it`EgR`>{QXDT!55B0)!pH9QA#aiG zMpbk3INBuw8z+xZ@e}8{lUNf>y2On2e8-ybFK?}jM3eU0%y;sRDsr||bA*$d*^|T~ zWMYz`Aw-+Wj*`CGu^U45diPat5tMZ`qLr%9+*$Du1=CxVM+8+l0jlJ8wQ}dF7N&x_ z>}3!%=eSuZiekcCU9HI)3cywkh@LQ*H z?a{>22TvLNYq{^|`CA@0?2kHWq3qsx$JB*Y5=#2B5tn4UtLPZ?yg^KHJvX(eeI}5> zN*U#wtaZhOSS;X4zLCWBc0+||ym5EJroHIuI(4+qfk>yVKGRQkuM#J&fBgxcX;%0s z!-h>a+o835w=ebS%8$>-Do+hK?#T;hAYlGpCbWW>yjli!e$*7Ow_Jp(Fpf8N+(`!C zBR8wHkO?#IcmsJ5B`?(9Nh%~6fW(f7$bbJ_b;IRaRoz~muJf)xwuY9zbp0$kH`;#i z-?L}^akZqHQ6r$pmWXEB5--+gM{GwE4ILYqxJA3;KD+Nw7No3vA+1nL?441f1LBK0 z&a6#iru!T4YS>Qqj`fFk*Cqd|sUI)Vggb3I(YsPq9F}aLrOw*v9)Fp7RmxplX={mTe~u!6j$=yUxU2*Z zJPoa_3JoA>rj?8}NRMUcs;v`0{^%cnTuMI}GTkF(qXn1vRxWP}R#-M#($-~Zs|ROI z#8&qgidzO~Vl^JF>+q7fH7??u44u)TzhT5yKvrI%l%r!MW1SZXr{w z*AgL^5K+!c0IyoR(g?XrVwEduZYr9_&JCtzP$p_bX~1{AXIRHHS5;3oh@PqmY=3yy zl#=I8ZNG;}OvAptclm=q(k4)?7u=RL0s&o>Tm2pxB zxOvX`(Yc>G)nIPm#ZuxB{_Mvtf0seOEn8!yK_)McltdJs-?zD|=D;wyq*~u7Qbv*A 
zvvl4kJqIO!lP*q5*LEdUhX&I5@qIeJoJ|2%2? z4U;&V71qRvAV5%RKykKVDtOWXR?dUIZ?1D$qXWL@LS$FaUQ zIB(-_IS1$vp2uPgGOn9V`diqs?yw4ta;+{NPXU?9c!)klm)`Bnw{k%Z=9_=kqN!#tq=+k!4}S? z{OUu#mYn?n0cMT@ao_y{AL)7d$42>QTQQ~r??3xw$J<*y=#(2*Z#Ouziud1pboca& z*{r@m)aj{{vBFnZ0MF%RU7O9eV`WTOZ9Vjg-v4$OHkyJ}JOJo|yUYPU9j(fB5X~)h zn)st}JP0bCoJGGRbp3GsGv6>Syz zKbax~*inoHLD`YSs-w?>w?v#co=@SfWHp6L@etL8Lo3I1L^h_%;lPa1%pMUIc=ikMvqNI^`nvs*qH5$| zMSbS}^7Oa~G2g~De*u)MTq}gbb$wdVWi0s}2JhV1pcJ2zcQW8K+=%90ekh9{wS|>jkqC3-_q~w$4=29z~_qGK-D5=IH+!qL;^;&^E93P-j^H= z0IH)70J`%eahT55uHvFERfe%jqpL|<;?%CUoI=LQe=>=gWaS;~`tlGujAUSp9=5dF zfI(x4@&KB$fzKPgJrgrmc&OmekUAtN>T^@}fj2YW_{TRi=Jr;>{xzSgTDhz%$CrIP z6}Pqz)!OVzy}4I>_{Xu$lre%MmW1N3VNDk4-2e33zdXN~IK(a4Pv(d0V*8by=^zEj zS{zesCDN)s3h;hQ#(vNXJ)3e9IQN!yuJeBD^K>1nCvq^~%J$CX&_ZHw1mKAXFrw$S zbY}H|MZb0N@FJ5dbL``jl;aXmlvU18FpX2RH&76{JeswNkNwqToEAJyFTGw2&-)sf z8YL*-xALbouC%+^d2+M-rXybY0D9~n+8KOn{eH1y)KIXz5{nm6mV|9f_}5AJSIoj zXyyla(dk?Yr1w}o0b*kZE7y-dmnR+OnR;uGB;HDW4Yq=92u4?2{DS{r)4*<;?!0#V zo~k|JoMByY)VRLZd>Ap4KxX~0j2F*Ep5s!bFXjfT)ppT5KVm}BH*LI z-x#_BUElg=^*rT--hfwJl+^NBG`1pBNDH868Uq*e+>e~^%%|BsI8WOlR*=8uU9Kl{ z|E|}P*I4X-bEHA$XdoYmPN5JJbH&+WFD!&eH<*?3Vj(`0cUWX>iBf58u}OaPl$wh! 
zwu&Xls(m@K2QB3a0>YCp z?e{%*x6Xl)E`wYq#E!(vvDQmS6z<-%HP!C(d(}=r1;$9Yc?YiKh_6J|3|4aQ?@-w`7 zR?-IZvn`*TWG~iUky6MtF*J}YGO7lt*$)COMj|&qo9$?q21Qrvd0i=5s)Zjlk~-tA zbXfN?>jHJ#w%0~qzjW~YzQy}XtTp$ignH1MgAd57(y^5Z%U4@oBI9#*eBSWm-*@sb z!#u*dA-y~cKtLEF824sC;>zK`$sHNXU@N5*dE2il`}JRtU2O9(t2bZ7cyY;-CGnl{spu_---Z;_Y zAEEPX-}S7HLu?<#xOezQ!1I9AsvXI_KhD<1w`27bH6Z!aD5_2U{8^K4fS%XE3ly93 z>H6iY$NNN3YYAlt+OWpR&WpR<*1F9uzt6%(Kee)tnFlxR<>W3C*ma}?nkd16RUO-& z8zqbmTDqunYz}_NPUR*~ASW=ofA1dt*x2r{9B1Eg{P5yG9Tt+kRcy0_U{hJT%*Zv< zEHU_q*Q?)3q8iV}3UDS2)2I8kpZ@rx`_lTm6m>EZ-@Q4w`>{;bs}=8W?!83WuNANBim*lg>)q6w-r7uO}S!;i**hQ z=mi7s96`GrvU#@#KT&iP5%A9zy)VOXd)h~{nq{I7n6bg@$9DyVm0S#o)syC-hk}NP z45YJ_et-2u{)jttNZqlDpyO(U#c-r=Bz1if+#Vb7xrmYW#*)CYJj6|EQDC-}99Y(w z{vHYa9EFd?RQ}mlcfGVIO0S!9cX?=k7A+*zwj%2)5R$>qc+t;-y>XHMJ>eF0h@80{ zcRs38Uy+#>x`Z{E3n!a~>ikx!tlfT=?ro7i*FEa;(h+X0YSun55)>1IearQvRs{k# zR+~YfIsrGPj$AX@R6+}|{Dk5=XeL^`abiSVd3&bBP%G5X`$B0Vrhi(*{66b%pEU0@ zVz`TeoF#)e35L$847`?VGaokzSFfJ2Cpq7p+rPu1a`&U|?{~%j>PmfeW#~=pNp;%! zOT%|>dCkT@OZoM&sQ27(b_`gXZUFU49!OUWXbUrFvjTqm8l?zk!)KinmTo^i7~@Y2 z;$5q((zBrR4@}jEhG}E~<^yv(#P(`-1Y&m{w0N_`{Vd(yZ2VQMWPoVFh6;Z_$$N?p zaF$|ydWJH{RV<%J9rjrk4Te=m*TSl$$T?&b3bQI$tT#HgDaa_uA^Yv)$06T!t-fzh zj~rEIV}_nfjHZ|p}K4=bO!^kK(^OB+00Pt#`N#u5unfF<+q*PGvXY&jFT5$sIQ z#$JNgj|ful4b8`$KOJ1@R5!fC&J2tM-O86~$o>x@gB3p#LP8}&>A(;qiCd4nK>H)NW9d1iFU zEX?yW<6VW7Nu5zC^-lar^KqGH)7=kch+P%0KOUdG38ZHyWU9>SB8Y>(e!P2K#TxLk zZZF{>{0fGrW;jv6fABjIQRF(~TEZapFrl5#vG9*66h19BlApwt6w#bq0lLG0UEiI& z1>&HEg|;s^=7Hn!rqCW%Fy6q9g$^ptYV&qJyb?%mGa%VCzDdPDyz%IJ%V(;Ae5uX) z!swMRX_GtG8L*=2sd-(IYa~7o-k!Mj_28Es@3u|K9^Q3bd5Focx6(X+eDJ6@dsFk= z{6d?R9)9M@n@!oQosV{uykSbnNFCt*a3Jt=!ZIVbir*BIQ!!puYk|s9wd5evX?Ll- zwHwadV|A0_g57lkt1Zsh%AVQ6gbKtu7qoK`lN(JQ=hF}#q9w|}&XA2uj{nY1vK;ct zMCcY|A852b8edcvlXv>6Y5OPt<5RH&&Sa)r`H)?uV_eVVIaML==~d$^`^df%0h^2T z`bjA2%~HoBqCb+2_E#<6sE8Ym>f>H(hkK5)WkR+j2O=MbC=YFT=BslcA?h;Tcin2l z-Tz=VQ?7);YjV?@;(jiAVB{)k$f4yMNK?<6&6ZCp0?s+&4ZVi=+N?X$VQi2F9HS`P 
zmld3Y3Jp?gCWs|@m}N311rnNM3~;9Qs5v*+F6(gGmReATA-3gRbiX`ZW@0C5+iN(p}R1Tv?$smEam=% zk?uc5P*jQpJ_(W2qNyQz56$Goq%+cNMD)GlCxIJ-zb`)T?Yh}Q*O6*8-X4a^;6$6; zhb@~={`**|PqB2@@S!&kE{f) ztQqsd>Na%Qx^Z+X(M(&|DE_@J`isApHlEBePApzyo_;Z9U)3|7S&AKcdkfW;Qvq#w;r-PT<%s%f zLfspV8QE!JtoV+~Ww}bR){9=wY2L=bk5czUN3ZtIrXMG#GXEj@ZMM&|DxLo2JS|1f zE8ALj;KZCV=6U79mM0c-bzDK-I^MACxH5E8D7;2`HMsVS(8Ir`@|9 zQUM9FBN?TxM~5?AmfG;$J%}Iy;$V5sNy*ozKZf(D+s3oot|b_h4D{9Sx;Eu3Sy{aJ zx-z%xP~g;1lK)ZbMjZ2G_ZFAYUFc8Q(xJ1>^5D;(`C(XzQ=5{r0Z4Iy15Znl((b^$ zX;*h$IWTcrsWZFwdf2NQQ?D(%5uYsE4uUg}G5;X6tk>6Nxjf^l|H z#3>STfUSyAXe6GA5{ z=wpBWR=emj2VL>+O}iz$+IoKR?INcd1#?XN;}=skRX*PP+e-Yykx{TfWc63$R(nep z3@Sfaofz!$y9fh*t3x)gM)f{R`fPOb|CSy`RhCsF>d&L(F#vD94rUQ6{=k%iyLyUN zh~(FS>G32GDuWn|aYJ!6Iqekz!Fpt*wT9+aPYlQrR}q0elb3L6&+<%#GtgiMGB{FQ z|Lm_U&Px6WfBrlFigte--a2waAKTSXr}?45GuiiF{*KF=udGOnrJ%}Mn$2dl5%-d6 z03bN=-!4tfD{m)rqcV=@0G0pSN?`wAD`7X-N??a){ElpFy4>7yrM<4Hy{-B3)!N#w z&a0#0;-U-lzvV_{=ic7$lAv&U;=TEf_p>cl6m|(mk0c3l^=;%Mbnw<`ib`8F&Y=%3 zqZYvJgEvGp^jW z1D}$0G4aDr1$E~Qhw$J8c8F=G=*TjKmDNDu4BKTmI#;_>d`>zpR(=)v=sj?bjCa^2 zTR8=1RvN9{_hQHA)O8x2nojUlH=|_r-KUh3O*1Q$3`jt)yEf2!`f{UFEDoTLdR!XI z$x_c$J7HOJ{oDuz{#D+BbXJJakYi)-qhoVq!qF+JTj{<>b>kkeB-*;TsTQyE``uUR zI^4;Pt)jg`7w4#S581KnTLe`QscK2gtbTOPa~u7_J*uME^CRHd^xjdwaL=2nV%uxV zpx`wJhN6x)MMmludIJ|ypll`$l^8tD$Pwx*ICEe}FG5*`Y@fN&|21w6B%GgMxVaAt{>&ug@Y43|otiUJGOyI>*QNB+hZldiVT;i;MHBQ_ zyLDMvdNGtfTX}~|a9Z-pn`c{7QO5lrW!;+%l3)I*EEkV*u)VfC1;ZEkIhU2UxAtkf z&(4gBaTE@nTyjEd^$kd;h1}*j4xD_6uoLJ5tPc^}ZD9d{QU%oh4O@2C+0hqAEsJFaDfRpLLP3yG-w!=m`>24}jwBO# z2;2e{lz@Qr_SuYSb}=H@@BVYE18&i-7WO$)=AzD!jXgY*FaZ@8W(|pUP}l}}K@E%g zA`oq$FgfVA1Svh3fQl<0=hVq!M)QPQ@SZ@ED4m}#Yvhn613Y-N%lGlUtg^z-7#xUs)SH-(2N9-68je@t;bv5=KRS zjyc#DEbPOVr8)=APjqQxY$hAP-wy(Y8O0+N0U6V@G~W07%WaTR-0j2)TI=bhDZ+>% z=+A)-5YL@qGG43~uJbm2*A_l}Xgw}d+DkxzZxR+WS0yQeXsf)-LWM7s+{EiuN@e=! 
zeO<|r5VPmA`t_6tSyNT_r|%LaxlSBtWK!*XloKxZ6-#W|y|%MidC9M?qy%3@Z14&) zz^NpgpJntMF{<-IYJu3P==Pr zz+5?V!h}IDA*^;w%eA0ErNo%2S+z*BIS2Sg^;4_vjkb7}mINdl9q3Mv+dAxRSJ%4K zGLnwhja%1rOQ~j4U0#-Rt@5JG!p@`ek&#&K2sL?NNsv)B5$5i~vmq zn;b_o(@_9V2OY&BzzLH1x@+e{V0FM}9%kdyY}cZ_(#l5m^yN^!$^;4;ErQ~ms(sBj z=o%TD`Ui7I<3a&J3YuD=)RhQrN!8RL=Qm)}p>}C*4L*2GXO-Ff1v~R9eeX7}BRJ!m zJn@01wx3Uq>@aB2MJ`)T36ar9(I*Y8GIrvO{@R}793 z)&PTdl-`cdNb=H3ts?*uYmrJ<%<6REPVNsEyEb2S_uPaZd-DNgY~y6Tlc96zVyf|$ zX08_|U#)hpYpHKzrs7HiRy~azdtUPfq&%a5TrZx2vKq~XpBPz6!K-OGx9n}+o!?!) zBzj%Hp77>c=7__EVwc4 z#`|LI-*>>7T_TmVQLl)(9kvbB80D!H#&9r{`lH&vq_2LTL@Wlp+HfeEH9|YPa$Nh+ zO__c_C?CQ-;wJkuJTu0sgaIMvv>Vr-@#Zj-uSKVMtzgb!M-*AXLmSN%&k%K0k#Cy% zTXNsNMZ{sn7x+?s)H}nd$xsh5Z?yY(Z1uYI4IZ{NZgT8E;uxg}(hJ3|zSe!e7F3o* zS{${2H8y;nc0GNz%vHDzU8VB>X}@;c^$TJcPf;@&!y7UT=Th==i0+B~D51%>eOo+X zA&=0D`aQE!W3N_-5~3<}#T6)vIzU6H9nyhj+I;xcx=)LI+hX&6g}=u*$9DL2)3-;x zA77u=nKmB&`cIUtgMI$}9WBjSX^llYJx4Bf7M^8ck;GrZ=eyE=j^_?<`K`2crc1}J z=;85mJsFTUF97A+~l1y_@5XW?`Fux9dDqIiCWm@0(E0! zBim;K5afq3DMw_t+~=?l<*G9f@>5=JmjrbKK{R#%_DZ6N#$o4T6QjJt;BfCMa)Vd2 zmK^k*52dZkgFEgn&h1?Ayz$8VCT?48Wu$tdt;utR?o}e5Bs;>jMGln@MPBkr;a%FB zFgC`K8{Qi4QQW+t=5~Dh^LA(kvvtqLt=v#s-a)TOeJ0fjlRa*V>^x)@@i0YiQ8xIme_c-M9_U%4Q@c1&6%OeCa7 zmmiX2YeZR$0kH;iKb7Km^1){QAM(mc5dn45fA`k@s>{yTh7CR)ZhwxijC?%)NAk%q zS6+1uqPX0D>1~d&^Gy8jn{Kpb&y~e-?aeP_MUlI&zL?MjO7=)`hg4{L?|9q|80gqJ zrc~~BrWvfAKQJJ|9;#MKl!!UE*ZA+|`Lm@F{~T`%PtGuzVAt?aC(8rPnscdv zskcr}wWr#vtFLW6{7=Y8YVt#Tf51u9XWhzuLsd^reWDt6KNs_>-v9CYqW+G(H0u=p z!~Kp0y0JM`-Wu*U0NA4gZ2Zfe*b=|w{G7na)jY;>@PSqKP|iL6cUG>2H*qR zzk#!T|Dm&i6ixVOUlx0#Nt++A zV$X+0R$q4}q*^o|F7!(G=-KuU;u5Zek$2>5MDcxxec$bt8Fb@V$(v0Lx4sBmZSL*9 z>*I`NQ!c$)Gx7GZ`sSqfg7EJC6Npq+fVs`v{x7N_1UIhMpdp`%$)}mVO~P5>A{pG|h@#6$m}7v`mY! 
zcv|O=hd!bj^_QQ1F~q$5*4LmIAv|DveChN9PN22Az{R22dJWdn!LReg`JXWHH7Z$X zxy|#}JAU><+hH*9e5Vnkt<#tH46^|Si)dYjsA;ZVsb-Bj?V$v%MAJ}%Led2Zi4$h% zjU$UD+c8w@gz5d!$W-jyg19>SxRl}1BARXAQI2=#cU(z~?+FS^w}c2S>NH%_jwn={sT%!X(~Ng6@GKsM{Ty6UFy#M}0- z5QEFJJBs_x9s2uU;}$86i;VIw))Fau`H4|rI#BnP$Cy~-Ov`#;mv9$3;b5-|0fYnP z|8kXc{TYHEhko98^YR{xJ47FkRk4p`qfk>nB~dbE8ILbrPiojy^OsoIz+Dx#b_H3q8kx^(MGjJ8|gM3)yBiyYfs@5LR&fChsbf zR_v@U_W^RfAzjw8_K-~Bz@iVZL_?Y_Y%gUweIOit(F}qdl$XHLrL~x?lC{larCgJNVSVoQ=dT;4yZWFj-bh1(hKBeeVY_ z$3Gr-FS|!un1aT-(KbW7usykpUs%3d0}u#%jHB~#=2g9)gP6$~j)LXjsC2P?RbKMB zWhMEt{pO_|c6LNFjX|OcbjPc25B$L~N%~LNfbN6G0q+Ow80FBRB7zDbrV<-;(U%JF za^WCkIAKZ3$sJAa00j%2T=YUkAiQ}x+K%0*K`7vQs-vg0XE zYgLxfe6#4-LcR|-6D2lxHP0%sdNFw?5W5~4lWvm}3Mwhs!F2l0-CGt;S;K1ED%jw7 zgEI&jF!Qb}Xkk5ak`AanztJ7KpvL0Z&@Hj?B;CAk3ZAa7S`eBy!6Kj+&jlRuq3

  • u`qf2VH4Fvx8)gYtO@rHF_Px=uI%?w4xe!LN z9)OiY4O?z^*ifybvRe@OeOZ0VcF)LiXVtnEX2%Rff3Gy!eb^(Y(R03?sc*TV*r_B* z&MnS53(++h?q}nM`%dk|Un^#}%c5(hIzClQWwcyD=MN7|=|>XF#NiMSwSO1e^CHV9 zZ-CSfSK|XdnKSZ&`V{xnvsD!Y9_YMKSnr|wD{9v5sBKS)PTcn#A5*yVbMfSzT~TFg ztZ#Ov-pYP0KT@l4s<-D8)Gw?7fOJPG0+ zz5%`~0*oeh#XZH>jXv`1gH`*dfH^3`VsVEI3;MHnI)x=ycm%QAGV_-t2jxD>mH~>XIFy2S8dO>Qiq&H%7y>aU?YWyWyDb?Rv zqht+?LM*)ptU<7sTwt0D4QAa={w`tawd2>1@jLVF1L;&3BmC zw|)2Z^ZTo-u0)KAg+AOE%0}*Np+2>Uke7|cQ8xzkVcvH3>aGt;k(gt-?8!>qd^iZ5 zvkO8$QFo(kTAfB~-C=9Z%&l)9?DE8zhpcy6%&1ZcXd)T}>)ozRXKF0+`D-2haa2Yf z`d*RQejUJiiGE?v5rK#5FPATIxcg^at}RZcQ$9ql+#U5P;;Z_fFx)7mTy{FM( zu{K8e`6j;R@BrvJAJjRYSabS(zCn=!6~wJJzH`hoDv)9^l+O?h!TA;*a-9Ds_c#k1 zxnj({4DF;A$BHIrku|J3iybOw!}H18Zoci_R`9I|F~|my&^9xHP)=HOZ^9DdF}CgBG>2`egI9Ohd?v;R`_<;Zohi#&@I())v%*8| z0xByg<|q6vpNIA0?H5Kvm)L^<7*X+KNZIv7FYfI#??dbV^Iz-j0i^xw50I`k1`pTk z>4?Qe1d#$-!g%b);dU_dj%O{fbH^blnq&fP(L2M5m#T7NRwxZD?II9#=i}3n;>UiQ zgzv^~9DQUAO%VlG6^u|NM-n#eLM8-G!F*Uv;?@`GR9nfPQiH1a)D7M?GsKlKh62; z+GI1?ZK6MNS7XI{>!fs@zttQlX3ssz6ciF;1hWg4{Exr}tBRh-_#!;%eJy!v0$M|W z?ZFsl4pE0KLqhwOl1BMMc!RUWP?PeK){(dxZWPvliMPeG&XPfspULy+;DDtrJ@!>n znh}d>=bB#LiP@I-a`(n{wO5Vnc5<3{!&HK0z2<%Lp9K8}GVEa2?YDv=0Cr{;U9e0@ zPw>+QnHti}0A@85>Ctg$ueK^A8hEz@&{ukXg4zw~t8x*^M){uab&IWPu<*?i(5({~ zEal4TDKLAxm-*Dok_sVCUo$7nPaND%m&_(qb#FL(fcZABr!M8pXzt%(>HJq3D}Z1u zZ;-_5e1_+lJ_@mQOS*3B%N}KV))Pd<5eV8vge<~#jqZ>@LiBCZ#*juO+8{}6n1YEk z)^E@gB)a>vwy?6M3$=xl5V>NQKtR^9FwHQu^NU!O{oxeZ-8k4@HD?DZcY?6Y_9z^= zP;|ypk~%z^M5Ejm?HeNmD< zH9p|khL;GF{^B}F#&lj$_nIkbFy z0BPWGkGE&E!3K6g(UK5 z_<$Qr)u8`iEClDPrN{{+a=`q~In2LS#HX+IT?{$$OG%mg2K^ITL1gcC-6A44-Nqxr z7ytq2E8Q#y!;^;{g3r>CGOS`l8$bY!rS(fo%-kw6NIV))3#y^qt4yyU_=_wlM2WxcjEPx0y+_ zXAm^r{gkzJqM~TTas$Iyji46?NF0njtc4T_%(ocZ zRZSwU&x#GT&bOlVC0SWomC3ApZLaj?S1H)zXnfE-6l;-BA8e^er<4g5;kb|hgUOb{ zBk@^FH9l8G$T~+`Qw*2{L59^KpukaK{a)yD$bmsw2aSO0KP#Wq1D@R;w6)Ni6Vjr* z+9%rj?uAQ^9O6cpWE!oU6hjJPh7>UxJgfYLr&202M#?ccP8wb}LhI|s>KqWq=13cQ?--hRbAv~N4D#<*sU!9=7Kp|% 
z!x_B_&+vf;NlhV`|5TUYu{?EufG*xs6xdf>9Ga|WJHE*wTqg+W00}`YoIP%9#Hk~Q zQV8knB|<1@HUa{lRW^k%wfmUnd4H3-o2PVKT@_Xcc^o0;>3MWS$w`sI8bhi@O|~E{ zcnAj?2hO`+1p=Jr&Xm@61yAsf)S0nPzHg=MaZ)9{ogxtj^p8}UA+lo{M+x^pwwg)p z?Uuj~`WELcunc2*Ki#i}5i8U45$BpMv`}ZEco}{;Qu(YtN!B>CPZWjKuw=C;2YL2v z4^>rR=4wV7Wv`JFd)anam|Bc=9Ct!`6q?G1J~jNJ8aVO2_zge0C-U0%*-d`;S=VaQ zPTrf^_xuYGmnZxSzeE@xr&xG*z8}MFetB`og6oNJ*l*+V;kFAmA;uRnz)JX^+?3*$ zg@kgRXQIl>{PQTV-F{`Hziqei&D0M`TdV14BaB`F!%LcJT8P#s^i1$-$U1ba@$p(T zZ|H1>*T4bd4NM_@Kxk7C*YR*;Udv|D($$?|?1$*ixL1XbUTo=h1g@~LXE<3xK6@Ae zxYS_lzmoj3PL$5>Y~B!Sw!%cP@_ZL2z>K8O8D881qX+#0N6xt*nVjt)g2*Z@Z-|H{C}c1U^fuJ=ikRXYt6kK4V2|3& z05EbOvV3;NS`ITFL6gov0GBFe)WUsOzS`ByQYXptX0BD#p67+zBm>p{d40ClK`t8$ z`tt_74GMF-C|m4l7d^}IGidR+gOY9ZjIv9H)?8?&+m6LoV3C730~I= zn4bjrp_#}eaQ0UxYas#zy=YI4$@q&icKQ&Rk=O<;c8?E zIIjI3RNUIU>A3I4lz2A0wE__Una`x>nt4yMiQpY5U|%RFsEF;AmEC0I*rr`xp@Kc3 zBxm!9hO=}d|BuH0@MbzrzKhh~slebDD}J>zx8-Z;I!3JFN^N?%75zGDu;mJykAtyJ zppHM_Ds1e8%@3q4a)UDHeOkf82LW0>(OHs~aq9G+^A^)p1zv2Z<@7oYrD zZMcCNzmAJjLGf9!bpm|Cu~!7eM5=;YfEU&e56<7t>dgfZhe zLYxZELTKdHIoMba*YQ3qGDjSG9+Wq(f#Rp6QMDiQRd5 zUo#>KSJDCK0%2i6)JX!e>UJSDIsA$KN?XC4pI4eIWxZEqwn@=#l{~!v!IDBsCP8Bo*ipifa z$tOk?x)Gltvbl76UJ6!*7|gQm`Jm73(}ic}*CNZKFB9^Oh^59O`W_ZFbdhbY{h(CC zgS1t9$0I8v&cg;FexicV)bC=GL3Dqhd1_<3sD?U>+{)?n0N)c}Ev?WK%~^3e8Yzyz2d zo&I}9HFsYA5e@%({QG}@xygnM8@})IzX!4XBx?Zj@kEY`#G8xF^Ih$%=CCEcwqSJ@ z11D7|amde0&(9RYQ;q7V)_&+B=Q_ayCrR>9^10S4>=T|xMFU*0RkwgxkiOmZFyQOs zh)2)bEP>ItRerH_*P>y@fUfQAh~Z>)&&=>j0v_K$Td>X6PK0ckKPo*?6lZVC-}G0z zO~*$UqoOAZXUu?&#&4XqBAwnHy}f0$u8SG5=lF2o@!$2*V|I?Yu7%&b6Xa++2DQ|o z58h$A$PhYfaJGF}x{jp*v7@j_M6t7^V+bFeQ#e2vyA5S&XP2pm1}C$ZSV7aHwZUnX zj1!92_W@uMzO42(Mbg;u10uvf7Y}*lT+q{P5SY2F@u3CI9eH!l&~~>@XaF*Pz{@Kp z6t@A8vXAWyS<}@UI!=A5{JlCGJ)rAhcR8nLg+Osk16uX}w{@WUzt%wzSULSCsi7G8 ztf=HkdvksB*#2|5r{!74W`Doc(b3u2)?pQ4y*(DQ%sTT8$u;-u>}zdy zrC*n4{CyeG?3@$;z{4Mz~0b2l( zFY%-usJa@>H_EG9?&Z`=yMpssC~nLaO|X@+4^q)lOF{+saW5+7B0cCT6(_ARRuWJqCVD&7 zjxb)bbDB~kVdb9HO?3Ze?@_~#%0LBfmN|Ov7Tv10fot{3*t`}DvLKD%QE4>FB|d0K 
zb$K9&dGIYk8+6bGxbgLM^}{dz`NzJ({8zefEM4!GVV-tp!_GZm%stji96nvYXK@-# zZiD_RJ{zn7~gcE$Yuv7sQnM;x5Ww1hTJdU`< zVz3L@8o1@#g&IG5$FfA~&PGH``d}56MqgPY7gi4;DwdGh7c0#+GoI;Vo-JQk$gCWr zxSl-aa-0#SWow41VA5GYvHXC$2`F+^72Ul-v*us^=cCTy4-~u7v^2_S_&+c84x9VBy7ijIFi}3bPYTPAsa5n2Ad)U#rlB=Y#+?qph~a5*ptoms`@yKWyzqE!=id2 zsRF6h>1AMhv(?_Y=3|5k$(RCk#lw#uKLf(*(XL2?fj?j2zTe-!PDokrePMQ7_)0@H z(mQDGuUpsxne(C%L*Kl6bTe(8=7sb+Qi-~N&6pn3(LQSS z>6y}V0QD}}bf>~Ubc3U9a+q?Vf*I^hLMf*Rg#p&>%6gXEGED7SpeZJvf#q9A{@ro%Jm<>x z1g{yAKRr9NYHe%8vyjks*{ve)udMTABlo@bo+~OtfGV+KN=k;805h}Hy|^%}8Zf-t z)&S2Dx_EvD2c@&IF^)$J$;ZOaKkvP}aqTW8S<%yW0twL!2i zzF=0d7Ej%bt4qSrqoivbsYUdmBF)ADev$`rl%2+QzRy?n_(w-IB)wdYs0uANmX_VF z7kCtKCXMBmMeUQ0wD{x}J6S5Wu#{MVSZi=561kS4@s^8aY~N`j>V)}9y6MihwRlQ^ zMlad`iO)Fajl!gYAL3 ztvKrF1JNi|t%4MyCMIg<%%*2?pD(-`dWn3ITjq-P7;b$UO!r?idDFiVDRTsrW_p;4 zgFptA3_z|Z=Kr~$@AzZA`7=pZ(Xm)#Sj#aNm?8p-KI~i;P_UXrF;il|kd|S*H!ixB zxELGig_%qeIiR@VhTd!$L@$fu;gQo9S$dU+;A291*>xBw*s`MILZmNb5^@(tiRpsK zs+&>uhX!3`;z#2{9=G=rCWv#8o*AfDYG^XwNC$yK*F{W$ z=Zuj{bKH@z072&$Y-ANyW|`360RKwr`;<+pz)Zu)Tkqj9xcDR9b6L z>;;Mick~4d@USp`K`xX=1?h|2_zr@p(PBr9ToN#677#ziOQ#I%D`t|_8Mo;IEI){O zB%PG5aplTVQmLQsbR}161bkC|yxJ3V8Zj=HQ25o;{?=q4%=73NDj@~t3|xh_L?Ez+ z;iE;_3EQ)!)DH>lY++=+zDaey2W%a962fB9RTg?{%e zS=mFWI8(8!diaHR0=ZBdY2o`?H2LPS-T9NU!Udj$#q!tADk)@|m`Y8cH?-6DG%RIv zoyu89%=J7E=t4pMey!u5-_Bf|EdKX@yk;|uQX*hh>4iG6hGh|1sPl7FG@B`T&ScW1 z2wYU*S#x_v5zM&8nAjfx0S5zUZ_xd3)RGV*dLZ^}%@tV>7>2QAsgJCx_mkj&E;nn) zV9nqy?qAvk70=YxIu80Efh@qAxa`zV~wrAC)yaAwsgu@ z+2p;Y4r)_Sl8wza$RN{(3LH$cFJ9lhmL%52HlVmuqW(j^IP($4N8(m~qmvgC_;&*A zWLzWZmVUD;F}K1`KI0rx^4vaSI@Dvu#iGVC;^0AIV{pl*$@>3C)tkp9nLTd*H?H79 zh)ZBnqKM#9fM|%-xS)cexu;f+3gVJAYHFp^jJu#{h+1G;ii%5SZY^3i?ptauEo$jh zYN=`aGEFn}+@Ig;_k2Ik_a8auA71?7ea^YBb6xKXU{r#=>7RIYg)}jA=-Z(a5dml#VmRh>@H|KU_I)Ix0KPU_O+`?Cms3KDNrd zAY2kf-FyE~0mpxMgxtCBcwlFFF)O{e8CphE`$g?aG0pAbmbnL;v*x$wpL?A9H)O1a zWHT7I(LR+x=auPGuEYghS)#QHj2{JJ2Q>}@v5jo^c?qVANgjz*^Db5r8$dY+iq`HY6ekfz8a?DQQ#AbF zrGwju&gD;?#HuV4=)A9u{X?4xGQ$Q 
z{?;vO0jVSuv?ka+R1~UjRvM>g=u^|N{mCOq;@mrX3cQpxuYaf-xlAbtT}LoGqE^5d z|Mt3R#XG@k7}?y(upJNVOSZrLd{n`!m~($oz18n_ znB*O^af^OW^VJ~ZeVm39sFKPlNH+wy7$B?pXPwX)n*8w9o6`Sx(#a<)#S=RyFZ}G+ zYmxy4(|#owLF0L$VS$dUKb3^lr(HaM1WL|EIb3Ao>0GLBO@*UYm>RL7eJU^Hyzebs zf|J7+*7y8{sQxBgQpr8t)Y-OeQekCDi`BM7v*(XRxTNb~V%bQ-;+&9^!!|{0qY__5g8UEW^YZt}vnHpWxL@sh zD8MZeujU*{A6oc0fAox&ZXLqc9y@#ZP%i#WJ{N1dEtL-;js_$p=u`$+7!9RQYAyd7 z?9oycixRUmti)7BbAqPY*Hp_}YXrI4xZ4TlegP%Rw>WucAa&{Go&3cZ{1rN>i_ahp>Tlz38PtnlO z%cvA|`XKUkm9m%wvFZ62oSk-GeS`*TE>O0u1l+jg3+Dy%r)J0oPf*@?i4B zt82xU*zWw6@6^72nq1jl?%YujvzLz0_^S?umkRFpS6pMRscE(8q;_|_2mpXg2Zu^~ zavWRD5lRkVm7a>U682G}2GT$V>?Vx7w=BcVG4GALw z2x0tk{5$*G1K5|1>dhG&LY?RP2EDeC0Pp(xHJoJ>cV09a)T0ahCCt}D<4gP+_5{`z zOsNP(v^dHN%GNg#VaQdC;KYU}*R3gs^5wauk^LAx8gPP9|rv zfG6t)|Dni7(95rc+GNl14A-7EG7m?j{S2U}Xu8#`Q5>GJ6^}K8P9>CoD$R<0og)9I z=~*N(rF1RQAl!iMF3AANB?lrRd_YlY^uLOQH~6Js1QCL6YN@SiuC1tPYH4g~xZKp# zP*pvA=J%eut9MYXcBd=)JA=R9sPOxdMCP7+Z!bvf`sWvi%ilMR@Y(jVQ&nPR?5F-Y zfNRRACTTrysWY&4I9zCG|BmKBEpfzp>&@XYDAhq}DSj|5&e3OD{^i_2G`>9jK1$-z z;535V3pKfRJ8ZKgH#dU=ylnwe!<7qd5>UJxmQs<>GT?)vPF+EICAd~ntb|f7iXwD! zsrz>wda;AhZ&m+;u0KX~LWNJ)3p12-w$Bz~&P-9Vs|#}(udLEAb5mO_IZXX}X8XQ2 zXpb!8Eu1q=8>WoS0=OX~xHb~w5;jHhd;%{=3|1*svLp{iBI)RZ*Wx!LH2JeX>;SKA%SXa)%P%kDH9MC~2JS#AvSti+m_LlGz{nMceIeZs zAb1t7Rt>di3wFemTn(|1lE2^ip`7{J-UM8iPSX(*OtM{T2H zIa2iHV?!R|HD>OUC$XbP+B(P5jLy0-b;=Xwn}74%H#Kz&NbR{^R6SYmAAI3Qv{f9N z6T5%ZB->=)gMCEzMraLd+Ot~mJBbl-WoNZnw*^IU(3Kp zR07KuOt*JFU7T8Ln+V0D@NP5f=_#(Y_vo(P$~p|A++@9Jt8i~fcfdIdCAHBDR42&X zVYavJ3uhbM`eqB$o_29J>3*fy2$S6FvKv`pWgO&ENPGR{U8ngCmN{{DcZF6_Tp0+% zq{)?_sa?ev{zm1rD2hh*r7ld~!W{gDO664ED^NO8R+^}B^QHe{P@91T96Nth4d{+L z{;{%C)dq}#)`fXZC7*fwYJJ=1$!V_vne5ZwPai$ly^+s$W~EUWVaR8GUCYH}w1T|` zR>@hWyII%Ix5yFTkc>X1aLQVUfgFZkqGF6^d}~pUu}%>+#oo)?jR~s@inq5Zbllo* z{zTzSd6n=F94fvr>Vn@43!rcAMJ{b0Y8;JpN0)z_cx2`&*Go|@#C6Yvvq3_I@!?eWN7`Fgx!_5jJR()k0qFj9dZBuixc^IIec5_ADdSLCV@=9K zUyn}|Nss$c(!k}0;)3EzlzlGK0da5-uNVcx4^k}~B-|55%W=aZMn(*>%^Sm$6?%o! 
zIgaYJPm(e@K_5RFocgIz&8u1I96IYjzR6jwdj_j6ZVH;GZVXafKg$p9`bzpqzdDnR zeSFbpM_{teE^SqVvXXwCTfwCs*gj}#5*4Es*Q~U9E+hRjZOcU(x|>zZ{(B$1xgMCg`7$c zH|+UD8_Xac41nr?-X9p>i$Nw63_rg<{O&bibh)CafoYtORj{A{T`2zc?Fg~o4GXr_ zT$2q_&L8D_bz7^S^zSIt`Bp;G4|i+%)wh^62AFSQVsi8&hw7;SURxPF2chd#(brFYD|bu`tIs{r=;{w3@Axj>9W0*$r<*S+QzJ zpyg3)XDg$wJH^?eJIU^##^SZ2ewwv4nhrWya%asoJ0QNVw!cz9s@C#Q*@DRyU`jg` z0!Db=D22d+S4%Z_K_iQ%<3(9bb8pjAZHSht9qevz+>15G@LNsjf^%D0;$)hg+3 zHyJI}J=#T`$lu#0MwCGzZdZFRFZHi&SODRTH5viuM6V}G{=HdJeqNrcvzjHK-#s$< z*}-^*45GEwlq>Mc0N)8|{}K|%2mbRY-8mSYZUUWZj_>L6*0ylqXP2mZgdr-Y>%J6F zvu#nhEQh*&REuZm9TeZbIR&*%5uBXUNH*=WxF7AJ}|CZDyg7>=visDF*~ScrJG z{vA1!#rZjTT8a(QUjpwhOq#_MJwuJkl2EhSfVIiRzIOK+NZ9;Bt9G0|P1RWRoFYtA@r_ z0)PRqk57*}S)S^<+gQcRbhAlYw9f0-A@i28n#PIuPw`0oMgp7}^2UE)j41kkB=NdL zgQ@pbq?rU2hzVmY@t01fUi9YD-Bl;qGb(jlgN1gT(+lR}SJgA`gE}g)KG^D|&Tp;z zDN%k+0po`@&M)A+DTXS_N5wWk_xZ$8s@mR=l>rCuvTGMp!BlRl7T1jJP1ggIGmpR7 zrCLr?tK8}aw(k|O6u<<@e5&Oz^X*oN>3V3C;&nsgo}WW5)?Lf=4QD5Nlqhy*Ip@)S ziD&~e>+2Ht;*qUHeQ!YKn8wd#DQzU^&leO`POlFql<83WaD6)V!j2QG_2AXtX;uq< z_tvrPL#?-b+EOfrZVk0c5-S~cc@4Il>U&)ShjvNN9D=eYB)&$=thv#o2L4#B;JUfgO3)0ytFK2l+9re$iSN4`8*6QJ z|1yZXivz^%&4!m_RMPXZeZOX=#}J;22*@fQt8y1!gbKBsnh|%O>aM!B(U^`!n`Txx z?w4B`m%ybwAx>1_CyX_F^El0yoY^Ap^TebF?{eJQN6BdY)IYk%;_~1#&|uZ+h5fj3 zTkG{dWA_%qu{j2FZh=$c&lmq_U>M7Q*a2@q1FbcKm7`73R&aEp4A`@1sxR;^MT1z- zvxm>f3JEpRDZ$~thOJ>qZKwPO(LBkr5+pqnDnOjYskAUr`$feeTXn?d8dyIWwdjEN z-5zh>3+!6DCDU{X$Kv4UH@->sb{%Mqu9@8-V>(`~t}WI}wmeaBe}+@A0U*Bxez+5% z#-bNu19F3f!_YeR(EG6(*8&|t_C+D5KUzu3t4&3>g*#J>q zX5>q?6JKL&2-_SJg56^7Iyjh0$rK{D%!V_qzZRk(rbaa^e5oFs#;(~^&wKFK zEt&udvfSq6E|ubq2zc1!OrMQbjfQ?Q?ycZ>$PL>`>q>*VH3HfNvX8;Ny4(FiH7J&i za)S)j0n>UV!=6JmiU4B^aOWf|>)B4XYkrI)&Ho*G=AHEQlTtpRoQ8R=%j(7E`fcPx z0jMzFhjTb@s!k9V%NQ;)b0*eRjwA@9DWf40CbxRj#Xz-CVqA1BVtI{-K_a@!sKJ-1S} zxA9it;&Zo$k?Q%Y)%AKi<_&yOYO6SsCGq-}7eR2ipV-{Sck|06qhEAc@N6 zLLM)eDvL7Pj9bPaz!DYsyqliBMI~KV_`}8ZZI69(`a9XU!Ci~{J?YQ8C_Kpl!_+Fc z46Mq~%Qq$HopmohYERRBoOa9DSz%fk};$P-JHV8|JAEFK0NJp;d>J`ZmUBcG?flg-W7xw7fYMt}8no 
zV1@Cj@hQ`DAs2Swr>XS?#!T_-{`(YF4($+Ys@8>^vaq6bB4j?t#-K(wSt~~^b5ZQ5 ziZ80%B(ZXm>Ov`xr;Q+O=bRv*MGlO*OKn}f`h7AOb{iPp3j=4Z1DeRqX6s%ryq{Uo zVh^A^z>L~gl*zWaMqy0|{$tYDj7M?gm>kdm7BSsYlvEcMX(y4gBHL#m?mApyJ3X6t zx+mzT5|}4Dul(WKL-~G-hb_y<*YtJR-tgH$83zIN<`Pyt8U2iFpmz#vrLkVRI)#_L5MEbz5R{c*$}iR>qD1141g3I_83OBR3s zSF!*V4B*zf#~0Mis|}3}?bVlCuHL?SrM{{5@{QK!`j)Ed#V>*UvwY-O?}cWkFK6_g z8_Sg1-r_eViwwif^j%CoWDDCJLg>tguiq>D?lnlbY=YrRqJM$@_f#1BW8sd3#WdJfi*l2s5E{%KhjKtwMtHZiSSC z#FJV8Fp>vmZ{2IBduV1eJk<2-tUX77*}ah8Zq7NVxump*O~(Guh$J9Q>GltEiBfmPiv7SNGb3uhYk9^+Ns`01#n`G~r*O#VymT85V{R{jx2Ifa2NlX3>`0wJ#j4M6 zChDl<$dQL;2e#CPJQ@Oq;y$=WnU}87(9ss(c1}Ezobhw8^EisI?{9*941=sX6CLHp zOrjL^Z~vq^zJXEI``d8yR+LhamafPsbe_VdqRqr1GJyNC^>gb(1>hkTOH#kMdtGv< znd<+~3i{4%uc0UDH7(C~-FrJ8nyy*JHuLdw9Z>bDSV7R-^t)NB-|8{1Z-`$9a}{Ut`zwzNT~x=^7*3 z4NvQScqsNyh-=?+r3MJ=?i_m;?H$yKR6G;ZeCRrBI&X=y!#7+*+pmEXCRvKj5!=HT z0Mf101RG%cDQVB4%vRdPzyF*pRNQ5CD$i4I$mhp4`g=;cfyVV04b`K?#FxrY%z*?E zs$V7$I3SGX7`iU9eq;Z1lb7CM&CcEZ0TrnLP7F1=YB&`=SyzN}aF zdd&yZEixy9j)tM=9@_L{?O>n`yt#TNeUFAUJkE9=Q z*q*#6HsjKaYK()){QKD1}kr20Rn%o1W2U7Z|B`IT6K3HxxCYC>Xxs5 z@s*ovL5W@ho7s{9s}i=~e~w*X@^HzJy&+%~;ePqCHu2`dg{1;uf*mH# zS{U^kB?K^JpHdkISG}}O;eTW4)ifn^tk$q|y^AOW@AD~;LRx-e)fNl30)WRr_SFRm z52zti%zGRjg#GF*nmq)wiG2#9EFYl47ga*3P+6R_La`{ss6c8oYJv_|P_C>XaUwLV z?a<){V))xBr?OWw*yQK?f~^$PwiwAi#wm^xoW>fCgktwk#kwIJCtsgYx^nkoA)*(V zlkIIm%e;L*uJz+c3nSs4*(jxQ_Nw>ms&c4M#rT`O|EOXg0uw};*!UbPI#0g`q~376 z_`En(pSNxDI*_Cdgc;yLRv@}?23sF--`(}be}}uTlxYQ1{LtQ%_t6=iN=mS)%>IBq zvPj7Rk+|Bh2;?1d_gMK4hRzF@aAhT8+vVY@y^C+e3A%swFN|MF`5pv84iO~?xQ@X# zEvPM*zg^VU^#zaYfIa+e4+7s{JRO-Gm7_9JpT>9k{;c&bQ7x-U|Ixoczg4(Zn0wls zelm+EzAoFWcKFDc%}?`LtV{;tND8SQgTUe`x-?y<$NHe<^#6=n6Od^voqN8?51>2J z0W~CYGqVnv!BB8kP#7stA?RnP6}Rd746Rl$*+baD5rorJ4{h9#UR`xWJHkEj&!&@k zxc1{M6Lc(Ar3-Dg64>9v{eDptW`=_=s5_|M>Ug-YG-)T^*~P6bI75slfAnHzj}_E7 zZL5&GS3&CL(s@j9c?~bUj_3Gb2K#-ttUhTFsuVe}1|(s0{Ft0^pgsavHFg4mho#aR z9&YzPKKh$37g<^TQeIrQcJ)&t`_rOTlyJ#U-vKyWExFGU-xZttPAH2Vp%XeVFdoyI 
z5f?^b>kpFm+F{Bj+_;mWJpa6U+M%hIuxpWWWrLcaC$Dau{QYXf1E*IHr1AD?A8*v# zzuY{Tp(0^y3oEbI)NrB*>OzAMg*HJI*>TYXFIa4>(Uf|&qH{ZQ=GIhBE0Kerw2DyrC&U>i~|0y{`o-a;tqZL9Z}%>nU-Skd42wKz{253v|+Adv(hy^qkF#k zJfV&u!*S=h_(tpK%VS#!wK4b$BUzDJy3>b6#rpjE_W`|(CBCDX3Cw6Xi(D~CVwVa_ zzMnpE_eo0qnew*3tbY&pLb*rv-|zW;Q+i>f^eWAPSRB%z6ME#;b;6^dg|72}sB3%2 zsM?eOFA{EUo((T&rJN)hu#)dZ^`a)>p;Fu}%heEiE;*Zj1!z`JRon|gPv>F}z=|96 zvgg$Qe$)EwB9h=|snNDI!7^C-G5a)2g;S9lR(3NWiw&Kly0?)tdwZ$p@d7U8bT|d8 z;Ek^hu=kLP;=Cdh zogMEOs?<-?3dXtDH)%yIp$gi#&oi7S#BNT?N4OjNAgWz1k<0I9J#MgeuK}TWB%#=*6lxjKD`gJDf|AD_4Dw7ft?J(UeFiGe=S+7@5x*M7 z**DTtFyTnkwQHzGz>5T__yC^Zi2{?}1gc-j>+~*nmW7tQs=gs%D;s|OQq8Fq195b< z775|PH&JQsPLskb(6dbaX7AH zYLWPTlQ1juWR)q#k9)CR7q1}lHS^<``WzoWW>=)!!rL5Exg@iTet7A~W?%&3czAE` zXpyshbPZwDf)%-9q{g)r9G(%7EGgBtKHz=j=bgW)>bFqZeUn8~!7t#{u<*$GkA_uU zV=7W9jF1Oo)U|q=c;p+}_j}3IiVR| zbl&wqRkfnD82*r`1}*IR^X-MjTh~p#xJ~p#yI zNK+~;xq$V8jBzdO^L{jBJn$LE`C3AwbesD@-|r_xj?Bwji(KuxLLN4455v3}JiaUNdehVF z>I$D~`>O{m5x02$`Q@gjkW`wd!uH5OpEq^<_3LFx>h#H0`J$KBgJZUvdI|3O>kmjJ zF2_*AVEN3YvwVjaKGefU80ab6CWv_=C1k@@z>3>~g^(VpQRB79<91AfF| zi>BPjNi@wYtt`^}oacPDBv;?ePl}xEsto_L+-2>__T)lSVl`*iGgg!)1pmjtzdD`3 zw}o}CN=>&{uc{`_Yxd2;ljG;x#cK4LW2xw8jq5Z+#{xKLd`MMaJh>*Zvi41Q=l2=) z`2QK(rHwoTh7c?RiH(Q@}ad^OE_9qQ< zl`0@PA*wIv1YUi|J@!tr{K7tmo#mbDxo~W`OBmaPLV?Mb?L&c^KdE1IH7oHimZv9< zSJ}R2)=cM4`!+Y7rn+jrA$f;Sqkhruo-3ndVf93sEc_uVk26NNbnhlfwkq|Auq&Vw zF+)6W&OA0t=pHe{pbtt1wD!+q&5D?Ir*{ANbnovMKf?Z89ksAq`0>;22x~sQpoQ=* z_?PMK#v}gKCI)%!8BK@nk&thGXP8a@{@sMXf#o*%V2~vRPM+|Gzv1Y9xIsfdvQ|N& zrd5ul9RSW$gI7(DqCMhhlBY`VgT{$)Rqe8FCB`YE`rw8J1(l(ysZ%&IUkMiaq;i;6 z{goQmAamwuta0aCw7CXDpviVs)RF;j=%Za(CCOD`@sDrUzgoQ;;m_;ZD4DxV8Ca7n z7M1)c3HaEJ#D5y;Jm!?`C5_&AS8D5^;9>RU*7O6XRpPxTghNRKp7!3`ZCy=9j=QC| z#9!@qb5(m@y;a>+!6Wpw!uHu9VNNFhiVW9$4YdWPb;x;yIvKvbAhdccHn;_Jj-br2 zJq2bxFo}@mROrYeLI}ciDod3AWo4l#9e=^ob^5KkKO37C+f9nj5q=pW>HmKlmK_hY4?>GMhq((DI zxT1clH}SK|%68b@xaaIa5d#P*{=ZfUEF4rDz^o|9)A9vB%TlLC(H++=-z_}1c&ol~ z;lIh@duHTCcE^+@%fH1OF#ZhNo|Ev|oO21S^a 
z*71?hB|#`if^p6gS&rHA4ESwRYiv+$mrh+jHW(##IGV1Z3_UccldfSHY8WLF7F8Z& zsrw(dS2z8bt*>5`%1l-v?fxSWCM?&8mI%|qk~W%<;{~}wy1exTe8vPJQ#Cvtq2?`~ zuzU{QjJ~!;JixX&aVlyv^(qC$+!5S~zC4o*+aidJlVy?<{Jp zD4p}OuU(lw=BWZa)SW$mU@0WTSMQlY(6&4~h3|a$ZTD2guao9N=!=AH<}$DN=7;Tr z48LqR=k<341Srro0_BqR%m|v|r5(*VkN!J6spIIT<;B3?iyN!+O_YQv0&@-pTjcn7!eZZhCB>G3Z7pJ^-ZUo~@G%_X>mvHZoq=#70 z!n3n<3|(xW6D;Nk>;!5o!N>@imy6RIxf!{$8tW2XgUZvP$?F}(EK0mrxG2}eU7=o0 zx8#J)B}1rym%T_ftDV;RUhKm<2Zwo zxM3`-<-M%RxB^$S9PgY<^cJc2UZ07v$-h|&a@n(26E)dh_4ER6*42WI6D3sN+W7e$Hqt{qug;W z&%mV=UJ})gDUuL%AXy0%JF9b*PEL`7Hj!NR}H zB*uRhLVYC`8B5n&&eib=u)r?Yh%$d2b`Zll%ECONy4_--QMqxYc!=%zW1Nq1T|M0T zO_uQD2ZmJVvS_Wn(yUxZr(}JSN`cR$d0Ampt*HT5Vxmm;rm#3JZc&E%(OHhpLqwJN zB)L6G?qv|~+4~ML)X~`D#n8d3BC*fTTrBRdD8gqKuZ!vnGFUlmB-HWc6tEb%uCy)a zpHs#?6~CR#mwPl<4xHVM)L1`xurGz^EH4me1Mr>V@dnc6m{Ro?rda&kQJ3qq^Ncldnq-^%^&|GZ3|2}nwd z8X$MBg_I9Mqv$A}71$UurvwxS?*#7rg`RcH{T{$b_DY-_xYw=c{c*@>I;@+mWYD z_B$RbD$TV&94}#xMy{UeV99(O?SH)g1YlJzNh=OGEHqc zdfH2YRcrj?)|+HL!S++|$@IBzD1N)1J4OE*J)C6Y9Y${{`osSXW{fyP%`KF-+OA~#Z-f;mLPUlVOs<8^Nifa)pOr91Im zb@N?~1aS5qu!mPqpsa;lDuhm*Li8gsX3*ux^z=+m7XBJg1TJze(~UT96ZE!tsc=Rh z6^<~DQrHR#5Z1Z(Hz!6KAKh}B;e=*ePpuGFBL^L2uS(_~C?Ctf&UwKZ z+E^yvStoUq)R0Ni(8>04%u?$`L7`PjlGPYq&tzpE4L|^7nmnsAl3*d{YYwU@sINwr zx!aACJaX!7a*;VPV??H2s*7cQsRoAU%M)E_c zu4Z^+XM`+me5WtpQ48=q84~D_o7;n>GX^+dfJ7ga89sng!qSJ&!Jezwr!k(y3mp;p zF}aomvv4P0v)X}KkkG}{$5g9+lbQe=i)?^Y?Q%xrLV&!p@M9L=Fj;GC4WMiLN>vd8?d!Fy_V z-c>S{l{1GO$ABDrhe|?Tt050Cz-Fo>=|(tf;iO7J^q=yLEUMo!CPv-UHa7nJ$*iev1ggT5EEvr)%EIDR zfZA+FwS0^sS<~B$s$o>rtH7nW&>UnX&;*1(^comFBW-uVb30+7((q0 zGV%>F41l{vo?~9B{Yb}jXdwyd04Plv>SM)qk?D#T>J(Me@+>XHcT)wZVx7!%Q7YfY zJ3I4Xf;f~pyi5?Aj7MN#nQJ6qO5epk)p9@U&TPn5O&E|_hMD=*=KP=trR7{*IO8?7 zZD?+;g(FRBEKGm`ovYs0%C2AiH~2v8#HTuoPG8H~&7*%j(k?r(*KCWkvxzRN1d+xp zbPgOI?bp$J7y=9JIqG$9vLhlG$f`qeRnv9TP13XCNHiRIN=;B<2!&zPNrFmkoR(&K zDyb>c(CvPj!N`1)Q9PCo+K1Rf2}$@2>EvFY`A!4XTeqfUbOa*LI23lZA0DF zHEOGl+YWzKugPS%QRR3HPaH~{u$y)7SiVBTfc)2vb@?)7;k0?kgwNi)Xa3i$i>;y)~62y5~3u%KpDiI{y_ei~MsATQ}LY_I?$)x2} 
z#Tz68#_?TqFU{^83EW64CUr(#QOqpa7MUXvQo_nuRXP<5+g6yKjqF3Al6?5)>5&W8 zDeCMu`7Eu~Ne|bRi)g^;@VDDfmYk>VKGqWp~K#cP8NnrRIsAy4Q|IN}2q7oaq1HmDGSF0okJ&O#&m!q>n9|p; zB+Y2B;%IY)sXRF}v3#J$YWrzbpv!19shD3dJ zKDH<)pdb4}!_iA?9PG+X8m5j=!yV@*ueY&^auYVH1^hWwMcquM0YW696`4?krd@mE=f;{siCk2dzB5j zBlY7-$EMdJ!QwLw<)_8GDIu}@BC3)ol@@Ta*Dd_V9*Yf%gnL;I|5uG*|5uIh1J#H^ zzl&9<>&;i|o7$SM)YY{$JqS+8DG)y&zH_^y<-5eYX6}!T**)SNuYddS@Bf;0#h@~j zO-`m4a?4EB07vDxLJ8uA?(V5f=IVb^8*rx$S&~KjM&zLkeG)6en9~A@v2&@6Jh7S< zXcelT>r%C1n_p0l&PPRQMU|EgZpKcXC^KJsdatW&fmTdQjx>Puzr6WWlKyi=@NokUHEG@}$BSM-#!M*$*U1JS1Z!X3 ze7}1xY4c=duKvLk_r~jY|KvWt_Gb|SdH{aHAR8Kv25k9w0+QjB&OFRjb0%j6W{I}W z)x{-I*`^s}7T$P}G|Kf)WDgduq56DFx+{T~Pf+Q;s)M0nr3qyzlSNyB=7f9gGwT~M z^n&oZpNDA^Y!j-#=ytJnTZsc?{I#pybetqALe@7X!P2W0-90ocW5Zx!q1%CG?rw(p zaEGCY^H?M~F5 zv<$;oc0AQAOGLzLcE|#6MK+h4j@q&)9t--zo>C#}75mMw;>z~=nJCC)L6m2kS4a?3 zE|nI-%Z19vh>Cy+Tdwbt5g1i<4RGAb4=(&%NU;{9)Q!%x#=MI5zt|1*LBV?4B3M!A zhDa;htE$?lLKo2JdIFkX$6e^S92797u*gRaUQ_-(&Tgn`YqYT&>XF_*1DghCgX`{! z6x4ISn00x*75f6|lc8mGM*USiRJn*{Q2Q@=FYjXk@s#nE&^vqE9QKo^J9Eq+sTn39#3nA=tvpOp^Gmbo%^kZZ zJnMZl+CFN2Ryt`J?B}-RNs^<|XN=?|?YtLAUCoY0Cnga&6EjTjr}o>Equ9;iOzpO+ zH4>UbayN6*G?o`;9F~o0jvYU-+&zLUGs6pgrGWBK!9whgZ@dHdyhbqEH;w{`lC2pY z5|1Hx5owLvxIS6PR``O}7X8g~*7oJ`3dsW5CzqE;(@CZ&Y;jwUSndH)m~_nsVu0^M z@AAH$NqNM1Hyk|l@6ulb|J><%)pCk^b?Kq!5Lu{h6rKiInYLiHWX0pTR`?MeIsVcC z#U%Z)0&A-o@&kBA0)3{SYJ{P0n=DFxo{&(WQ<3j9OdFRBk|^)d7ou2&L09trmOROD zu1)pOj;4QkojSLxxyi+?3S%dZn-8UXm{Qo&LSr&4weF40)_}J41^Oio#czg=ZV&G? 
z^iVp^C?x3)g`mVBr?8?gVHyA+i>_kVYsbQVKKf?heb}b)NY%Ve%hhXN!cMn5L{1Zy z;u|l1uwjOcNvk6#r1!rb4u+v4ii`-F>A3ya_y&}tewe38n0#LM%imOyi&a_Z@cs8Y=5^!Zd{})9tQs30#qyFZ?kdkR|g*rcq)GX z3b$)od+=8$D!TippDlxHJ`NU{y+ja}Tcc#{O#KZ>!>p(?a%_n03&j$)%4#Hej`(Sh znkCu{dLV(HAR;*uLxT#^w=)BECiUW9;U?XL#PA(2>05vQf&7K8DNzbAI=8R#yqbMT z+4cT9F3MW$G0*zqrjD@Py7iBnUCEA587Mmpp`P%C^=K00V1Dt|nOdW5OOUuFlFHFL zfG42MO6!Y`qZ_l=GAW>e!vv}7G`#X5O{V;ndNfzQQ#dhn zdq$FTpQ~;}^f2;ozW=c)=PG}CG&pEd!M@xj}RM#MB_dkftDe* zN8%}%%Is$rGiu1^t0GJ9GMtz&*1kfrgFs=PHIt@c?Na%;#B@x0pr;hR+A2P>Kd0=J z`bUxB`;D6|CMSB`H(<&m+nwL2)tbcbe!XW6$vVB7Ke0J?;fP_)aFssR(#h2Pk$)!6 z75SiiTPRHt(9!)k*V6RbLK#Svz|*Q!{!L9T{T?B~ZE8 z$9g>$tx*_h!B-MEJZ1KWiX{2D69ghM-Zt+(2vsPGi?j+W6ZVmOhq%K`J8&a%k~vX` zUgP|Y6Y_j|6=%0jU-v1}kY1@r`gF?sKNd^B{R8M{2~c}`Z|%)h-23uxhW3zEGBD|e z<)0Y<`V(|!R3kb&$FR#LvHX#0n5ao1wF;dxH!*PzJ-)1H2mnI#St1a;8WqxdD8{Tg z;jL5C?^TZ&oJpCHD6M~;4hW$aL_eM(NV?Cyps-x10x4-W< zLv!i*UB~IT^G?&k7M7p7irl-ZT42884n|v2K=wEqbC+Z_BH~7eP)L&sBsS=FH7@@a z<3j%usGWpuZdO;+QbC*@0Oc~ck(XlzCR;0=)PrRhX*nRB_8p{h(~G5G!2}e4XjP=r ztpu@h6ZcY=H2?(Lgb46;JL5?x=#&E^a$ZBqr3gWlz_(uG@oqn}zR%;LBMSTq<%=so z9Ud@hG2DKVc~}c(J{4fkl*MS{a0~kk59n-RTkvr%+8h}C|1k9?P)X(S-~V+NMMFdb6GOxWmjXmX zEyo2F2uX2CO&t~YtWhH?tH}iw4GBRD+;Txu)6%rX)W&_w%%x3Br_vfVt!bZ`rslc7 z^MB6&_nhOA%Q>E-NAA7nem~#$`}N8tm>R+g^Jc&zy*K)-8bhGwZfmFnqw4cV^y}R{ zFj=SSU{D&%K;KxU{W5YcCq}yz@;B4fGPJPL4xyf9D-^ZQB2De$9}xuO1o@n_t;lLy zHh{6NsS@P27Zo=2c!Gb1P0B54b?}o^V(+}5PuXaIQv<%x3=cc(sRAEFHD*PnkCrGE&hgAvoNfr^@aKy^WbX_`7*k=`(K$M*UhN%*=Z5_pl=MjS~a$U^MIGcwR`-$4RUj^(p0oYtyPp!E6!kr8Yt!% zkC+;u6F@7~D#JRorVu(v7*Db6@EmOdXGQTX_$*`aV=@{u+WF6dVbAYNN3R_A#T@Ke zJ$T^HUr#>ny6v#t$_rz}rLt=Vz#O0t732%3t~^~09*6+(s# zc&6?kNi>Ig7S!$PmyJjne~T=J5mpjkua~DRBv$Xp7Yj{uidJm5TEjk=#^Yb&sIyYf z*kb$sY!3(o3m7M9M!fuM#Z&GX{4OJM(`hr{*fd=|UNI+~jKWC_K{vg79x7DibP%kC z$H%AQr*mod$N-w9wH~r7{p7iRjN119j@^(=3S(0QXp8za5eS{7<2)tw7(VWqa`me9 z&4(|1IxfZ>&|I{b2|9dv@Lc>)!#7~J?|Fr)%MeNs^9Ot1VZx#KW3if1W?mXNjqw?p zEsdaupyRCXravguRml@tX)>ztv^EvI?q(-g;w+h<2nRthCLQi6O`dialzLDll@|cm 
zBy>T+346;GtQhFOpe($Pu)OzxpgT8$+3M3Z>7mV|7jIauB(3^3&ok^VX zL^Tl*IlgYa*^FM+qi~;=1Yf(&=uRFzuByA3A)ga_`rkL+X-+B|(Z*;{Ap?H8FQdta z$E@5q@ zDt;5O@b=u=h}7cYD)+&?J+4Cl z&zo2L!Yt19t7~5Js;)ej%DK zEV<$7jB*i3=4*)-zuZ2A^2RExF5>uMX01_uX){wyj`JF!D0=ARl%su@{;TJ>*GC)y zw+l5sE#aOBOkZqn9sPsXHMZl0E#M^TenrZ?^blp~NoN5{BAXv4lZ5tj|MQBnoKq4= zlB@vO9+f**9>Ra1(KoZ!|LXiH-k)du88vWN>EZjNGn1Wi*pmNMT@;I?;?{_{g6YpL zw83l-q~cFh^lVVpD8hVMNgDQ0el&p{Ky1|ID4Y?wWgNr{>?~^pb?EPBr!vo|gL9U@ zQf6xx{FEpw5ANcPuW_AHHQbtr%ea|aOW%)eYCT4V8w|B~v&H`Qtw>4yKNeq;Y|skd zIn$jN49y&dU;Z=jYr|9p!W@hr7Dcb7{QvBS%>NxB1cTSdt_$~Vku5D(uC~qcB#(aU zynXY|^($98S`{5vuXg-sjCoaR_m9cawkH4X5!*|rPqbsIUGILLJ#gJw&Z-g}&MskOwje3zW1Ab{fU}D( z5khx~^TrOE-o^r{@lNL{pGvh;qcYA?rUSt4C#|3H@6O5}YW-9DwjA5eZiD})ed&yQ z+sC=VMTbD6`@1F49AuxVr@xkcVG(X#^xEJE&rhnz!&+rZ(crdZG_pWg4$5^?%BM0? zbR#U^p?iBuGtc&2LcF1T?gGMARe%~)LV{Q496diRH z^enkAR1)j;)i%N@GFVdx0)R^Y`fowE-&h`TwFiSZ$o}2ACd+Y`e%~oE>#oS%+recz zbX)|Sfkze&2rs^qEgAT2SG?+i=b?Z4-M-hR#+7;ZwFy;_dejHTH4 zo_hTAtpVTUrHZEq<_Mro1*I)w9SsA1Jl3OyR&-v&yolT6|0Z;sSdf>gL3SSWvE|Xl zRJOu8$jLxi8>XY5iab^s;-TCQQ;UXq$3WUKe4oOi=tqM1KorMf<(jx()8*>^w~-gh z9=SgL_9fz5&cwCfzG-ak>|9v{37LDh_G_4HXJOs$J(Dpv8 z$+;0URRBrbp-7rJ#f7@d!ZnvCDO0*t)BYByk$Ieq`Lxy^Tuv-gxXur=h0X_4pz543 zr;fu#s$t8eY10euZmD9T$GGSDr8oqVj&ov<@->sCaw=ZUOc7`Sx0|amYEm*|doCnx zt2R4_YlyV*;GV3)JGD~i*BPpy5Cxp_<~NG(S6>&{#fs(7!6?jfEQs1qu!j!zal>-H zJ)-5a%A=87TL5a=NVKo48DW~asVlh$8YL;MevLC9)(RDo_&jYDnjl|ju3o5##|^oW zOJL|Uz??C|Bha{HlaV?DCYjx79J52+6ROp9infPKszQ%MXE@%=@|cVgA+B}x@RPC& z!5x+Hc7$E7Th_HMT>e-T3A)@k7%OJ^!3vW`+-C+37p7S_&l-)afnhVSl0ye0A&mM@ z`!B88K1ZFpvi1LcbMw=%(L#o@MATSo#8&)itda_zDJ5ve z6bdPVP+=bNY-d~IqjbmZA8WEaG(Ct1H%@VPNa!%)QO(#(W{Np*RhU~lS2Uq%Je#z! 
zxERjer>C{^Vjs8qEzy!6NVT6@PZX~;E=NTzK#jkJaBrV|lq)qKg>S9#?erX3MWk}u zlrzL1H{^5vzV=9+hI{gb=$6`haszxcU!^a;<^-rbHV+`2RAKpS18a}jlP`nLuS4H% z>s~{vlg{oBZMMJl%Z5VoDn!q^$h#SW)xacs<%YP`VT-(0QV4k#`vo449PQ`Qq!~yi z7~Q1%RF!0qd}sYFXKKq$xg!*L89s5i3BK|AUMF(a{i;fEt{}%8laP6Fo5sLlcSD?t zW^CQjedn(*70V$i&LLgjiV|fd8~UO!rJ`))U}eJ&MRDyz)2gbfN!_*S@u*4`BR>u4 z?V;1bajvNTqTc`g)=k}Atx$=(auj0e(i$q#WagiV{c4syuDo#hfC{$t#)mF5k4 zDAu=Xw=e=a=){0~KyFDP(vpLKj6Xb}GymWH)0s*ZdJHGSsUlj{AKa)}b%cpk(d()0 zTEqHd5W~P4t8$k!R)~EV%MiagUELQ!#SD^eUar#0adJ6QN5v91l#A1!V-jWLZEeJk zW$FC9BgY;Dsa6SU(ayS@>nMdcT912wHpKg;Ug|=Su7FmWPO%Q0nHyCPuiQL$=^hO3 zpQk1?tGzV#rbAqoU$nf~(Af7g?6pfdO=}!x5ue~Po+6Q@*@8&jzm7CGoGejI4kKYC zwabK=9E*CoTpxY;-sGfe`tJ^plomGzM z1;d5V(l94K59>m(N12tESE8p(h*B$LyMbm8rb}ml#T-&A{{dx|S$g`45)g^RMwx8+ z`Ns$~_iZWg2!h(!Uh>{vU*JJ}YElHDjDK(Edi5rgxff`Jxwa1{<4 zg?43e@}DExlD2v3C+oj;p#W4F&m z1VSfp;x;GI<@(4n>b{YFKsaMcRT@H?`3*6kULYnLe|BueOc&s_{i5A|?4o~>ki(_( zSuVQXzByZq>0KM@^%{8r2S(cIGN@A|hlV+Q{tYc*4>HTdAF)y9CI^5~TprgXgUL*; zvHH7@#{{qU(IhXr9Cv(2etyebbj^LXkN!Q1Vwn=Zu_2XZtWt^tfhTHrtX@9z|MhJ* zvRU%^^Jlvv%CXXm-*)7$$DTfYdh0OtJYo!|aT^)6)pWb;=%HKQGMmMKK03(aIk8I3+VYq~XS1{N?Ln_}r- z-t>Kj1D+ssHs;3D%x_2XSuNtbfno%I^TyJT%3A%)E2O*PmiFg_GIqLH605pRRo8ew z?)-CtY0Nmql%ID^r69Tn+F?H)+l-bk{`|J|z`3g;hF*OjEIr@W{B%BV=rF0jyx|-C zrRbh1tWE5>85V?kV?Q3O*!G(&DXEpeb$fu<_)JvdL4Fcab{orW>=vS zziXyLJ@7hZ9f_R*$52KAA}OsAB-fenW-!?xGO~#0rm9ryp{K>C4a+2{H(;d}+|iaN zi^I#L`VhzVpEPUKoP(UHdauj@Q8u|V7XrH6P;{96Xv5IZr?vZ|)m!DI^10Ks8GTFU|Zoya4b1kGkBR(D$oIJ_-bG;R$t#Ac465n)z)v4ipJ*65cQPY zZk?lF@kVs#n4r59AH`%}Ns^6~Vp9bDYtct3%C=8*(1odC&OwCL5Lt{)2YrEgsayBZ zLt)3;!!cJMuo{}MO7wuem5%^0D?dg_P7UWJDM$VEraPUujRy~|lX(}UHQ%FDv1Zp& z)BqDFI7CrkCOmz1_g`2aZ2za!q4r-&G=A@NdKBAJMN1jD}!f&RkDzMGwWBEWq#nxg9rzorUP!jRi#m_-}_dm+@ZweHyMqDcmsF! 
z5F;vHbs2$9P8S2f^|4%uT&3GFGOwFI+tyuila^&loRC=|mGt#$aU30@)VJ@S(TUym zpoe12KQH#08bb*Hq*2b-0#0j=cSEcg$LNuegxhE5)x-!|OT)AZhG&tRCS8^G@8u?I zO_t<8jd$B$v;UXJrzwGy}5gP`91J7MoK{eJ7DJRhDEO zFv9H`j-!&$_M{rUsCVRiB`zD|j}Itje9RyNfM%as6Q5qrXyYx{=Nwxe3oEcHpyUnh zN$}pEWjOx2_qVr^o1{hf=(d}OCyv;k^q%p_cuOtFc1%ndBwR)t0e#sf(>@x&oyN6N ziI3$e5iGgvt8KFLtCbnZb{sG4)t*f- z^RA6Qps*C;wCUIsaX*ekZuX5=sK^SDbfza3OFN-;?4KHlJw3;YLU_~pG&^ZTRc$go zYvpq!@Hn(O((1hD>QqH+&?nA4Al`6wJ zS(DU3IL@3A0DVfzcQlP;rwv%U#`iurBlMac+qj&1n;u#B<4AZQO$&l8&ik|3>0>j~ z8>q2DU{%`?#n#oxn{hawoV7`=Gg`6CwcsLSThgYH3Hh;_sH3xv4|Bi6r!*g%`WG4Q zQL3PPp1;zj%r>h;)rl)ha`Vc7>&iDzXO-FLNr#3*!zb_ zsVzpUk32{i3Vgb+_X_VY{r^6)CNzRvX{^ z=ZkCix5dLtBr~F24sQCEjz+-ayrU6`HXZr3`SJ|&jIU*}XJ84S-*A-fre3ga_DlJr zcW=*0vY@&TeU`|rzb}Rj>Dp3}O}IFiy<=9Q$8L;ZB*m*P?IlEafI!0(!Q06$xLH!5%M5cCDEwH^xf%A~Td8ZAYsVI&RhEzY(Nb^wjCh31 z99Sx#yX(1258iONk?+dXD$*PX?`UmzuuVtBy7|XVl&W#mRaD&ri}WFyFIj=REsVv; zv(_&OohmZ&#Vyx>javQswI_VtsaSk0@ubs};+O$+QbwyPadGj#Z||gfi_6H+q=)Uw zls1#SzXTJvg!l|+=)Ebu>GR@&_{m+5GPt_htVJ1dnW>>rVPgufcj-LTaQAsWOAmq6 zNDLI7<^lT5ku{tNSV<*%lz7i*%-b|QUVazD!xqn}ZqF5@HUrTEbO);HhyrKO0ex^tA3KqjHyONx_9~pP|^RJ;Cwn9ff z^6bq~XOc1+kgCk=k$U2tfj8Bgg$<4McGIpbyO)Jhhc1gDA%M*4(F_(Wg>~D0OErBJ zz`#q@#fGmTKrCF1E2TQKRPH3u+YETL>j4@^C#uc+gJIZ*Ec65X!-9l$njzB&XgeDl zqZ@aYe%84n-;X~>j{j(Sks^7|C7?*m`0{4DxnTC2bm)n@Lu|P&*1RmydVxOc|JCo4 zbHqR0EXz2YsuQ8`wdy?{(_u3tFY5lg3eRsMlQ$N!!8E(5!K2Kxn5Muu zc2GlmG)9*NbBCRS(fwR071qUhdG6;Wg1K>KyqxnuRgHuA;O;o?uRBFCN5Jjc*ehsc zg!XbUdhH5azjovBBcff~mzGBU(OZS;Ezhx$KtuPZJKd zy~{X$=lD^#ckEZR95uigO|-+ijixyt0Q$m|?u@Q!e#JJg?$xE7tQ4m1tSh>+lGTB` zjh^h=TGq_WJW7LMG~qZO&|Dn7o2VdgS49|%o)>x~pE$^^WCqO&3#N9ozHuw6<%<&q zwjB&Fudp=dG0yFZ5^=1H zxvMjuJl8C*dYU=hG{T5^HHF`$9{hBCTD|#&N$U2De)LXrAnS1nP+h=a4@yWW$(IsM ztm!)YCfb^+>gL`25N<0%>yMSY6cW|QHg$bp43+08a8!*(=BR7XZLsEx030X?-5XM5=k20$i)*i47FIMi>^M9 z8Yuc7%M`&00#sWX$q;<2Zqt1`_mN3>n16<6^-^p@Sr2(BbrM-&1l2Pip^M--jWOxF zA=%h@srm=H*cr43tWsF{MsZfAs)dMGDyp^do+Ppr#gL^{vE*>jyo=b5t2^)ETVblg 
z8>finVTeUtV&-)>|BN}7hqiCszlESUh-QhJFz7YTs+*~j*<0jiuhYt3xJ7M!#cxM5 z^&4ZI0B9pgYv#b6OQ=F34;xt2|D$NnJ(|E2QOkyY+5dnYD7OBX z&y4bOtT42?xZ){RoEVn+&f*MVMI$1Jn^)^80L-YQCb$RX<%T1C z9?>5Gtc2)FN3~B;-8xy;@n8LdXo~4V#vA}7j?e=Dlf(tWy`uH?*z?s8^>RmMN8N2_ zSwljOLl4P3&d5=hz#G8H)mU2a94m`^m+%bjq9N%B(%z^*mgcyNcYPKn9RGrW6mD|H5CzW#|WSuO2`@=Fb!Z*YBS19XaG^MtE z#7pbSPqy_g=0Q=pvE}P(Keej1mZ}`{5Zj(7H9yZUEmy47H2hc57Zz{M(T|c2QS1QZ zrFd=Gv=~X! z>;JvH8+rSSUAxBxmF%A;4@#3wn<7stUGhYH;_i?pQ-s<1x+;gxjwAQf_+0(*pPT7v z9Fw3b-%JHsz;hIWQvwl|w{a#j%%=5!ZNev`*Tl!E_6^M+uci_u4&FpD$RS|La2UgL z-kkTy!Vuj$dL698+RE`l4L>!^>ROTeSii5YvwE(P^)Kk1p+$w>$w*0)G6gTqSag2( zt9=4;?~cq9=Itj}G)ru{it*Y0a*NaAkQch;hsSeqy{DLnwyif)cN!<{-?>Hnm$d(I zdJ@^R>va6E2{o+W74l@`tV;d1aZmu$_p95*F3>c03{}!iHf{fe+<5t;TQC+;E(*zn zW~A*yAEaJ!9ou_#$XFT9NsGanXs50c#-H-Schob3NpoQ9L()DMKPGxaaXjRLDlY=^ z7h@jom}`gwX^~9Zt8__Z%22+;g=jR35E4P9bV#S(WHL ze@_0O+_&Lp=IKA-d4A&x#c)8_q4kb&>7V(&m`@t@ zcgbipI~UFvuLtMA&c1QXqc``5x&Pr^z zO+2QqNGunYD~SXaXvC=6w1)O7eD$c7q@3dN#15eNu0IPnyX@^6wbRr_sq@%@S&|?> zt$duj=C2Mc`K-DZTincH9QyuB)P9|MIkWc@yAK9GH-hmFgisN8 zy}_HR`Gp}+-=fv@u}H|0Ak9aka!VzzcmuJjbP4ex(>fQXT&p_6GSe}uRpmWeoP%dB zK7F$F_0>@NSQR(Cjk%py!TC~CO0n_J0z|YlEzBBj#Hg-pK-I%!!$S4Hj@+cMLc_{k z94;CnJQ98WzwEZ3R$%Vmm6tB9!!KEe|8$9>|E1~n&>H0Nef$*f;Z`9rDKNb`B#XuZ z(R}&g7KY=09BD$N*G|(da6FvQegwg>UiM??B2KTmhV-kA+_1}Q$|`}X@9AijWgs1b z*r!o9M%#~H-76KIB$lw9&+ifTgpIxYVpZ_KkkLu1z;u~phg^RHTvE+^>AAjn^V5&P z?Mg!!K}XYIZK>{djb6clt!}?u2p{VES0gb_Ir3I0?_Am-JuN0EjG=WIU0_5woRv<#~LCeSGg-KX;QI7u-!tBu2$maCfMpC^l?>$vd zryepxNcHfrsx9pdxz?l=rz2y!$ON88gUjPr{pk73VvqO=T@1D_n$zlR}sAv($@ulX|yUKy|Rk{ z6_IWG+V8&IM7Cj-pwMq=fdUj)tapj6c>@1x{IE%F1*V*yV+j1X5SC-!uX$TIIC7@R zJOnPNc|%DFAH3Wg_(8!dtvMBpmz{F;EXffVYu9@~cgPd{M)C%uQi|u|Q-}vRz7`Uw z0PPc7-ma5wJQ1$P51)U7$F#@%UNYjwws5NHE-f%po5p?JU`_w^oNM&{$2r8FUA^&} zj%BMmzwS4v;RML5@;$Ry2?jwrCwqr*_y;2FQ!CY1(3n7ngJ8a52B+pG$fXw>=uBnh zs^yk%3mNr0x*Y&<6vh6xv(w9415KL9-fQX;NjKOI9%Xn-ol9(jNG#qfi7A|~Ng-N| zV_Xa+ueD=5#LR?IFj@va5J-_`=+Osqj>{nwV{e?JxU=cJvy#uYe^U1M=$5ug-SGIE 
z;(2@Rr!@TMoN^DkpVj=k#g>-ew{r9e!xZ>ya?w2*Gi)Y7YfTW~wpZofhIr#^fV4&pLf=@S#f)F@+i{=BRSoKXMYDZ;}4P7y$T37{o9$wRg@H#9Z3UhQmb zyVBU+(A?Q}G5tyi0u_J=mU(72m?cT3lH?$2?x`P*$T+x@ii>$Kgx7sQ-x;Sdr9 zZNJrmA!5K79&f_3U5Xnou{I-hVL`6_4k@y@P=;zTzp?CC@O3 zxHF*SyiuZ5(`hxNL--#|`OBN>(nOC&!^KzPtUYi){