Merge branch 'main' into shell-commands-new

This commit is contained in:
Paul Gauthier 2024-08-20 17:33:30 -07:00
commit 18d6260c44
10 changed files with 41 additions and 23 deletions

View file

@@ -1 +1 @@
__version__ = "0.51.1-dev"
__version__ = "0.51.2-dev"

View file

@@ -98,6 +98,9 @@
## Enable caching of prompts (default: False)
#cache-prompts: false
## Multiplier for map tokens when no files are specified (default: 2)
#map-multiplier-no-files: true
## Maximum number of tokens to use for chat history. If not specified, uses the model's max_chat_history_tokens.
#max-chat-history-tokens:

View file

@@ -102,6 +102,9 @@
## Enable caching of prompts (default: False)
#AIDER_CACHE_PROMPTS=false
## Multiplier for map tokens when no files are specified (default: 2)
#AIDER_MAP_MULTIPLIER_NO_FILES=true
## Maximum number of tokens to use for chat history. If not specified, uses the model's max_chat_history_tokens.
#AIDER_MAX_CHAT_HISTORY_TOKENS=

View file

@@ -137,6 +137,9 @@ cog.outl("```")
## Enable caching of prompts (default: False)
#cache-prompts: false
## Multiplier for map tokens when no files are specified (default: 2)
#map-multiplier-no-files: true
## Maximum number of tokens to use for chat history. If not specified, uses the model's max_chat_history_tokens.
#max-chat-history-tokens:

View file

@@ -144,6 +144,9 @@ cog.outl("```")
## Enable caching of prompts (default: False)
#AIDER_CACHE_PROMPTS=false
## Multiplier for map tokens when no files are specified (default: 2)
#AIDER_MAP_MULTIPLIER_NO_FILES=true
## Maximum number of tokens to use for chat history. If not specified, uses the model's max_chat_history_tokens.
#AIDER_MAX_CHAT_HISTORY_TOKENS=

View file

@@ -36,8 +36,9 @@ usage: aider [-h] [--openai-api-key] [--anthropic-api-key] [--model]
[--show-model-warnings | --no-show-model-warnings]
[--map-tokens] [--map-refresh]
[--cache-prompts | --no-cache-prompts]
[--max-chat-history-tokens] [--env-file]
[--input-history-file] [--chat-history-file]
[--map-multiplier-no-files] [--max-chat-history-tokens]
[--env-file] [--input-history-file]
[--chat-history-file]
[--restore-chat-history | --no-restore-chat-history]
[--llm-history-file] [--dark-mode] [--light-mode]
[--pretty | --no-pretty] [--stream | --no-stream]
@@ -204,6 +205,11 @@ Aliases:
- `--cache-prompts`
- `--no-cache-prompts`
### `--map-multiplier-no-files VALUE`
Multiplier for map tokens when no files are specified (default: 2)
Default: 2
Environment variable: `AIDER_MAP_MULTIPLIER_NO_FILES`
### `--max-chat-history-tokens VALUE`
Maximum number of tokens to use for chat history. If not specified, uses the model's max_chat_history_tokens.
Environment variable: `AIDER_MAX_CHAT_HISTORY_TOKENS`

View file

@@ -14,7 +14,7 @@ This usually happens because the LLM is disobeying the system prompts
and trying to make edits in a format that aider doesn't expect.
Aider makes every effort to get the LLM
to conform, and works hard to deal with
LLMM edits that are "almost" correctly formatted.
LLM edits that are "almost" correctly formatted.
But sometimes the LLM just won't cooperate.
In these cases, here are some things you might try.
@@ -42,14 +42,14 @@ Models: claude-3-5-sonnet-20240620 with ♾️ diff edit format
## Reduce distractions
Many LLM now have very large context windows,
Many LLMs now have very large context windows,
but filling them with irrelevant code or conversation
can cofuse the model.
can confuse the model.
- Don't add too many files to the chat, *just* add the files you think need to be edited.
Aider also sends the LLM a [map of your entire git repo](https://aider.chat/docs/repomap.html), so other relevant code will be included automatically.
- Use `/drop` to remove files from the chat session which aren't needed for the task at hand. This will reduce distractions and may help GPT produce properly formatted edits.
- Use `/clear` to remove the conversation history, again to help GPT focus.
- Use `/drop` to remove files from the chat session which aren't needed for the task at hand. This will reduce distractions and may help the LLM produce properly formatted edits.
- Use `/clear` to remove the conversation history, again to help the LLM focus.
## More help

View file

@@ -4,9 +4,9 @@
#
# pip-compile --output-file=requirements.txt requirements/requirements.in
#
aiohappyeyeballs==2.3.7
aiohappyeyeballs==2.4.0
# via aiohttp
aiohttp==3.10.3
aiohttp==3.10.5
# via litellm
aiosignal==1.3.1
# via aiohttp
@@ -68,7 +68,7 @@ httpcore==1.0.5
# via httpx
httpx==0.27.0
# via openai
huggingface-hub==0.24.5
huggingface-hub==0.24.6
# via tokenizers
idna==3.7
# via
@@ -92,7 +92,7 @@ jsonschema==4.23.0
# litellm
jsonschema-specifications==2023.12.1
# via jsonschema
litellm==1.43.17
litellm==1.43.19
# via -r requirements/requirements.in
markdown-it-py==3.0.0
# via rich
@@ -112,7 +112,7 @@ numpy==1.26.4
# via
# -r requirements/requirements.in
# scipy
openai==1.41.0
openai==1.42.0
# via litellm
packaging==24.1
# via

View file

@@ -13,7 +13,7 @@ attrs==24.2.0
# referencing
blinker==1.8.2
# via streamlit
cachetools==5.4.0
cachetools==5.5.0
# via streamlit
certifi==2024.7.4
# via
@@ -64,7 +64,7 @@ mdurl==0.1.2
# via
# -c requirements/../requirements.txt
# markdown-it-py
narwhals==1.4.2
narwhals==1.5.0
# via altair
numpy==1.26.4
# via

View file

@@ -4,11 +4,11 @@
#
# pip-compile --output-file=requirements/requirements-help.txt requirements/requirements-help.in
#
aiohappyeyeballs==2.3.7
aiohappyeyeballs==2.4.0
# via
# -c requirements/../requirements.txt
# aiohttp
aiohttp==3.10.3
aiohttp==3.10.5
# via
# -c requirements/../requirements.txt
# huggingface-hub
@@ -86,7 +86,7 @@ httpx==0.27.0
# -c requirements/../requirements.txt
# llama-index-core
# openai
huggingface-hub[inference]==0.24.5
huggingface-hub[inference]==0.24.6
# via
# -c requirements/../requirements.txt
# llama-index-embeddings-huggingface
@@ -112,7 +112,7 @@ joblib==1.4.2
# via
# nltk
# scikit-learn
llama-index-core==0.10.66
llama-index-core==0.10.67
# via
# -r requirements/requirements-help.in
# llama-index-embeddings-huggingface
@@ -122,7 +122,7 @@ markupsafe==2.1.5
# via
# -c requirements/../requirements.txt
# jinja2
marshmallow==3.21.3
marshmallow==3.22.0
# via dataclasses-json
minijinja==2.0.1
# via huggingface-hub
@@ -142,7 +142,7 @@ networkx==3.2.1
# -c requirements/../requirements.txt
# llama-index-core
# torch
nltk==3.8.1
nltk==3.9.1
# via llama-index-core
numpy==1.26.4
# via
@@ -153,7 +153,7 @@ numpy==1.26.4
# scipy
# sentence-transformers
# transformers
openai==1.41.0
openai==1.42.0
# via
# -c requirements/../requirements.txt
# llama-index-core
@@ -249,7 +249,7 @@ tqdm==4.66.5
# openai
# sentence-transformers
# transformers
transformers==4.44.0
transformers==4.44.1
# via sentence-transformers
typing-extensions==4.12.2
# via