Merge remote-tracking branch 'refs/remotes/origin/main'

This commit is contained in:
Paul Gauthier 2024-06-27 22:42:57 +00:00
commit e695a1131a
27 changed files with 314 additions and 220 deletions

View file

@ -1,7 +1,11 @@
# Release history
### v0.40.2
### v0.40.6
- Fixed `/undo` so it works with `--no-attribute-author`.
### v0.40.5
- Bump versions to pickup latest litellm to fix streaming issue with Gemini
- https://github.com/BerriAI/litellm/issues/4408

View file

@ -1 +1 @@
__version__ = "0.40.6-dev"
__version__ = "0.40.7-dev"

View file

@ -29,12 +29,6 @@ def get_parser(default_config_files, git_root):
auto_env_var_prefix="AIDER_",
)
group = parser.add_argument_group("Main")
group.add_argument(
"--llm-history-file",
metavar="LLM_HISTORY_FILE",
default=None,
help="Log the conversation with the LLM to this file (for example, .aider.llm.history)",
)
group.add_argument(
"files", metavar="FILE", nargs="*", help="files to edit with an LLM (optional)"
)
@ -236,6 +230,12 @@ def get_parser(default_config_files, git_root):
default=False,
help="Restore the previous chat history messages (default: False)",
)
group.add_argument(
"--llm-history-file",
metavar="LLM_HISTORY_FILE",
default=None,
help="Log the conversation with the LLM to this file (for example, .aider.llm.history)",
)
##########
group = parser.add_argument_group("Output Settings")
@ -345,6 +345,12 @@ def get_parser(default_config_files, git_root):
default=True,
help="Attribute aider commits in the git committer name (default: True)",
)
group.add_argument(
"--attribute-commit-message",
action=argparse.BooleanOptionalAction,
default=False,
help="Prefix commit messages with 'aider: ' (default: False)",
)
group.add_argument(
"--dry-run",
action=argparse.BooleanOptionalAction,

View file

@ -13,7 +13,6 @@ from json.decoder import JSONDecodeError
from pathlib import Path
import git
import openai
from jsonschema import Draft7Validator
from rich.console import Console, Text
from rich.markdown import Markdown
@ -37,7 +36,7 @@ class MissingAPIKeyError(ValueError):
pass
class ExhaustedContextWindow(Exception):
class FinishReasonLength(Exception):
pass
@ -221,6 +220,7 @@ class Coder:
test_cmd=None,
attribute_author=True,
attribute_committer=True,
attribute_commit_message=False,
):
if not fnames:
fnames = []
@ -280,6 +280,7 @@ class Coder:
models=main_model.commit_message_models(),
attribute_author=attribute_author,
attribute_committer=attribute_committer,
attribute_commit_message=attribute_commit_message,
)
self.root = self.repo.root
except FileNotFoundError:
@ -810,28 +811,43 @@ class Coder:
if self.verbose:
utils.show_messages(messages, functions=self.functions)
multi_response_content = ""
exhausted = False
interrupted = False
try:
yield from self.send(messages, functions=self.functions)
except KeyboardInterrupt:
interrupted = True
except ExhaustedContextWindow:
exhausted = True
except litellm.exceptions.BadRequestError as err:
if "ContextWindowExceededError" in err.message:
while True:
try:
yield from self.send(messages, functions=self.functions)
break
except KeyboardInterrupt:
interrupted = True
break
except litellm.ContextWindowExceededError:
# The input is overflowing the context window!
exhausted = True
else:
self.io.tool_error(f"BadRequestError: {err}")
break
except litellm.exceptions.BadRequestError as br_err:
self.io.tool_error(f"BadRequestError: {br_err}")
return
except openai.BadRequestError as err:
if "maximum context length" in str(err):
exhausted = True
else:
raise err
except Exception as err:
self.io.tool_error(f"Unexpected error: {err}")
return
except FinishReasonLength:
# We hit the 4k output limit!
if not self.main_model.can_prefill:
exhausted = True
break
# Use prefill to continue the response
multi_response_content += self.partial_response_content
if messages[-1]["role"] == "assistant":
messages[-1]["content"] = multi_response_content
else:
messages.append(dict(role="assistant", content=multi_response_content))
except Exception as err:
self.io.tool_error(f"Unexpected error: {err}")
traceback.print_exc()
return
if multi_response_content:
multi_response_content += self.partial_response_content
self.partial_response_content = multi_response_content
if exhausted:
self.show_exhausted_error()
@ -1101,7 +1117,7 @@ class Coder:
if show_func_err and show_content_err:
self.io.tool_error(show_func_err)
self.io.tool_error(show_content_err)
raise Exception("No data found in openai response!")
raise Exception("No data found in LLM response!")
tokens = None
if hasattr(completion, "usage") and completion.usage is not None:
@ -1129,6 +1145,12 @@ class Coder:
if tokens is not None:
self.io.tool_output(tokens)
if (
hasattr(completion.choices[0], "finish_reason")
and completion.choices[0].finish_reason == "length"
):
raise FinishReasonLength()
def show_send_output_stream(self, completion):
if self.show_pretty():
mdargs = dict(style=self.assistant_output_color, code_theme=self.code_theme)
@ -1145,7 +1167,7 @@ class Coder:
hasattr(chunk.choices[0], "finish_reason")
and chunk.choices[0].finish_reason == "length"
):
raise ExhaustedContextWindow()
raise FinishReasonLength()
try:
func = chunk.choices[0].delta.function_call

View file

@ -331,10 +331,7 @@ class Commands:
return
last_commit = self.coder.repo.repo.head.commit
if (
not last_commit.author.name.endswith(" (aider)")
or last_commit.hexsha[:7] != self.coder.last_aider_commit_hash
):
if last_commit.hexsha[:7] != self.coder.last_aider_commit_hash:
self.io.tool_error("The last commit was not made by aider in this chat session.")
self.io.tool_error(
"You could try `/git reset --hard HEAD^` but be aware that this is a destructive"

View file

@ -17,9 +17,10 @@ from aider.scrape import Scraper
class CaptureIO(InputOutput):
lines = []
def tool_output(self, msg):
self.lines.append(msg)
super().tool_output(msg)
def tool_output(self, msg, log_only=False):
if not log_only:
self.lines.append(msg)
super().tool_output(msg, log_only=log_only)
def tool_error(self, msg):
self.lines.append(msg)

View file

@ -441,6 +441,7 @@ def main(argv=None, input=None, output=None, force_git_root=None, return_coder=F
test_cmd=args.test_cmd,
attribute_author=args.attribute_author,
attribute_committer=args.attribute_committer,
attribute_commit_message=args.attribute_commit_message,
)
except ValueError as err:

View file

@ -27,6 +27,7 @@ class ModelSettings:
lazy: bool = False
reminder_as_sys_msg: bool = False
examples_as_sys_msg: bool = False
can_prefill: bool = False
# https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo
@ -166,6 +167,7 @@ MODEL_SETTINGS = [
weak_model_name="claude-3-haiku-20240307",
use_repo_map=True,
send_undo_reply=True,
can_prefill=True,
),
ModelSettings(
"openrouter/anthropic/claude-3-opus",
@ -173,11 +175,13 @@ MODEL_SETTINGS = [
weak_model_name="openrouter/anthropic/claude-3-haiku",
use_repo_map=True,
send_undo_reply=True,
can_prefill=True,
),
ModelSettings(
"claude-3-sonnet-20240229",
"whole",
weak_model_name="claude-3-haiku-20240307",
can_prefill=True,
),
ModelSettings(
"claude-3-5-sonnet-20240620",
@ -185,6 +189,7 @@ MODEL_SETTINGS = [
weak_model_name="claude-3-haiku-20240307",
use_repo_map=True,
examples_as_sys_msg=True,
can_prefill=True,
),
ModelSettings(
"anthropic/claude-3-5-sonnet-20240620",
@ -192,6 +197,7 @@ MODEL_SETTINGS = [
weak_model_name="claude-3-haiku-20240307",
use_repo_map=True,
examples_as_sys_msg=True,
can_prefill=True,
),
ModelSettings(
"openrouter/anthropic/claude-3.5-sonnet",
@ -199,6 +205,7 @@ MODEL_SETTINGS = [
weak_model_name="openrouter/anthropic/claude-3-haiku-20240307",
use_repo_map=True,
examples_as_sys_msg=True,
can_prefill=True,
),
# Vertex AI Claude models
ModelSettings(
@ -206,6 +213,8 @@ MODEL_SETTINGS = [
"diff",
weak_model_name="vertex_ai/claude-3-haiku@20240307",
use_repo_map=True,
examples_as_sys_msg=True,
can_prefill=True,
),
ModelSettings(
"vertex_ai/claude-3-opus@20240229",
@ -213,11 +222,13 @@ MODEL_SETTINGS = [
weak_model_name="vertex_ai/claude-3-haiku@20240307",
use_repo_map=True,
send_undo_reply=True,
can_prefill=True,
),
ModelSettings(
"vertex_ai/claude-3-sonnet@20240229",
"whole",
weak_model_name="vertex_ai/claude-3-haiku@20240307",
can_prefill=True,
),
# Cohere
ModelSettings(
@ -328,7 +339,7 @@ class Model:
self.missing_keys = res.get("missing_keys")
self.keys_in_environment = res.get("keys_in_environment")
max_input_tokens = self.info.get("max_input_tokens", 0)
max_input_tokens = self.info.get("max_input_tokens")
if not max_input_tokens:
max_input_tokens = 0
if max_input_tokens < 32 * 1024:
@ -375,6 +386,15 @@ class Model:
if "gpt-3.5" in model or "gpt-4" in model:
self.reminder_as_sys_msg = True
if "anthropic" in model:
self.can_prefill = True
if "3.5-sonnet" in model or "3-5-sonnet" in model:
self.edit_format = "diff"
self.use_repo_map = True
self.examples_as_sys_msg = True
self.can_prefill = True
# use the defaults
if self.edit_format == "diff":
self.use_repo_map = True
@ -554,7 +574,7 @@ def sanity_check_model(io, model):
if not model.info:
show = True
io.tool_output(
f"Model {model}: Unknown model, context window size and token costs unavailable."
f"Model {model}: Unknown context window size and costs, using sane defaults."
)
possible_matches = fuzzy_match_models(model.name)
@ -563,12 +583,12 @@ def sanity_check_model(io, model):
for match in possible_matches:
fq, m = match
if fq == m:
io.tool_error(f"- {m}")
io.tool_output(f"- {m}")
else:
io.tool_error(f"- {m} ({fq})")
io.tool_output(f"- {m} ({fq})")
if show:
io.tool_error(urls.model_warnings)
io.tool_output(f"For more info, see: {urls.model_warnings}\n")
def fuzzy_match_models(name):

View file

@ -25,12 +25,14 @@ class GitRepo:
models=None,
attribute_author=True,
attribute_committer=True,
attribute_commit_message=False,
):
self.io = io
self.models = models
self.attribute_author = attribute_author
self.attribute_committer = attribute_committer
self.attribute_commit_message = attribute_commit_message
if git_dname:
check_fnames = [git_dname]
@ -84,6 +86,9 @@ class GitRepo:
else:
commit_message = self.get_commit_message(diffs, context)
if aider_edits and self.attribute_commit_message:
commit_message = "aider: " + commit_message
if not commit_message:
commit_message = "(no commit message provided)"

View file

@ -3,7 +3,6 @@ import json
import backoff
import httpx
import openai
from aider.dump import dump # noqa: F401
from aider.litellm import litellm
@ -85,5 +84,5 @@ def simple_send_with_retries(model_name, messages):
stream=False,
)
return response.choices[0].message.content
except (AttributeError, openai.BadRequestError):
except (AttributeError, litellm.exceptions.BadRequestError):
return

View file

@ -1,16 +1,15 @@
import tempfile
import unittest
from pathlib import Path
from unittest.mock import MagicMock, patch
from unittest.mock import MagicMock
import git
import openai
from aider.coders import Coder
from aider.dump import dump # noqa: F401
from aider.io import InputOutput
from aider.models import Model
from aider.utils import ChdirTemporaryDirectory, GitTemporaryDirectory
from aider.utils import GitTemporaryDirectory
class TestCoder(unittest.TestCase):
@ -330,25 +329,6 @@ class TestCoder(unittest.TestCase):
# both files should still be here
self.assertEqual(len(coder.abs_fnames), 2)
def test_run_with_invalid_request_error(self):
with ChdirTemporaryDirectory():
# Mock the IO object
mock_io = MagicMock()
# Initialize the Coder object with the mocked IO and mocked repo
coder = Coder.create(self.GPT35, None, mock_io)
# Call the run method and assert that InvalidRequestError is raised
with self.assertRaises(openai.BadRequestError):
with patch("litellm.completion") as Mock:
Mock.side_effect = openai.BadRequestError(
message="Invalid request",
response=MagicMock(),
body=None,
)
coder.run(with_message="hi")
def test_new_file_edit_one_commit(self):
"""A new file shouldn't get pre-committed before the GPT edit commit"""
with GitTemporaryDirectory():

View file

@ -523,8 +523,6 @@ class TestCommands(TestCase):
other_path.write_text("other content")
repo.git.add(str(other_path))
os.environ["GIT_AUTHOR_NAME"] = "Foo (aider)"
# Create and commit a file
filename = "test_file.txt"
file_path = Path(repo_dir) / filename
@ -536,8 +534,6 @@ class TestCommands(TestCase):
repo.git.add(filename)
repo.git.commit("-m", "second commit")
del os.environ["GIT_AUTHOR_NAME"]
# Store the commit hash
last_commit_hash = repo.head.commit.hexsha[:7]
coder.last_aider_commit_hash = last_commit_hash

View file

@ -14,8 +14,8 @@ cog $ARG \
README.md \
website/index.md \
website/HISTORY.md \
website/docs/dotenv.md \
website/docs/commands.md \
website/docs/languages.md \
website/docs/options.md \
website/docs/aider_conf.md
website/docs/config/dotenv.md \
website/docs/config/options.md \
website/docs/config/aider_conf.md

View file

@ -7,10 +7,10 @@ with open("requirements.txt") as f:
from aider import __version__
with open("website/index.md", "r", encoding="utf-8") as f:
with open("README.md", "r", encoding="utf-8") as f:
long_description = f.read()
long_description = re.sub(r"\n!\[.*\]\(.*\)", "", long_description)
long_description = re.sub(r"\n- \[.*\]\(.*\)", "", long_description)
# long_description = re.sub(r"\n- \[.*\]\(.*\)", "", long_description)
setup(
name="aider-chat",

View file

@ -12,7 +12,11 @@ cog.out(text)
# Release history
### v0.40.2
### v0.40.6
- Fixed `/undo` so it works with `--no-attribute-author`.
### v0.40.5
- Bump versions to pickup latest litellm to fix streaming issue with Gemini
- https://github.com/BerriAI/litellm/issues/4408

View file

@ -1,18 +1,41 @@
## Unknown context window size and token costs
Aider tries to sanity check that it is configured correctly
to work with the LLM you specified:
```
Model foobar: Unknown context window size and costs, using sane defaults.
```
- It checks to see that all required environment variables are set for the model. These variables are required to configure things like API keys, API base URLs, etc.
These settings are required to be correct.
- It checks a metadata database to look up the context window size and token costs for the model.
It's usually OK if this extra metadata isn't available.
*You can probably ignore the unknown context window size and token costs warning.*
Sometimes one or both of these checks will fail, so aider will issue
some of the following warnings.
If you specify a model that aider has never heard of, you will get
this warning.
This means aider doesn't know the context window size and token costs
for that model.
Aider will use an unlimited context window and assume the model is free,
so this is not usually a significant problem.
See the docs on
[configuring advanced model settings](/docs/config/adv-model-settings.html)
for details on how to remove this warning.
## Did you mean?
If aider isn't familiar with the model you've specified,
it will suggest similarly named models.
This helps
in the case where you made a typo or mistake when specifying the model name.
```
Model gpt-5o: Unknown context window size and costs, using sane defaults.
Did you mean one of these?
- gpt-4o
```
## Missing environment variables
You need to set the listed environment variables.
Otherwise you will get error messages when you start chatting with the model.
```
Model azure/gpt-4-turbo: Missing these environment variables:
- AZURE_API_BASE
@ -20,8 +43,6 @@ Model azure/gpt-4-turbo: Missing these environment variables:
- AZURE_API_KEY
```
You need to set the listed environment variables.
Otherwise you will get error messages when you start chatting with the model.
## Unknown which environment variables are required
@ -34,24 +55,8 @@ Aider is unable to verify the environment because it doesn't know
which variables are required for the model.
If required variables are missing,
you may get errors when you attempt to chat with the model.
You can look in the
[litellm provider documentation](https://docs.litellm.ai/docs/providers)
You can look in the [aider's LLM documentation](/docs/llms.html)
or the
[litellm documentation](https://docs.litellm.ai/docs/providers)
to see if the required variables are listed there.
## Context window size and token costs unavailable.
```
Model foobar: Unknown model, context window size and token costs unavailable.
```
If you specify a model that aider has never heard of, you will get an
"unknown model" warning.
This means aider doesn't know the context window size and token costs
for that model.
Some minor functionality will be limited when using such models, but
it's not really a significant problem.
Aider will also try to suggest similarly named models,
in case you made a typo or mistake when specifying the model name.

View file

@ -13,9 +13,6 @@
#######
# Main:
## Log the conversation with the LLM to this file (for example, .aider.llm.history)
#llm-history-file:
## Specify the OpenAI API key
#openai-api-key:
@ -103,6 +100,9 @@
## Restore the previous chat history messages (default: False)
#restore-chat-history: false
## Log the conversation with the LLM to this file (for example, .aider.llm.history)
#llm-history-file:
##################
# Output Settings:
@ -160,6 +160,9 @@
## Attribute aider commits in the git committer name (default: True)
#attribute-committer: true
## Prefix commit messages with 'aider: ' (default: False)
#attribute-commit-message: false
## Perform a dry run without modifying files (default: False)
#dry-run: false

View file

@ -21,9 +21,6 @@
#######
# Main:
## Log the conversation with the LLM to this file (for example, .aider.llm.history)
#AIDER_LLM_HISTORY_FILE=
## Specify the OpenAI API key
#OPENAI_API_KEY=
@ -111,6 +108,9 @@
## Restore the previous chat history messages (default: False)
#AIDER_RESTORE_CHAT_HISTORY=false
## Log the conversation with the LLM to this file (for example, .aider.llm.history)
#AIDER_LLM_HISTORY_FILE=
##################
# Output Settings:
@ -168,6 +168,9 @@
## Attribute aider commits in the git committer name (default: True)
#AIDER_ATTRIBUTE_COMMITTER=true
## Prefix commit messages with 'aider: ' (default: False)
#AIDER_ATTRIBUTE_COMMIT_MESSAGE=false
## Perform a dry run without modifying files (default: False)
#AIDER_DRY_RUN=false

View file

@ -11,24 +11,31 @@ command line switches.
Most options can also be set in an `.aider.conf.yml` file
which can be placed in your home directory or at the root of
your git repo.
Or via environment variables like `AIDER_xxx`,
as noted in the [options reference](options.html).
Or by setting environment variables like `AIDER_xxx`
either in your shell or a `.env` file.
Here are 3 equivalent ways of setting an option. First, via a command line switch:
Here are 4 equivalent ways of setting an option.
With a command line switch:
```
$ aider --dark-mode
```
Or, via an env variable:
```
export AIDER_DARK_MODE=true
```
Or in the `.aider.conf.yml` file:
Using a `.aider.conf.yml` file:
```yaml
dark-mode: true
```
By setting an environment variable:
```
export AIDER_DARK_MODE=true
```
Using an `.env` file:
```
AIDER_DARK_MODE=true
```

View file

@ -0,0 +1,86 @@
---
parent: Configuration
nav_order: 950
description: Configuring advanced settings for LLMs.
---
# Advanced model settings
## Context window size and token costs
In most cases, you can safely ignore aider's warning about unknown context
window size and model costs.
But, you can register context window limits and costs for models that aren't known
to aider. Create a `.aider.litellm.models.json` file in one of these locations:
- Your home directory.
- The root of your git repo.
- The current directory where you launch aider.
- Or specify a specific file with the `--model-metadata-file <filename>` switch.
If the files above exist, they will be loaded in that order.
Files loaded last will take priority.
The json file should be a dictionary with an entry for each model, as follows:
```
{
"deepseek-chat": {
"max_tokens": 4096,
"max_input_tokens": 32000,
"max_output_tokens": 4096,
"input_cost_per_token": 0.00000014,
"output_cost_per_token": 0.00000028,
"litellm_provider": "deepseek",
"mode": "chat"
}
}
```
See
[litellm's model_prices_and_context_window.json file](https://github.com/BerriAI/litellm/blob/main/model_prices_and_context_window.json) for more examples.
## Model settings
Aider has a number of settings that control how it works with
different models.
These model settings are pre-configured for most popular models.
But it can sometimes be helpful to override them or add settings for
a model that aider doesn't know about.
To do that,
create a `.aider.models.yml` file in one of these locations:
- Your home directory.
- The root of your git repo.
- The current directory where you launch aider.
- Or specify a specific file with the `--model-settings-file <filename>` switch.
If the files above exist, they will be loaded in that order.
Files loaded last will take priority.
The yaml file should be a list of dictionary objects for each model, as follows:
```
- name: "gpt-3.5-turbo"
edit_format: "whole"
weak_model_name: "gpt-3.5-turbo"
use_repo_map: false
send_undo_reply: false
accepts_images: false
lazy: false
reminder_as_sys_msg: true
examples_as_sys_msg: false
- name: "gpt-4-turbo-2024-04-09"
edit_format: "udiff"
weak_model_name: "gpt-3.5-turbo"
use_repo_map: true
send_undo_reply: true
accepts_images: true
lazy: true
reminder_as_sys_msg: true
examples_as_sys_msg: false
```

View file

@ -41,9 +41,6 @@ cog.outl("```")
#######
# Main:
## Log the conversation with the LLM to this file (for example, .aider.llm.history)
#llm-history-file:
## Specify the OpenAI API key
#openai-api-key:
@ -131,6 +128,9 @@ cog.outl("```")
## Restore the previous chat history messages (default: False)
#restore-chat-history: false
## Log the conversation with the LLM to this file (for example, .aider.llm.history)
#llm-history-file:
##################
# Output Settings:
@ -188,6 +188,9 @@ cog.outl("```")
## Attribute aider commits in the git committer name (default: True)
#attribute-committer: true
## Prefix commit messages with 'aider: ' (default: False)
#attribute-commit-message: false
## Perform a dry run without modifying files (default: False)
#dry-run: false

View file

@ -54,9 +54,6 @@ cog.outl("```")
#######
# Main:
## Log the conversation with the LLM to this file (for example, .aider.llm.history)
#AIDER_LLM_HISTORY_FILE=
## Specify the OpenAI API key
#OPENAI_API_KEY=
@ -144,6 +141,9 @@ cog.outl("```")
## Restore the previous chat history messages (default: False)
#AIDER_RESTORE_CHAT_HISTORY=false
## Log the conversation with the LLM to this file (for example, .aider.llm.history)
#AIDER_LLM_HISTORY_FILE=
##################
# Output Settings:
@ -201,6 +201,9 @@ cog.outl("```")
## Attribute aider commits in the git committer name (default: True)
#AIDER_ATTRIBUTE_COMMITTER=true
## Prefix commit messages with 'aider: ' (default: False)
#AIDER_ATTRIBUTE_COMMIT_MESSAGE=false
## Perform a dry run without modifying files (default: False)
#AIDER_DRY_RUN=false

View file

@ -20,29 +20,29 @@ from aider.args import get_md_help
cog.out(get_md_help())
]]]-->
```
usage: aider [-h] [--llm-history-file] [--openai-api-key]
[--anthropic-api-key] [--model] [--opus] [--sonnet]
[--4] [--4o] [--4-turbo] [--35turbo] [--models]
[--openai-api-base] [--openai-api-type]
[--openai-api-version] [--openai-api-deployment-id]
[--openai-organization-id] [--model-settings-file]
[--model-metadata-file]
usage: aider [-h] [--openai-api-key] [--anthropic-api-key] [--model]
[--opus] [--sonnet] [--4] [--4o] [--4-turbo]
[--35turbo] [--models] [--openai-api-base]
[--openai-api-type] [--openai-api-version]
[--openai-api-deployment-id] [--openai-organization-id]
[--model-settings-file] [--model-metadata-file]
[--verify-ssl | --no-verify-ssl] [--edit-format]
[--weak-model]
[--show-model-warnings | --no-show-model-warnings]
[--map-tokens] [--max-chat-history-tokens] [--env-file]
[--input-history-file] [--chat-history-file]
[--restore-chat-history | --no-restore-chat-history]
[--dark-mode] [--light-mode] [--pretty | --no-pretty]
[--stream | --no-stream] [--user-input-color]
[--tool-output-color] [--tool-error-color]
[--assistant-output-color] [--code-theme]
[--show-diffs] [--git | --no-git]
[--llm-history-file] [--dark-mode] [--light-mode]
[--pretty | --no-pretty] [--stream | --no-stream]
[--user-input-color] [--tool-output-color]
[--tool-error-color] [--assistant-output-color]
[--code-theme] [--show-diffs] [--git | --no-git]
[--gitignore | --no-gitignore] [--aiderignore]
[--auto-commits | --no-auto-commits]
[--dirty-commits | --no-dirty-commits]
[--attribute-author | --no-attribute-author]
[--attribute-committer | --no-attribute-committer]
[--attribute-commit-message | --no-attribute-commit-message]
[--dry-run | --no-dry-run] [--commit] [--lint]
[--lint-cmd] [--auto-lint | --no-auto-lint]
[--test-cmd] [--auto-test | --no-auto-test] [--test]
@ -63,10 +63,6 @@ Aliases:
## Main:
### `--llm-history-file LLM_HISTORY_FILE`
Log the conversation with the LLM to this file (for example, .aider.llm.history)
Environment variable: `AIDER_LLM_HISTORY_FILE`
### `--openai-api-key OPENAI_API_KEY`
Specify the OpenAI API key
Environment variable: `OPENAI_API_KEY`
@ -204,6 +200,10 @@ Aliases:
- `--restore-chat-history`
- `--no-restore-chat-history`
### `--llm-history-file LLM_HISTORY_FILE`
Log the conversation with the LLM to this file (for example, .aider.llm.history)
Environment variable: `AIDER_LLM_HISTORY_FILE`
## Output Settings:
### `--dark-mode`
@ -316,6 +316,14 @@ Aliases:
- `--attribute-committer`
- `--no-attribute-committer`
### `--attribute-commit-message`
Prefix commit messages with 'aider: ' (default: False)
Default: False
Environment variable: `AIDER_ATTRIBUTE_COMMIT_MESSAGE`
Aliases:
- `--attribute-commit-message`
- `--no-attribute-commit-message`
### `--dry-run`
Perform a dry run without modifying files (default: False)
Default: False

View file

@ -44,3 +44,6 @@ Aider marks commits that it either authored or committed.
You can use `--no-attribute-author` and `--no-attribute-committer` to disable
modification of the git author and committer name fields.
Additionally, you can use `--attribute-commit-message` to prefix commit messages with 'aider: '.
This option is disabled by default, but can be useful for easily identifying commits made by aider.

View file

@ -8,70 +8,3 @@ nav_order: 900
{% include model-warnings.md %}
## Adding settings for missing models
You can register model settings used by aider for unknown models.
Create a `.aider.models.yml` file in one of these locations:
- Your home directory.
- The root of your git repo.
- The current directory where you launch aider.
- Or specify a specific file with the `--model-settings-file <filename>` switch.
If the files above exist, they will be loaded in that order.
Files loaded last will take priority.
The yaml file should be a list of dictionary objects for each model, as follows:
```
- name: "gpt-3.5-turbo"
edit_format: "whole"
weak_model_name: "gpt-3.5-turbo"
use_repo_map: false
send_undo_reply: false
accepts_images: false
lazy: false
reminder_as_sys_msg: true
examples_as_sys_msg: false
- name: "gpt-4-turbo-2024-04-09"
edit_format: "udiff"
weak_model_name: "gpt-3.5-turbo"
use_repo_map: true
send_undo_reply: true
accepts_images: true
lazy: true
reminder_as_sys_msg: true
examples_as_sys_msg: false
```
## Specifying context window size and token costs
You can register context window limits and costs for models that aren't known
to aider. Create a `.aider.litellm.models.json` file in one of these locations:
- Your home directory.
- The root if your git repo.
- The current directory where you launch aider.
- Or specify a specific file with the `--model-metadata-file <filename>` switch.
If the files above exist, they will be loaded in that order.
Files loaded last will take priority.
The json file should be a dictionary with an entry for each model, as follows:
```
{
"deepseek-chat": {
"max_tokens": 4096,
"max_input_tokens": 32000,
"max_output_tokens": 4096,
"input_cost_per_token": 0.00000014,
"output_cost_per_token": 0.00000028,
"litellm_provider": "deepseek",
"mode": "chat"
}
}
```
See
[litellm's model_prices_and_context_window.json file](https://github.com/BerriAI/litellm/blob/main/model_prices_and_context_window.json) for more examples.

View file

@ -21,8 +21,8 @@ In these cases, here are some things you might try.
## Use a capable model
If possible try using GPT-4o or Opus, as they are the strongest and most
capable models.
If possible try using GPT-4o, Claude 3.5 Sonnet or Claude 3 Opus,
as they are the strongest and most capable models.
Weaker models
are more prone to

View file

@ -37,26 +37,31 @@ Use /help to see in-chat commands, run with --help to see cmd line args
## Adding files
Just add the files that the aider will need to *edit*.
Add the files that the aider will need to *edit*.
Don't add a bunch of extra files.
If you add too many files, the LLM can get overwhelmed
and confused (and it costs more tokens).
Aider will automatically
pull in content from related files so that it can
[understand the rest of your code base](https://aider.chat/docs/repomap.html).
You can also run aider without naming any files and use the in-chat
You add files to the chat by naming them on the aider command line.
Or, you can use the in-chat
`/add` command to add files.
Or you can skip adding files completely, and aider
will try to figure out which files need to be edited based
You can use aider without adding any files,
and it will try to figure out which files need to be edited based
on your requests.
But you'll get the best results if you add the files that need
to be edited.
## LLMs
Aider uses GPT-4o by default, but you can
[connect to many different LLMs](/docs/llms.html).
Claude 3 Opus is another model which works very well with aider,
which you can use by running `aider --opus`.
Claude 3.5 Sonnet also works very well with aider,
which you can use by running `aider --sonnet`.
You can run `aider --model XXX` to launch aider with
a specific model.
@ -68,8 +73,8 @@ Or, during your chat you can switch models with the in-chat
Ask aider to make changes to your code.
It will show you some diffs of the changes it is making to
complete your request.
Aider will git commit all of its changes,
[Aider will git commit all of its changes](/docs/git.html),
so they are easy to track and undo.
You can always use the `/undo` command to undo changes you don't
You can always use the `/undo` command to undo AI changes that you don't
like.