mirror of
https://github.com/Aider-AI/aider.git
synced 2025-06-03 03:05:00 +00:00
Merge remote-tracking branch 'refs/remotes/origin/main'
This commit is contained in:
commit
e695a1131a
27 changed files with 314 additions and 220 deletions
|
@ -1,7 +1,11 @@
|
||||||
|
|
||||||
# Release history
|
# Release history
|
||||||
|
|
||||||
### v0.40.2
|
### v0.40.6
|
||||||
|
|
||||||
|
- Fixed `/undo` so it works with `--no-attribute-author`.
|
||||||
|
|
||||||
|
### v0.40.5
|
||||||
|
|
||||||
- Bump versions to pickup latest litellm to fix streaming issue with Gemini
|
- Bump versions to pickup latest litellm to fix streaming issue with Gemini
|
||||||
- https://github.com/BerriAI/litellm/issues/4408
|
- https://github.com/BerriAI/litellm/issues/4408
|
||||||
|
|
|
@ -1 +1 @@
|
||||||
__version__ = "0.40.6-dev"
|
__version__ = "0.40.7-dev"
|
||||||
|
|
|
@ -29,12 +29,6 @@ def get_parser(default_config_files, git_root):
|
||||||
auto_env_var_prefix="AIDER_",
|
auto_env_var_prefix="AIDER_",
|
||||||
)
|
)
|
||||||
group = parser.add_argument_group("Main")
|
group = parser.add_argument_group("Main")
|
||||||
group.add_argument(
|
|
||||||
"--llm-history-file",
|
|
||||||
metavar="LLM_HISTORY_FILE",
|
|
||||||
default=None,
|
|
||||||
help="Log the conversation with the LLM to this file (for example, .aider.llm.history)",
|
|
||||||
)
|
|
||||||
group.add_argument(
|
group.add_argument(
|
||||||
"files", metavar="FILE", nargs="*", help="files to edit with an LLM (optional)"
|
"files", metavar="FILE", nargs="*", help="files to edit with an LLM (optional)"
|
||||||
)
|
)
|
||||||
|
@ -236,6 +230,12 @@ def get_parser(default_config_files, git_root):
|
||||||
default=False,
|
default=False,
|
||||||
help="Restore the previous chat history messages (default: False)",
|
help="Restore the previous chat history messages (default: False)",
|
||||||
)
|
)
|
||||||
|
group.add_argument(
|
||||||
|
"--llm-history-file",
|
||||||
|
metavar="LLM_HISTORY_FILE",
|
||||||
|
default=None,
|
||||||
|
help="Log the conversation with the LLM to this file (for example, .aider.llm.history)",
|
||||||
|
)
|
||||||
|
|
||||||
##########
|
##########
|
||||||
group = parser.add_argument_group("Output Settings")
|
group = parser.add_argument_group("Output Settings")
|
||||||
|
@ -345,6 +345,12 @@ def get_parser(default_config_files, git_root):
|
||||||
default=True,
|
default=True,
|
||||||
help="Attribute aider commits in the git committer name (default: True)",
|
help="Attribute aider commits in the git committer name (default: True)",
|
||||||
)
|
)
|
||||||
|
group.add_argument(
|
||||||
|
"--attribute-commit-message",
|
||||||
|
action=argparse.BooleanOptionalAction,
|
||||||
|
default=False,
|
||||||
|
help="Prefix commit messages with 'aider: ' (default: False)",
|
||||||
|
)
|
||||||
group.add_argument(
|
group.add_argument(
|
||||||
"--dry-run",
|
"--dry-run",
|
||||||
action=argparse.BooleanOptionalAction,
|
action=argparse.BooleanOptionalAction,
|
||||||
|
|
|
@ -13,7 +13,6 @@ from json.decoder import JSONDecodeError
|
||||||
from pathlib import Path
|
from pathlib import Path
|
||||||
|
|
||||||
import git
|
import git
|
||||||
import openai
|
|
||||||
from jsonschema import Draft7Validator
|
from jsonschema import Draft7Validator
|
||||||
from rich.console import Console, Text
|
from rich.console import Console, Text
|
||||||
from rich.markdown import Markdown
|
from rich.markdown import Markdown
|
||||||
|
@ -37,7 +36,7 @@ class MissingAPIKeyError(ValueError):
|
||||||
pass
|
pass
|
||||||
|
|
||||||
|
|
||||||
class ExhaustedContextWindow(Exception):
|
class FinishReasonLength(Exception):
|
||||||
pass
|
pass
|
||||||
|
|
||||||
|
|
||||||
|
@ -221,6 +220,7 @@ class Coder:
|
||||||
test_cmd=None,
|
test_cmd=None,
|
||||||
attribute_author=True,
|
attribute_author=True,
|
||||||
attribute_committer=True,
|
attribute_committer=True,
|
||||||
|
attribute_commit_message=False,
|
||||||
):
|
):
|
||||||
if not fnames:
|
if not fnames:
|
||||||
fnames = []
|
fnames = []
|
||||||
|
@ -280,6 +280,7 @@ class Coder:
|
||||||
models=main_model.commit_message_models(),
|
models=main_model.commit_message_models(),
|
||||||
attribute_author=attribute_author,
|
attribute_author=attribute_author,
|
||||||
attribute_committer=attribute_committer,
|
attribute_committer=attribute_committer,
|
||||||
|
attribute_commit_message=attribute_commit_message,
|
||||||
)
|
)
|
||||||
self.root = self.repo.root
|
self.root = self.repo.root
|
||||||
except FileNotFoundError:
|
except FileNotFoundError:
|
||||||
|
@ -810,28 +811,43 @@ class Coder:
|
||||||
if self.verbose:
|
if self.verbose:
|
||||||
utils.show_messages(messages, functions=self.functions)
|
utils.show_messages(messages, functions=self.functions)
|
||||||
|
|
||||||
|
multi_response_content = ""
|
||||||
exhausted = False
|
exhausted = False
|
||||||
interrupted = False
|
interrupted = False
|
||||||
try:
|
while True:
|
||||||
yield from self.send(messages, functions=self.functions)
|
try:
|
||||||
except KeyboardInterrupt:
|
yield from self.send(messages, functions=self.functions)
|
||||||
interrupted = True
|
break
|
||||||
except ExhaustedContextWindow:
|
except KeyboardInterrupt:
|
||||||
exhausted = True
|
interrupted = True
|
||||||
except litellm.exceptions.BadRequestError as err:
|
break
|
||||||
if "ContextWindowExceededError" in err.message:
|
except litellm.ContextWindowExceededError:
|
||||||
|
# The input is overflowing the context window!
|
||||||
exhausted = True
|
exhausted = True
|
||||||
else:
|
break
|
||||||
self.io.tool_error(f"BadRequestError: {err}")
|
except litellm.exceptions.BadRequestError as br_err:
|
||||||
|
self.io.tool_error(f"BadRequestError: {br_err}")
|
||||||
return
|
return
|
||||||
except openai.BadRequestError as err:
|
except FinishReasonLength:
|
||||||
if "maximum context length" in str(err):
|
# We hit the 4k output limit!
|
||||||
exhausted = True
|
if not self.main_model.can_prefill:
|
||||||
else:
|
exhausted = True
|
||||||
raise err
|
break
|
||||||
except Exception as err:
|
|
||||||
self.io.tool_error(f"Unexpected error: {err}")
|
# Use prefill to continue the response
|
||||||
return
|
multi_response_content += self.partial_response_content
|
||||||
|
if messages[-1]["role"] == "assistant":
|
||||||
|
messages[-1]["content"] = multi_response_content
|
||||||
|
else:
|
||||||
|
messages.append(dict(role="assistant", content=multi_response_content))
|
||||||
|
except Exception as err:
|
||||||
|
self.io.tool_error(f"Unexpected error: {err}")
|
||||||
|
traceback.print_exc()
|
||||||
|
return
|
||||||
|
|
||||||
|
if multi_response_content:
|
||||||
|
multi_response_content += self.partial_response_content
|
||||||
|
self.partial_response_content = multi_response_content
|
||||||
|
|
||||||
if exhausted:
|
if exhausted:
|
||||||
self.show_exhausted_error()
|
self.show_exhausted_error()
|
||||||
|
@ -1101,7 +1117,7 @@ class Coder:
|
||||||
if show_func_err and show_content_err:
|
if show_func_err and show_content_err:
|
||||||
self.io.tool_error(show_func_err)
|
self.io.tool_error(show_func_err)
|
||||||
self.io.tool_error(show_content_err)
|
self.io.tool_error(show_content_err)
|
||||||
raise Exception("No data found in openai response!")
|
raise Exception("No data found in LLM response!")
|
||||||
|
|
||||||
tokens = None
|
tokens = None
|
||||||
if hasattr(completion, "usage") and completion.usage is not None:
|
if hasattr(completion, "usage") and completion.usage is not None:
|
||||||
|
@ -1129,6 +1145,12 @@ class Coder:
|
||||||
if tokens is not None:
|
if tokens is not None:
|
||||||
self.io.tool_output(tokens)
|
self.io.tool_output(tokens)
|
||||||
|
|
||||||
|
if (
|
||||||
|
hasattr(completion.choices[0], "finish_reason")
|
||||||
|
and completion.choices[0].finish_reason == "length"
|
||||||
|
):
|
||||||
|
raise FinishReasonLength()
|
||||||
|
|
||||||
def show_send_output_stream(self, completion):
|
def show_send_output_stream(self, completion):
|
||||||
if self.show_pretty():
|
if self.show_pretty():
|
||||||
mdargs = dict(style=self.assistant_output_color, code_theme=self.code_theme)
|
mdargs = dict(style=self.assistant_output_color, code_theme=self.code_theme)
|
||||||
|
@ -1145,7 +1167,7 @@ class Coder:
|
||||||
hasattr(chunk.choices[0], "finish_reason")
|
hasattr(chunk.choices[0], "finish_reason")
|
||||||
and chunk.choices[0].finish_reason == "length"
|
and chunk.choices[0].finish_reason == "length"
|
||||||
):
|
):
|
||||||
raise ExhaustedContextWindow()
|
raise FinishReasonLength()
|
||||||
|
|
||||||
try:
|
try:
|
||||||
func = chunk.choices[0].delta.function_call
|
func = chunk.choices[0].delta.function_call
|
||||||
|
|
|
@ -331,10 +331,7 @@ class Commands:
|
||||||
return
|
return
|
||||||
|
|
||||||
last_commit = self.coder.repo.repo.head.commit
|
last_commit = self.coder.repo.repo.head.commit
|
||||||
if (
|
if last_commit.hexsha[:7] != self.coder.last_aider_commit_hash:
|
||||||
not last_commit.author.name.endswith(" (aider)")
|
|
||||||
or last_commit.hexsha[:7] != self.coder.last_aider_commit_hash
|
|
||||||
):
|
|
||||||
self.io.tool_error("The last commit was not made by aider in this chat session.")
|
self.io.tool_error("The last commit was not made by aider in this chat session.")
|
||||||
self.io.tool_error(
|
self.io.tool_error(
|
||||||
"You could try `/git reset --hard HEAD^` but be aware that this is a destructive"
|
"You could try `/git reset --hard HEAD^` but be aware that this is a destructive"
|
||||||
|
|
|
@ -17,9 +17,10 @@ from aider.scrape import Scraper
|
||||||
class CaptureIO(InputOutput):
|
class CaptureIO(InputOutput):
|
||||||
lines = []
|
lines = []
|
||||||
|
|
||||||
def tool_output(self, msg):
|
def tool_output(self, msg, log_only=False):
|
||||||
self.lines.append(msg)
|
if not log_only:
|
||||||
super().tool_output(msg)
|
self.lines.append(msg)
|
||||||
|
super().tool_output(msg, log_only=log_only)
|
||||||
|
|
||||||
def tool_error(self, msg):
|
def tool_error(self, msg):
|
||||||
self.lines.append(msg)
|
self.lines.append(msg)
|
||||||
|
|
|
@ -441,6 +441,7 @@ def main(argv=None, input=None, output=None, force_git_root=None, return_coder=F
|
||||||
test_cmd=args.test_cmd,
|
test_cmd=args.test_cmd,
|
||||||
attribute_author=args.attribute_author,
|
attribute_author=args.attribute_author,
|
||||||
attribute_committer=args.attribute_committer,
|
attribute_committer=args.attribute_committer,
|
||||||
|
attribute_commit_message=args.attribute_commit_message,
|
||||||
)
|
)
|
||||||
|
|
||||||
except ValueError as err:
|
except ValueError as err:
|
||||||
|
|
|
@ -27,6 +27,7 @@ class ModelSettings:
|
||||||
lazy: bool = False
|
lazy: bool = False
|
||||||
reminder_as_sys_msg: bool = False
|
reminder_as_sys_msg: bool = False
|
||||||
examples_as_sys_msg: bool = False
|
examples_as_sys_msg: bool = False
|
||||||
|
can_prefill: bool = False
|
||||||
|
|
||||||
|
|
||||||
# https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo
|
# https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo
|
||||||
|
@ -166,6 +167,7 @@ MODEL_SETTINGS = [
|
||||||
weak_model_name="claude-3-haiku-20240307",
|
weak_model_name="claude-3-haiku-20240307",
|
||||||
use_repo_map=True,
|
use_repo_map=True,
|
||||||
send_undo_reply=True,
|
send_undo_reply=True,
|
||||||
|
can_prefill=True,
|
||||||
),
|
),
|
||||||
ModelSettings(
|
ModelSettings(
|
||||||
"openrouter/anthropic/claude-3-opus",
|
"openrouter/anthropic/claude-3-opus",
|
||||||
|
@ -173,11 +175,13 @@ MODEL_SETTINGS = [
|
||||||
weak_model_name="openrouter/anthropic/claude-3-haiku",
|
weak_model_name="openrouter/anthropic/claude-3-haiku",
|
||||||
use_repo_map=True,
|
use_repo_map=True,
|
||||||
send_undo_reply=True,
|
send_undo_reply=True,
|
||||||
|
can_prefill=True,
|
||||||
),
|
),
|
||||||
ModelSettings(
|
ModelSettings(
|
||||||
"claude-3-sonnet-20240229",
|
"claude-3-sonnet-20240229",
|
||||||
"whole",
|
"whole",
|
||||||
weak_model_name="claude-3-haiku-20240307",
|
weak_model_name="claude-3-haiku-20240307",
|
||||||
|
can_prefill=True,
|
||||||
),
|
),
|
||||||
ModelSettings(
|
ModelSettings(
|
||||||
"claude-3-5-sonnet-20240620",
|
"claude-3-5-sonnet-20240620",
|
||||||
|
@ -185,6 +189,7 @@ MODEL_SETTINGS = [
|
||||||
weak_model_name="claude-3-haiku-20240307",
|
weak_model_name="claude-3-haiku-20240307",
|
||||||
use_repo_map=True,
|
use_repo_map=True,
|
||||||
examples_as_sys_msg=True,
|
examples_as_sys_msg=True,
|
||||||
|
can_prefill=True,
|
||||||
),
|
),
|
||||||
ModelSettings(
|
ModelSettings(
|
||||||
"anthropic/claude-3-5-sonnet-20240620",
|
"anthropic/claude-3-5-sonnet-20240620",
|
||||||
|
@ -192,6 +197,7 @@ MODEL_SETTINGS = [
|
||||||
weak_model_name="claude-3-haiku-20240307",
|
weak_model_name="claude-3-haiku-20240307",
|
||||||
use_repo_map=True,
|
use_repo_map=True,
|
||||||
examples_as_sys_msg=True,
|
examples_as_sys_msg=True,
|
||||||
|
can_prefill=True,
|
||||||
),
|
),
|
||||||
ModelSettings(
|
ModelSettings(
|
||||||
"openrouter/anthropic/claude-3.5-sonnet",
|
"openrouter/anthropic/claude-3.5-sonnet",
|
||||||
|
@ -199,6 +205,7 @@ MODEL_SETTINGS = [
|
||||||
weak_model_name="openrouter/anthropic/claude-3-haiku-20240307",
|
weak_model_name="openrouter/anthropic/claude-3-haiku-20240307",
|
||||||
use_repo_map=True,
|
use_repo_map=True,
|
||||||
examples_as_sys_msg=True,
|
examples_as_sys_msg=True,
|
||||||
|
can_prefill=True,
|
||||||
),
|
),
|
||||||
# Vertex AI Claude models
|
# Vertex AI Claude models
|
||||||
ModelSettings(
|
ModelSettings(
|
||||||
|
@ -206,6 +213,8 @@ MODEL_SETTINGS = [
|
||||||
"diff",
|
"diff",
|
||||||
weak_model_name="vertex_ai/claude-3-haiku@20240307",
|
weak_model_name="vertex_ai/claude-3-haiku@20240307",
|
||||||
use_repo_map=True,
|
use_repo_map=True,
|
||||||
|
examples_as_sys_msg=True,
|
||||||
|
can_prefill=True,
|
||||||
),
|
),
|
||||||
ModelSettings(
|
ModelSettings(
|
||||||
"vertex_ai/claude-3-opus@20240229",
|
"vertex_ai/claude-3-opus@20240229",
|
||||||
|
@ -213,11 +222,13 @@ MODEL_SETTINGS = [
|
||||||
weak_model_name="vertex_ai/claude-3-haiku@20240307",
|
weak_model_name="vertex_ai/claude-3-haiku@20240307",
|
||||||
use_repo_map=True,
|
use_repo_map=True,
|
||||||
send_undo_reply=True,
|
send_undo_reply=True,
|
||||||
|
can_prefill=True,
|
||||||
),
|
),
|
||||||
ModelSettings(
|
ModelSettings(
|
||||||
"vertex_ai/claude-3-sonnet@20240229",
|
"vertex_ai/claude-3-sonnet@20240229",
|
||||||
"whole",
|
"whole",
|
||||||
weak_model_name="vertex_ai/claude-3-haiku@20240307",
|
weak_model_name="vertex_ai/claude-3-haiku@20240307",
|
||||||
|
can_prefill=True,
|
||||||
),
|
),
|
||||||
# Cohere
|
# Cohere
|
||||||
ModelSettings(
|
ModelSettings(
|
||||||
|
@ -328,7 +339,7 @@ class Model:
|
||||||
self.missing_keys = res.get("missing_keys")
|
self.missing_keys = res.get("missing_keys")
|
||||||
self.keys_in_environment = res.get("keys_in_environment")
|
self.keys_in_environment = res.get("keys_in_environment")
|
||||||
|
|
||||||
max_input_tokens = self.info.get("max_input_tokens", 0)
|
max_input_tokens = self.info.get("max_input_tokens")
|
||||||
if not max_input_tokens:
|
if not max_input_tokens:
|
||||||
max_input_tokens = 0
|
max_input_tokens = 0
|
||||||
if max_input_tokens < 32 * 1024:
|
if max_input_tokens < 32 * 1024:
|
||||||
|
@ -375,6 +386,15 @@ class Model:
|
||||||
if "gpt-3.5" in model or "gpt-4" in model:
|
if "gpt-3.5" in model or "gpt-4" in model:
|
||||||
self.reminder_as_sys_msg = True
|
self.reminder_as_sys_msg = True
|
||||||
|
|
||||||
|
if "anthropic" in model:
|
||||||
|
self.can_prefill = True
|
||||||
|
|
||||||
|
if "3.5-sonnet" in model or "3-5-sonnet" in model:
|
||||||
|
self.edit_format = "diff"
|
||||||
|
self.use_repo_map = True
|
||||||
|
self.examples_as_sys_msg = True
|
||||||
|
self.can_prefill = True
|
||||||
|
|
||||||
# use the defaults
|
# use the defaults
|
||||||
if self.edit_format == "diff":
|
if self.edit_format == "diff":
|
||||||
self.use_repo_map = True
|
self.use_repo_map = True
|
||||||
|
@ -554,7 +574,7 @@ def sanity_check_model(io, model):
|
||||||
if not model.info:
|
if not model.info:
|
||||||
show = True
|
show = True
|
||||||
io.tool_output(
|
io.tool_output(
|
||||||
f"Model {model}: Unknown model, context window size and token costs unavailable."
|
f"Model {model}: Unknown context window size and costs, using sane defaults."
|
||||||
)
|
)
|
||||||
|
|
||||||
possible_matches = fuzzy_match_models(model.name)
|
possible_matches = fuzzy_match_models(model.name)
|
||||||
|
@ -563,12 +583,12 @@ def sanity_check_model(io, model):
|
||||||
for match in possible_matches:
|
for match in possible_matches:
|
||||||
fq, m = match
|
fq, m = match
|
||||||
if fq == m:
|
if fq == m:
|
||||||
io.tool_error(f"- {m}")
|
io.tool_output(f"- {m}")
|
||||||
else:
|
else:
|
||||||
io.tool_error(f"- {m} ({fq})")
|
io.tool_output(f"- {m} ({fq})")
|
||||||
|
|
||||||
if show:
|
if show:
|
||||||
io.tool_error(urls.model_warnings)
|
io.tool_output(f"For more info, see: {urls.model_warnings}\n")
|
||||||
|
|
||||||
|
|
||||||
def fuzzy_match_models(name):
|
def fuzzy_match_models(name):
|
||||||
|
|
|
@ -25,12 +25,14 @@ class GitRepo:
|
||||||
models=None,
|
models=None,
|
||||||
attribute_author=True,
|
attribute_author=True,
|
||||||
attribute_committer=True,
|
attribute_committer=True,
|
||||||
|
attribute_commit_message=False,
|
||||||
):
|
):
|
||||||
self.io = io
|
self.io = io
|
||||||
self.models = models
|
self.models = models
|
||||||
|
|
||||||
self.attribute_author = attribute_author
|
self.attribute_author = attribute_author
|
||||||
self.attribute_committer = attribute_committer
|
self.attribute_committer = attribute_committer
|
||||||
|
self.attribute_commit_message = attribute_commit_message
|
||||||
|
|
||||||
if git_dname:
|
if git_dname:
|
||||||
check_fnames = [git_dname]
|
check_fnames = [git_dname]
|
||||||
|
@ -84,6 +86,9 @@ class GitRepo:
|
||||||
else:
|
else:
|
||||||
commit_message = self.get_commit_message(diffs, context)
|
commit_message = self.get_commit_message(diffs, context)
|
||||||
|
|
||||||
|
if aider_edits and self.attribute_commit_message:
|
||||||
|
commit_message = "aider: " + commit_message
|
||||||
|
|
||||||
if not commit_message:
|
if not commit_message:
|
||||||
commit_message = "(no commit message provided)"
|
commit_message = "(no commit message provided)"
|
||||||
|
|
||||||
|
|
|
@ -3,7 +3,6 @@ import json
|
||||||
|
|
||||||
import backoff
|
import backoff
|
||||||
import httpx
|
import httpx
|
||||||
import openai
|
|
||||||
|
|
||||||
from aider.dump import dump # noqa: F401
|
from aider.dump import dump # noqa: F401
|
||||||
from aider.litellm import litellm
|
from aider.litellm import litellm
|
||||||
|
@ -85,5 +84,5 @@ def simple_send_with_retries(model_name, messages):
|
||||||
stream=False,
|
stream=False,
|
||||||
)
|
)
|
||||||
return response.choices[0].message.content
|
return response.choices[0].message.content
|
||||||
except (AttributeError, openai.BadRequestError):
|
except (AttributeError, litellm.exceptions.BadRequestError):
|
||||||
return
|
return
|
||||||
|
|
|
@ -1,16 +1,15 @@
|
||||||
import tempfile
|
import tempfile
|
||||||
import unittest
|
import unittest
|
||||||
from pathlib import Path
|
from pathlib import Path
|
||||||
from unittest.mock import MagicMock, patch
|
from unittest.mock import MagicMock
|
||||||
|
|
||||||
import git
|
import git
|
||||||
import openai
|
|
||||||
|
|
||||||
from aider.coders import Coder
|
from aider.coders import Coder
|
||||||
from aider.dump import dump # noqa: F401
|
from aider.dump import dump # noqa: F401
|
||||||
from aider.io import InputOutput
|
from aider.io import InputOutput
|
||||||
from aider.models import Model
|
from aider.models import Model
|
||||||
from aider.utils import ChdirTemporaryDirectory, GitTemporaryDirectory
|
from aider.utils import GitTemporaryDirectory
|
||||||
|
|
||||||
|
|
||||||
class TestCoder(unittest.TestCase):
|
class TestCoder(unittest.TestCase):
|
||||||
|
@ -330,25 +329,6 @@ class TestCoder(unittest.TestCase):
|
||||||
# both files should still be here
|
# both files should still be here
|
||||||
self.assertEqual(len(coder.abs_fnames), 2)
|
self.assertEqual(len(coder.abs_fnames), 2)
|
||||||
|
|
||||||
def test_run_with_invalid_request_error(self):
|
|
||||||
with ChdirTemporaryDirectory():
|
|
||||||
# Mock the IO object
|
|
||||||
mock_io = MagicMock()
|
|
||||||
|
|
||||||
# Initialize the Coder object with the mocked IO and mocked repo
|
|
||||||
coder = Coder.create(self.GPT35, None, mock_io)
|
|
||||||
|
|
||||||
# Call the run method and assert that InvalidRequestError is raised
|
|
||||||
with self.assertRaises(openai.BadRequestError):
|
|
||||||
with patch("litellm.completion") as Mock:
|
|
||||||
Mock.side_effect = openai.BadRequestError(
|
|
||||||
message="Invalid request",
|
|
||||||
response=MagicMock(),
|
|
||||||
body=None,
|
|
||||||
)
|
|
||||||
|
|
||||||
coder.run(with_message="hi")
|
|
||||||
|
|
||||||
def test_new_file_edit_one_commit(self):
|
def test_new_file_edit_one_commit(self):
|
||||||
"""A new file shouldn't get pre-committed before the GPT edit commit"""
|
"""A new file shouldn't get pre-committed before the GPT edit commit"""
|
||||||
with GitTemporaryDirectory():
|
with GitTemporaryDirectory():
|
||||||
|
|
|
@ -523,8 +523,6 @@ class TestCommands(TestCase):
|
||||||
other_path.write_text("other content")
|
other_path.write_text("other content")
|
||||||
repo.git.add(str(other_path))
|
repo.git.add(str(other_path))
|
||||||
|
|
||||||
os.environ["GIT_AUTHOR_NAME"] = "Foo (aider)"
|
|
||||||
|
|
||||||
# Create and commit a file
|
# Create and commit a file
|
||||||
filename = "test_file.txt"
|
filename = "test_file.txt"
|
||||||
file_path = Path(repo_dir) / filename
|
file_path = Path(repo_dir) / filename
|
||||||
|
@ -536,8 +534,6 @@ class TestCommands(TestCase):
|
||||||
repo.git.add(filename)
|
repo.git.add(filename)
|
||||||
repo.git.commit("-m", "second commit")
|
repo.git.commit("-m", "second commit")
|
||||||
|
|
||||||
del os.environ["GIT_AUTHOR_NAME"]
|
|
||||||
|
|
||||||
# Store the commit hash
|
# Store the commit hash
|
||||||
last_commit_hash = repo.head.commit.hexsha[:7]
|
last_commit_hash = repo.head.commit.hexsha[:7]
|
||||||
coder.last_aider_commit_hash = last_commit_hash
|
coder.last_aider_commit_hash = last_commit_hash
|
||||||
|
|
|
@ -14,8 +14,8 @@ cog $ARG \
|
||||||
README.md \
|
README.md \
|
||||||
website/index.md \
|
website/index.md \
|
||||||
website/HISTORY.md \
|
website/HISTORY.md \
|
||||||
website/docs/dotenv.md \
|
|
||||||
website/docs/commands.md \
|
website/docs/commands.md \
|
||||||
website/docs/languages.md \
|
website/docs/languages.md \
|
||||||
website/docs/options.md \
|
website/docs/config/dotenv.md \
|
||||||
website/docs/aider_conf.md
|
website/docs/config/options.md \
|
||||||
|
website/docs/config/aider_conf.md
|
||||||
|
|
4
setup.py
4
setup.py
|
@ -7,10 +7,10 @@ with open("requirements.txt") as f:
|
||||||
|
|
||||||
from aider import __version__
|
from aider import __version__
|
||||||
|
|
||||||
with open("website/index.md", "r", encoding="utf-8") as f:
|
with open("README.md", "r", encoding="utf-8") as f:
|
||||||
long_description = f.read()
|
long_description = f.read()
|
||||||
long_description = re.sub(r"\n!\[.*\]\(.*\)", "", long_description)
|
long_description = re.sub(r"\n!\[.*\]\(.*\)", "", long_description)
|
||||||
long_description = re.sub(r"\n- \[.*\]\(.*\)", "", long_description)
|
# long_description = re.sub(r"\n- \[.*\]\(.*\)", "", long_description)
|
||||||
|
|
||||||
setup(
|
setup(
|
||||||
name="aider-chat",
|
name="aider-chat",
|
||||||
|
|
|
@ -12,7 +12,11 @@ cog.out(text)
|
||||||
|
|
||||||
# Release history
|
# Release history
|
||||||
|
|
||||||
### v0.40.2
|
### v0.40.6
|
||||||
|
|
||||||
|
- Fixed `/undo` so it works with `--no-attribute-author`.
|
||||||
|
|
||||||
|
### v0.40.5
|
||||||
|
|
||||||
- Bump versions to pickup latest litellm to fix streaming issue with Gemini
|
- Bump versions to pickup latest litellm to fix streaming issue with Gemini
|
||||||
- https://github.com/BerriAI/litellm/issues/4408
|
- https://github.com/BerriAI/litellm/issues/4408
|
||||||
|
|
|
@ -1,18 +1,41 @@
|
||||||
|
|
||||||
|
## Unknown context window size and token costs
|
||||||
|
|
||||||
Aider tries to sanity check that it is configured correctly
|
```
|
||||||
to work with the LLM you specified:
|
Model foobar: Unknown context window size and costs, using sane defaults.
|
||||||
|
```
|
||||||
|
|
||||||
- It checks to see that all required environment variables are set for the model. These variables are required to configure things like API keys, API base URLs, etc.
|
*You can probably ignore the unknown context window size and token costs warning.*
|
||||||
These settings are required to be correct.
|
|
||||||
- It checks a metadata database to look up the context window size and token costs for the model.
|
|
||||||
It's usually OK if this extra metadata isn't available.
|
|
||||||
|
|
||||||
Sometimes one or both of these checks will fail, so aider will issue
|
If you specify a model that aider has never heard of, you will get
|
||||||
some of the following warnings.
|
this warning.
|
||||||
|
This means aider doesn't know the context window size and token costs
|
||||||
|
for that model.
|
||||||
|
Aider will use an unlimited context window and assume the model is free,
|
||||||
|
so this is not usually a significant problem.
|
||||||
|
|
||||||
|
See the docs on
|
||||||
|
[configuring advanced model settings](/docs/config/adv-model-settings.html)
|
||||||
|
for details on how to remove this warning.
|
||||||
|
|
||||||
|
## Did you mean?
|
||||||
|
|
||||||
|
If aider isn't familiar with the model you've specified,
|
||||||
|
it will suggest similarly named models.
|
||||||
|
This helps
|
||||||
|
in the case where you made a typo or mistake when specifying the model name.
|
||||||
|
|
||||||
|
```
|
||||||
|
Model gpt-5o: Unknown context window size and costs, using sane defaults.
|
||||||
|
Did you mean one of these?
|
||||||
|
- gpt-4o
|
||||||
|
```
|
||||||
|
|
||||||
## Missing environment variables
|
## Missing environment variables
|
||||||
|
|
||||||
|
You need to set the listed environment variables.
|
||||||
|
Otherwise you will get error messages when you start chatting with the model.
|
||||||
|
|
||||||
```
|
```
|
||||||
Model azure/gpt-4-turbo: Missing these environment variables:
|
Model azure/gpt-4-turbo: Missing these environment variables:
|
||||||
- AZURE_API_BASE
|
- AZURE_API_BASE
|
||||||
|
@ -20,8 +43,6 @@ Model azure/gpt-4-turbo: Missing these environment variables:
|
||||||
- AZURE_API_KEY
|
- AZURE_API_KEY
|
||||||
```
|
```
|
||||||
|
|
||||||
You need to set the listed environment variables.
|
|
||||||
Otherwise you will get error messages when you start chatting with the model.
|
|
||||||
|
|
||||||
|
|
||||||
## Unknown which environment variables are required
|
## Unknown which environment variables are required
|
||||||
|
@ -34,24 +55,8 @@ Aider is unable verify the environment because it doesn't know
|
||||||
which variables are required for the model.
|
which variables are required for the model.
|
||||||
If required variables are missing,
|
If required variables are missing,
|
||||||
you may get errors when you attempt to chat with the model.
|
you may get errors when you attempt to chat with the model.
|
||||||
You can look in the
|
You can look in the [aider's LLM documentation](/docs/llms.html)
|
||||||
[litellm provider documentation](https://docs.litellm.ai/docs/providers)
|
or the
|
||||||
|
[litellm documentation](https://docs.litellm.ai/docs/providers)
|
||||||
to see if the required variables are listed there.
|
to see if the required variables are listed there.
|
||||||
|
|
||||||
## Context window size and token costs unavailable.
|
|
||||||
|
|
||||||
```
|
|
||||||
Model foobar: Unknown model, context window size and token costs unavailable.
|
|
||||||
```
|
|
||||||
|
|
||||||
If you specify a model that aider has never heard of, you will get an
|
|
||||||
"unknown model" warning.
|
|
||||||
This means aider doesn't know the context window size and token costs
|
|
||||||
for that model.
|
|
||||||
Some minor functionality will be limited when using such models, but
|
|
||||||
it's not really a significant problem.
|
|
||||||
|
|
||||||
Aider will also try to suggest similarly named models,
|
|
||||||
in case you made a typo or mistake when specifying the model name.
|
|
||||||
|
|
||||||
|
|
||||||
|
|
|
@ -13,9 +13,6 @@
|
||||||
#######
|
#######
|
||||||
# Main:
|
# Main:
|
||||||
|
|
||||||
## Log the conversation with the LLM to this file (for example, .aider.llm.history)
|
|
||||||
#llm-history-file:
|
|
||||||
|
|
||||||
## Specify the OpenAI API key
|
## Specify the OpenAI API key
|
||||||
#openai-api-key:
|
#openai-api-key:
|
||||||
|
|
||||||
|
@ -103,6 +100,9 @@
|
||||||
## Restore the previous chat history messages (default: False)
|
## Restore the previous chat history messages (default: False)
|
||||||
#restore-chat-history: false
|
#restore-chat-history: false
|
||||||
|
|
||||||
|
## Log the conversation with the LLM to this file (for example, .aider.llm.history)
|
||||||
|
#llm-history-file:
|
||||||
|
|
||||||
##################
|
##################
|
||||||
# Output Settings:
|
# Output Settings:
|
||||||
|
|
||||||
|
@ -160,6 +160,9 @@
|
||||||
## Attribute aider commits in the git committer name (default: True)
|
## Attribute aider commits in the git committer name (default: True)
|
||||||
#attribute-committer: true
|
#attribute-committer: true
|
||||||
|
|
||||||
|
## Prefix commit messages with 'aider: ' (default: False)
|
||||||
|
#attribute-commit-message: false
|
||||||
|
|
||||||
## Perform a dry run without modifying files (default: False)
|
## Perform a dry run without modifying files (default: False)
|
||||||
#dry-run: false
|
#dry-run: false
|
||||||
|
|
||||||
|
|
|
@ -21,9 +21,6 @@
|
||||||
#######
|
#######
|
||||||
# Main:
|
# Main:
|
||||||
|
|
||||||
## Log the conversation with the LLM to this file (for example, .aider.llm.history)
|
|
||||||
#AIDER_LLM_HISTORY_FILE=
|
|
||||||
|
|
||||||
## Specify the OpenAI API key
|
## Specify the OpenAI API key
|
||||||
#OPENAI_API_KEY=
|
#OPENAI_API_KEY=
|
||||||
|
|
||||||
|
@ -111,6 +108,9 @@
|
||||||
## Restore the previous chat history messages (default: False)
|
## Restore the previous chat history messages (default: False)
|
||||||
#AIDER_RESTORE_CHAT_HISTORY=false
|
#AIDER_RESTORE_CHAT_HISTORY=false
|
||||||
|
|
||||||
|
## Log the conversation with the LLM to this file (for example, .aider.llm.history)
|
||||||
|
#AIDER_LLM_HISTORY_FILE=
|
||||||
|
|
||||||
##################
|
##################
|
||||||
# Output Settings:
|
# Output Settings:
|
||||||
|
|
||||||
|
@ -168,6 +168,9 @@
|
||||||
## Attribute aider commits in the git committer name (default: True)
|
## Attribute aider commits in the git committer name (default: True)
|
||||||
#AIDER_ATTRIBUTE_COMMITTER=true
|
#AIDER_ATTRIBUTE_COMMITTER=true
|
||||||
|
|
||||||
|
## Prefix commit messages with 'aider: ' (default: False)
|
||||||
|
#AIDER_ATTRIBUTE_COMMIT_MESSAGE=false
|
||||||
|
|
||||||
## Perform a dry run without modifying files (default: False)
|
## Perform a dry run without modifying files (default: False)
|
||||||
#AIDER_DRY_RUN=false
|
#AIDER_DRY_RUN=false
|
||||||
|
|
||||||
|
|
|
@ -11,24 +11,31 @@ command line switches.
|
||||||
Most options can also be set in an `.aider.conf.yml` file
|
Most options can also be set in an `.aider.conf.yml` file
|
||||||
which can be placed in your home directory or at the root of
|
which can be placed in your home directory or at the root of
|
||||||
your git repo.
|
your git repo.
|
||||||
Or via environment variables like `AIDER_xxx`,
|
Or by setting environment variables like `AIDER_xxx`
|
||||||
as noted in the [options reference](options.html).
|
either in your shell or a `.env` file.
|
||||||
|
|
||||||
Here are 3 equivalent ways of setting an option. First, via a command line switch:
|
Here are 4 equivalent ways of setting an option.
|
||||||
|
|
||||||
|
With a command line switch:
|
||||||
|
|
||||||
```
|
```
|
||||||
$ aider --dark-mode
|
$ aider --dark-mode
|
||||||
```
|
```
|
||||||
|
|
||||||
Or, via an env variable:
|
Using a `.aider.conf.yml` file:
|
||||||
|
|
||||||
```
|
|
||||||
export AIDER_DARK_MODE=true
|
|
||||||
```
|
|
||||||
|
|
||||||
Or in the `.aider.conf.yml` file:
|
|
||||||
|
|
||||||
```yaml
|
```yaml
|
||||||
dark-mode: true
|
dark-mode: true
|
||||||
```
|
```
|
||||||
|
|
||||||
|
By setting an environment variable:
|
||||||
|
|
||||||
|
```
|
||||||
|
export AIDER_DARK_MODE=true
|
||||||
|
```
|
||||||
|
|
||||||
|
Using an `.env` file:
|
||||||
|
|
||||||
|
```
|
||||||
|
AIDER_DARK_MODE=true
|
||||||
|
```
|
||||||
|
|
86
website/docs/config/adv-model-settings.md
Normal file
86
website/docs/config/adv-model-settings.md
Normal file
|
@ -0,0 +1,86 @@
|
||||||
|
---
|
||||||
|
parent: Configuration
|
||||||
|
nav_order: 950
|
||||||
|
description: Configuring advanced settings for LLMs.
|
||||||
|
---
|
||||||
|
|
||||||
|
# Advanced model settings
|
||||||
|
|
||||||
|
## Context window size and token costs
|
||||||
|
|
||||||
|
In most cases, you can safely ignore aider's warning about unknown context
|
||||||
|
window size and model costs.
|
||||||
|
|
||||||
|
But, you can register context window limits and costs for models that aren't known
|
||||||
|
to aider. Create a `.aider.litellm.models.json` file in one of these locations:
|
||||||
|
|
||||||
|
- Your home directory.
|
||||||
|
- The root of your git repo.
|
||||||
|
- The current directory where you launch aider.
|
||||||
|
- Or specify a specific file with the `--model-metadata-file <filename>` switch.
|
||||||
|
|
||||||
|
|
||||||
|
If the files above exist, they will be loaded in that order.
|
||||||
|
Files loaded last will take priority.
|
||||||
|
|
||||||
|
The json file should be a dictionary with an entry for each model, as follows:
|
||||||
|
|
||||||
|
```
|
||||||
|
{
|
||||||
|
"deepseek-chat": {
|
||||||
|
"max_tokens": 4096,
|
||||||
|
"max_input_tokens": 32000,
|
||||||
|
"max_output_tokens": 4096,
|
||||||
|
"input_cost_per_token": 0.00000014,
|
||||||
|
"output_cost_per_token": 0.00000028,
|
||||||
|
"litellm_provider": "deepseek",
|
||||||
|
"mode": "chat"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
See
|
||||||
|
[litellm's model_prices_and_context_window.json file](https://github.com/BerriAI/litellm/blob/main/model_prices_and_context_window.json) for more examples.
|
||||||
|
|
||||||
|
## Model settings
|
||||||
|
|
||||||
|
Aider has a number of settings that control how it works with
|
||||||
|
different models.
|
||||||
|
These model settings are pre-configured for most popular models.
|
||||||
|
But it can sometimes be helpful to override them or add settings for
|
||||||
|
a model that aider doesn't know about.
|
||||||
|
|
||||||
|
To do that,
|
||||||
|
create a `.aider.models.yml` file in one of these locations:
|
||||||
|
|
||||||
|
- Your home directory.
|
||||||
|
- The root of your git repo.
|
||||||
|
- The current directory where you launch aider.
|
||||||
|
- Or specify a specific file with the `--model-settings-file <filename>` switch.
|
||||||
|
|
||||||
|
If the files above exist, they will be loaded in that order.
|
||||||
|
Files loaded last will take priority.
|
||||||
|
|
||||||
|
The yaml file should be a list of dictionary objects for each model, as follows:
|
||||||
|
|
||||||
|
```
|
||||||
|
- name: "gpt-3.5-turbo"
|
||||||
|
edit_format: "whole"
|
||||||
|
weak_model_name: "gpt-3.5-turbo"
|
||||||
|
use_repo_map: false
|
||||||
|
send_undo_reply: false
|
||||||
|
accepts_images: false
|
||||||
|
lazy: false
|
||||||
|
reminder_as_sys_msg: true
|
||||||
|
examples_as_sys_msg: false
|
||||||
|
- name: "gpt-4-turbo-2024-04-09"
|
||||||
|
edit_format: "udiff"
|
||||||
|
weak_model_name: "gpt-3.5-turbo"
|
||||||
|
use_repo_map: true
|
||||||
|
send_undo_reply: true
|
||||||
|
accepts_images: true
|
||||||
|
lazy: true
|
||||||
|
reminder_as_sys_msg: true
|
||||||
|
examples_as_sys_msg: false
|
||||||
|
```
|
||||||
|
|
|
@ -41,9 +41,6 @@ cog.outl("```")
|
||||||
#######
|
#######
|
||||||
# Main:
|
# Main:
|
||||||
|
|
||||||
## Log the conversation with the LLM to this file (for example, .aider.llm.history)
|
|
||||||
#llm-history-file:
|
|
||||||
|
|
||||||
## Specify the OpenAI API key
|
## Specify the OpenAI API key
|
||||||
#openai-api-key:
|
#openai-api-key:
|
||||||
|
|
||||||
|
@ -131,6 +128,9 @@ cog.outl("```")
|
||||||
## Restore the previous chat history messages (default: False)
|
## Restore the previous chat history messages (default: False)
|
||||||
#restore-chat-history: false
|
#restore-chat-history: false
|
||||||
|
|
||||||
|
## Log the conversation with the LLM to this file (for example, .aider.llm.history)
|
||||||
|
#llm-history-file:
|
||||||
|
|
||||||
##################
|
##################
|
||||||
# Output Settings:
|
# Output Settings:
|
||||||
|
|
||||||
|
@ -188,6 +188,9 @@ cog.outl("```")
|
||||||
## Attribute aider commits in the git committer name (default: True)
|
## Attribute aider commits in the git committer name (default: True)
|
||||||
#attribute-committer: true
|
#attribute-committer: true
|
||||||
|
|
||||||
|
## Prefix commit messages with 'aider: ' (default: False)
|
||||||
|
#attribute-commit-message: false
|
||||||
|
|
||||||
## Perform a dry run without modifying files (default: False)
|
## Perform a dry run without modifying files (default: False)
|
||||||
#dry-run: false
|
#dry-run: false
|
||||||
|
|
|
@ -54,9 +54,6 @@ cog.outl("```")
|
||||||
#######
|
#######
|
||||||
# Main:
|
# Main:
|
||||||
|
|
||||||
## Log the conversation with the LLM to this file (for example, .aider.llm.history)
|
|
||||||
#AIDER_LLM_HISTORY_FILE=
|
|
||||||
|
|
||||||
## Specify the OpenAI API key
|
## Specify the OpenAI API key
|
||||||
#OPENAI_API_KEY=
|
#OPENAI_API_KEY=
|
||||||
|
|
||||||
|
@ -144,6 +141,9 @@ cog.outl("```")
|
||||||
## Restore the previous chat history messages (default: False)
|
## Restore the previous chat history messages (default: False)
|
||||||
#AIDER_RESTORE_CHAT_HISTORY=false
|
#AIDER_RESTORE_CHAT_HISTORY=false
|
||||||
|
|
||||||
|
## Log the conversation with the LLM to this file (for example, .aider.llm.history)
|
||||||
|
#AIDER_LLM_HISTORY_FILE=
|
||||||
|
|
||||||
##################
|
##################
|
||||||
# Output Settings:
|
# Output Settings:
|
||||||
|
|
||||||
|
@ -201,6 +201,9 @@ cog.outl("```")
|
||||||
## Attribute aider commits in the git committer name (default: True)
|
## Attribute aider commits in the git committer name (default: True)
|
||||||
#AIDER_ATTRIBUTE_COMMITTER=true
|
#AIDER_ATTRIBUTE_COMMITTER=true
|
||||||
|
|
||||||
|
## Prefix commit messages with 'aider: ' (default: False)
|
||||||
|
#AIDER_ATTRIBUTE_COMMIT_MESSAGE=false
|
||||||
|
|
||||||
## Perform a dry run without modifying files (default: False)
|
## Perform a dry run without modifying files (default: False)
|
||||||
#AIDER_DRY_RUN=false
|
#AIDER_DRY_RUN=false
|
||||||
|
|
|
@ -20,29 +20,29 @@ from aider.args import get_md_help
|
||||||
cog.out(get_md_help())
|
cog.out(get_md_help())
|
||||||
]]]-->
|
]]]-->
|
||||||
```
|
```
|
||||||
usage: aider [-h] [--llm-history-file] [--openai-api-key]
|
usage: aider [-h] [--openai-api-key] [--anthropic-api-key] [--model]
|
||||||
[--anthropic-api-key] [--model] [--opus] [--sonnet]
|
[--opus] [--sonnet] [--4] [--4o] [--4-turbo]
|
||||||
[--4] [--4o] [--4-turbo] [--35turbo] [--models]
|
[--35turbo] [--models] [--openai-api-base]
|
||||||
[--openai-api-base] [--openai-api-type]
|
[--openai-api-type] [--openai-api-version]
|
||||||
[--openai-api-version] [--openai-api-deployment-id]
|
[--openai-api-deployment-id] [--openai-organization-id]
|
||||||
[--openai-organization-id] [--model-settings-file]
|
[--model-settings-file] [--model-metadata-file]
|
||||||
[--model-metadata-file]
|
|
||||||
[--verify-ssl | --no-verify-ssl] [--edit-format]
|
[--verify-ssl | --no-verify-ssl] [--edit-format]
|
||||||
[--weak-model]
|
[--weak-model]
|
||||||
[--show-model-warnings | --no-show-model-warnings]
|
[--show-model-warnings | --no-show-model-warnings]
|
||||||
[--map-tokens] [--max-chat-history-tokens] [--env-file]
|
[--map-tokens] [--max-chat-history-tokens] [--env-file]
|
||||||
[--input-history-file] [--chat-history-file]
|
[--input-history-file] [--chat-history-file]
|
||||||
[--restore-chat-history | --no-restore-chat-history]
|
[--restore-chat-history | --no-restore-chat-history]
|
||||||
[--dark-mode] [--light-mode] [--pretty | --no-pretty]
|
[--llm-history-file] [--dark-mode] [--light-mode]
|
||||||
[--stream | --no-stream] [--user-input-color]
|
[--pretty | --no-pretty] [--stream | --no-stream]
|
||||||
[--tool-output-color] [--tool-error-color]
|
[--user-input-color] [--tool-output-color]
|
||||||
[--assistant-output-color] [--code-theme]
|
[--tool-error-color] [--assistant-output-color]
|
||||||
[--show-diffs] [--git | --no-git]
|
[--code-theme] [--show-diffs] [--git | --no-git]
|
||||||
[--gitignore | --no-gitignore] [--aiderignore]
|
[--gitignore | --no-gitignore] [--aiderignore]
|
||||||
[--auto-commits | --no-auto-commits]
|
[--auto-commits | --no-auto-commits]
|
||||||
[--dirty-commits | --no-dirty-commits]
|
[--dirty-commits | --no-dirty-commits]
|
||||||
[--attribute-author | --no-attribute-author]
|
[--attribute-author | --no-attribute-author]
|
||||||
[--attribute-committer | --no-attribute-committer]
|
[--attribute-committer | --no-attribute-committer]
|
||||||
|
[--attribute-commit-message | --no-attribute-commit-message]
|
||||||
[--dry-run | --no-dry-run] [--commit] [--lint]
|
[--dry-run | --no-dry-run] [--commit] [--lint]
|
||||||
[--lint-cmd] [--auto-lint | --no-auto-lint]
|
[--lint-cmd] [--auto-lint | --no-auto-lint]
|
||||||
[--test-cmd] [--auto-test | --no-auto-test] [--test]
|
[--test-cmd] [--auto-test | --no-auto-test] [--test]
|
||||||
|
@ -63,10 +63,6 @@ Aliases:
|
||||||
|
|
||||||
## Main:
|
## Main:
|
||||||
|
|
||||||
### `--llm-history-file LLM_HISTORY_FILE`
|
|
||||||
Log the conversation with the LLM to this file (for example, .aider.llm.history)
|
|
||||||
Environment variable: `AIDER_LLM_HISTORY_FILE`
|
|
||||||
|
|
||||||
### `--openai-api-key OPENAI_API_KEY`
|
### `--openai-api-key OPENAI_API_KEY`
|
||||||
Specify the OpenAI API key
|
Specify the OpenAI API key
|
||||||
Environment variable: `OPENAI_API_KEY`
|
Environment variable: `OPENAI_API_KEY`
|
||||||
|
@ -204,6 +200,10 @@ Aliases:
|
||||||
- `--restore-chat-history`
|
- `--restore-chat-history`
|
||||||
- `--no-restore-chat-history`
|
- `--no-restore-chat-history`
|
||||||
|
|
||||||
|
### `--llm-history-file LLM_HISTORY_FILE`
|
||||||
|
Log the conversation with the LLM to this file (for example, .aider.llm.history)
|
||||||
|
Environment variable: `AIDER_LLM_HISTORY_FILE`
|
||||||
|
|
||||||
## Output Settings:
|
## Output Settings:
|
||||||
|
|
||||||
### `--dark-mode`
|
### `--dark-mode`
|
||||||
|
@ -316,6 +316,14 @@ Aliases:
|
||||||
- `--attribute-committer`
|
- `--attribute-committer`
|
||||||
- `--no-attribute-committer`
|
- `--no-attribute-committer`
|
||||||
|
|
||||||
|
### `--attribute-commit-message`
|
||||||
|
Prefix commit messages with 'aider: ' (default: False)
|
||||||
|
Default: False
|
||||||
|
Environment variable: `AIDER_ATTRIBUTE_COMMIT_MESSAGE`
|
||||||
|
Aliases:
|
||||||
|
- `--attribute-commit-message`
|
||||||
|
- `--no-attribute-commit-message`
|
||||||
|
|
||||||
### `--dry-run`
|
### `--dry-run`
|
||||||
Perform a dry run without modifying files (default: False)
|
Perform a dry run without modifying files (default: False)
|
||||||
Default: False
|
Default: False
|
|
@ -44,3 +44,6 @@ Aider marks commits that it either authored or committed.
|
||||||
|
|
||||||
You can use `--no-attribute-author` and `--no-attribute-committer` to disable
|
You can use `--no-attribute-author` and `--no-attribute-committer` to disable
|
||||||
modification of the git author and committer name fields.
|
modification of the git author and committer name fields.
|
||||||
|
|
||||||
|
Additionally, you can use `--attribute-commit-message` to prefix commit messages with 'aider: '.
|
||||||
|
This option is disabled by default, but can be useful for easily identifying commits made by aider.
|
||||||
|
|
|
@ -8,70 +8,3 @@ nav_order: 900
|
||||||
{% include model-warnings.md %}
|
{% include model-warnings.md %}
|
||||||
|
|
||||||
|
|
||||||
## Adding settings for missing models
|
|
||||||
You can register model settings used by aider for unknown models.
|
|
||||||
Create a `.aider.models.yml` file in one of these locations:
|
|
||||||
|
|
||||||
- Your home directory.
|
|
||||||
- The root if your git repo.
|
|
||||||
- The current directory where you launch aider.
|
|
||||||
- Or specify a specific file with the `--model-settings-file <filename>` switch.
|
|
||||||
|
|
||||||
If the files above exist, they will be loaded in that order.
|
|
||||||
Files loaded last will take priority.
|
|
||||||
|
|
||||||
The yaml file should be a a list of dictionary objects for each model, as follows:
|
|
||||||
|
|
||||||
```
|
|
||||||
- name: "gpt-3.5-turbo"
|
|
||||||
edit_format: "whole"
|
|
||||||
weak_model_name: "gpt-3.5-turbo"
|
|
||||||
use_repo_map: false
|
|
||||||
send_undo_reply: false
|
|
||||||
accepts_images: false
|
|
||||||
lazy: false
|
|
||||||
reminder_as_sys_msg: true
|
|
||||||
examples_as_sys_msg: false
|
|
||||||
- name: "gpt-4-turbo-2024-04-09"
|
|
||||||
edit_format: "udiff"
|
|
||||||
weak_model_name: "gpt-3.5-turbo"
|
|
||||||
use_repo_map: true
|
|
||||||
send_undo_reply: true
|
|
||||||
accepts_images: true
|
|
||||||
lazy: true
|
|
||||||
reminder_as_sys_msg: true
|
|
||||||
examples_as_sys_msg: false
|
|
||||||
```
|
|
||||||
|
|
||||||
## Specifying context window size and token costs
|
|
||||||
|
|
||||||
You can register context window limits and costs for models that aren't known
|
|
||||||
to aider. Create a `.aider.litellm.models.json` file in one of these locations:
|
|
||||||
|
|
||||||
- Your home directory.
|
|
||||||
- The root if your git repo.
|
|
||||||
- The current directory where you launch aider.
|
|
||||||
- Or specify a specific file with the `--model-metadata-file <filename>` switch.
|
|
||||||
|
|
||||||
|
|
||||||
If the files above exist, they will be loaded in that order.
|
|
||||||
Files loaded last will take priority.
|
|
||||||
|
|
||||||
The json file should be a dictionary with an entry for each model, as follows:
|
|
||||||
|
|
||||||
```
|
|
||||||
{
|
|
||||||
"deepseek-chat": {
|
|
||||||
"max_tokens": 4096,
|
|
||||||
"max_input_tokens": 32000,
|
|
||||||
"max_output_tokens": 4096,
|
|
||||||
"input_cost_per_token": 0.00000014,
|
|
||||||
"output_cost_per_token": 0.00000028,
|
|
||||||
"litellm_provider": "deepseek",
|
|
||||||
"mode": "chat"
|
|
||||||
}
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
See
|
|
||||||
[litellm's model_prices_and_context_window.json file](https://github.com/BerriAI/litellm/blob/main/model_prices_and_context_window.json) for more examples.
|
|
||||||
|
|
|
@ -21,8 +21,8 @@ In these cases, here are some things you might try.
|
||||||
|
|
||||||
## Use a capable model
|
## Use a capable model
|
||||||
|
|
||||||
If possible try using GPT-4o or Opus, as they are the strongest and most
|
If possible try using GPT-4o, Claude 3.5 Sonnet or Claude 3 Opus,
|
||||||
capable models.
|
as they are the strongest and most capable models.
|
||||||
|
|
||||||
Weaker models
|
Weaker models
|
||||||
are more prone to
|
are more prone to
|
||||||
|
|
|
@ -37,26 +37,31 @@ Use /help to see in-chat commands, run with --help to see cmd line args
|
||||||
|
|
||||||
## Adding files
|
## Adding files
|
||||||
|
|
||||||
Just add the files that the aider will need to *edit*.
|
Add the files that the aider will need to *edit*.
|
||||||
|
|
||||||
|
Don't add a bunch of extra files.
|
||||||
If you add too many files, the LLM can get overwhelmed
|
If you add too many files, the LLM can get overwhelmed
|
||||||
and confused (and it costs more tokens).
|
and confused (and it costs more tokens).
|
||||||
Aider will automatically
|
Aider will automatically
|
||||||
pull in content from related files so that it can
|
pull in content from related files so that it can
|
||||||
[understand the rest of your code base](https://aider.chat/docs/repomap.html).
|
[understand the rest of your code base](https://aider.chat/docs/repomap.html).
|
||||||
|
|
||||||
You can also run aider without naming any files and use the in-chat
|
You add files to the chat by naming them on the aider command line.
|
||||||
|
Or, you can use the in-chat
|
||||||
`/add` command to add files.
|
`/add` command to add files.
|
||||||
|
|
||||||
Or you can skip adding files completely, and aider
|
You can use aider without adding any files,
|
||||||
will try to figure out which files need to be edited based
|
and it will try to figure out which files need to be edited based
|
||||||
on your requests.
|
on your requests.
|
||||||
|
But you'll get the best results if you add the files that need
|
||||||
|
to be edited.
|
||||||
|
|
||||||
## LLMs
|
## LLMs
|
||||||
|
|
||||||
Aider uses GPT-4o by default, but you can
|
Aider uses GPT-4o by default, but you can
|
||||||
[connect to many different LLMs](/docs/llms.html).
|
[connect to many different LLMs](/docs/llms.html).
|
||||||
Claude 3 Opus is another model which works very well with aider,
|
Claude 3.5 Sonnet also works very well with aider,
|
||||||
which you can use by running `aider --opus`.
|
which you can use by running `aider --sonnet`.
|
||||||
|
|
||||||
You can run `aider --model XXX` to launch aider with
|
You can run `aider --model XXX` to launch aider with
|
||||||
a specific model.
|
a specific model.
|
||||||
|
@ -68,8 +73,8 @@ Or, during your chat you can switch models with the in-chat
|
||||||
Ask aider to make changes to your code.
|
Ask aider to make changes to your code.
|
||||||
It will show you some diffs of the changes it is making to
|
It will show you some diffs of the changes it is making to
|
||||||
complete your request.
|
complete your request.
|
||||||
Aider will git commit all of its changes,
|
[Aider will git commit all of its changes](/docs/git.html),
|
||||||
so they are easy to track and undo.
|
so they are easy to track and undo.
|
||||||
|
|
||||||
You can always use the `/undo` command to undo changes you don't
|
You can always use the `/undo` command to undo AI changes that you don't
|
||||||
like.
|
like.
|
||||||
|
|
Loading…
Add table
Add a link
Reference in a new issue