diff --git a/HISTORY.md b/HISTORY.md
index 2807e037d..6a520a8c2 100644
--- a/HISTORY.md
+++ b/HISTORY.md
@@ -1,6 +1,10 @@
# Release history
+### Aider v0.50.1
+
+- Bugfix for provider API exceptions.
+
### Aider v0.50.0
- Infinite output for DeepSeek Coder and Mistral models, in addition to Anthropic's models.
diff --git a/aider/__init__.py b/aider/__init__.py
index 41d36926a..a7d6d6205 100644
--- a/aider/__init__.py
+++ b/aider/__init__.py
@@ -1 +1 @@
-__version__ = "0.50.1-dev"
+__version__ = "0.50.2-dev"
diff --git a/aider/coders/base_coder.py b/aider/coders/base_coder.py
index f4f1319bf..f3cb8923a 100755
--- a/aider/coders/base_coder.py
+++ b/aider/coders/base_coder.py
@@ -1244,6 +1244,7 @@ class Coder:
self.io.log_llm_history("TO LLM", format_messages(messages))
+ completion = None
try:
hash_object, completion = send_completion(
model.name,
@@ -1263,6 +1264,8 @@ class Coder:
except KeyboardInterrupt as kbi:
self.keyboard_interrupt()
raise kbi
+ except Exception as e:
+ self.io.tool_error(f"Error during API call: {str(e)}")
finally:
self.io.log_llm_history(
"LLM RESPONSE",
diff --git a/aider/coders/editblock_prompts.py b/aider/coders/editblock_prompts.py
index af3e4f3b7..7a2acdae9 100644
--- a/aider/coders/editblock_prompts.py
+++ b/aider/coders/editblock_prompts.py
@@ -125,8 +125,8 @@ Every *SEARCH/REPLACE block* must use this format:
7. The end of the replace block: >>>>>>> REPLACE
8. The closing fence: {fence[1]}
-Every *SEARCH* section must *EXACTLY MATCH* the existing source code, character for character, including all comments, docstrings, etc.
-
+Every *SEARCH* section must *EXACTLY MATCH* the existing file content, character for character, including all comments, docstrings, etc.
+If the file contains code or other data wrapped/escaped in json/xml/quotes or other containers, you need to propose edits to the literal contents of the file, including the container markup.
*SEARCH/REPLACE* blocks will replace *all* matching occurrences.
Include enough lines to make the SEARCH blocks uniquely match the lines to change.
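
A hypothetical illustration of the new "literal contents" rule: if a file stores code inside a JSON string, the SEARCH text must match the escaped form on disk, container markup and all (file name and contents are invented for the example):

```
config.json
<<<<<<< SEARCH
{"snippet": "print(\"hello\")"}
=======
{"snippet": "print(\"goodbye\")"}
>>>>>>> REPLACE
```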
diff --git a/aider/commands.py b/aider/commands.py
index 599d5013d..5bfaac932 100644
--- a/aider/commands.py
+++ b/aider/commands.py
@@ -724,7 +724,7 @@ class Commands:
add = result.returncode != 0
else:
response = self.io.prompt_ask(
- "Add the output to the chat?\n(y/n/instructions)", default=""
+ "Add the output to the chat?\n(Y/n/instructions)", default=""
).strip()
if response.lower() in ["yes", "y"]:
diff --git a/aider/main.py b/aider/main.py
index bb647d55d..be0268f88 100644
--- a/aider/main.py
+++ b/aider/main.py
@@ -327,6 +327,17 @@ def main(argv=None, input=None, output=None, force_git_root=None, return_coder=F
parser = get_parser(default_config_files, git_root)
args, unknown = parser.parse_known_args(argv)
+ if args.verbose:
+ print("Config files search order, if no --config:")
+ for file in default_config_files:
+ exists = "(exists)" if Path(file).exists() else ""
+ print(f" - {file} {exists}")
+
+ default_config_files.reverse()
+
+ parser = get_parser(default_config_files, git_root)
+ args, unknown = parser.parse_known_args(argv)
+
# Load the .env file specified in the arguments
loaded_dotenvs = load_dotenv_files(git_root, args.env_file)
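
The double parse above is deliberate: configargparse applies default config files in list order with later files overriding earlier ones, so the list is shown most-specific-first for the `--verbose` printout and then reversed before the authoritative parse. A condensed sketch (`get_parser`, `git_root`, and `argv` are aider's names; the two-file list is illustrative):

```python
from pathlib import Path

def parse_with_precedence(get_parser, git_root, argv):
    # Most specific first, for human-readable display.
    files = [str(Path.cwd() / ".aider.conf.yml"), str(Path.home() / ".aider.conf.yml")]

    args, _ = get_parser(files, git_root).parse_known_args(argv)  # pass 1: learn --verbose
    if args.verbose:
        for f in files:
            print(f"  - {f} {'(exists)' if Path(f).exists() else ''}")

    files.reverse()  # later entries win in configargparse, so most specific goes last
    return get_parser(files, git_root).parse_known_args(argv)     # pass 2: real settings
```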
diff --git a/aider/website/HISTORY.md b/aider/website/HISTORY.md
index d70bde900..96420b06a 100644
--- a/aider/website/HISTORY.md
+++ b/aider/website/HISTORY.md
@@ -16,6 +16,10 @@ cog.out(text)
# Release history
+### Aider v0.50.1
+
+- Bugfix for provider API exceptions.
+
### Aider v0.50.0
- Infinite output for DeepSeek Coder and Mistral models, in addition to Anthropic's models.
diff --git a/aider/website/_data/edit_leaderboard.yml b/aider/website/_data/edit_leaderboard.yml
index 52f667849..426c86336 100644
--- a/aider/website/_data/edit_leaderboard.yml
+++ b/aider/website/_data/edit_leaderboard.yml
@@ -577,6 +577,7 @@
pass_rate_2: 77.4
percent_cases_well_formed: 99.2
error_outputs: 23
+ released: 2024-06-20
num_malformed_responses: 4
num_with_malformed_responses: 1
user_asks: 2
@@ -603,6 +604,7 @@
num_malformed_responses: 0
num_with_malformed_responses: 0
user_asks: 0
+ released: 2024-03-13
lazy_comments: 0
syntax_errors: 0
indentation_errors: 0
@@ -644,6 +646,7 @@
commit_hash: d31eef3-dirty
pass_rate_1: 40.6
pass_rate_2: 55.6
+ released: 2024-07-18
percent_cases_well_formed: 100.0
error_outputs: 1
num_malformed_responses: 0
@@ -668,6 +671,7 @@
pass_rate_1: 60.9
pass_rate_2: 69.9
percent_cases_well_formed: 97.7
+ released: 2024-06-28
error_outputs: 58
num_malformed_responses: 13
num_with_malformed_responses: 3
@@ -690,6 +694,7 @@
commit_hash: f7ce78b-dirty
pass_rate_1: 46.6
pass_rate_2: 63.9
+ released: 2024-07-23
percent_cases_well_formed: 92.5
error_outputs: 84
num_malformed_responses: 19
@@ -716,6 +721,7 @@
percent_cases_well_formed: 100.0
error_outputs: 0
num_malformed_responses: 0
+ released: 2024-07-23
num_with_malformed_responses: 0
user_asks: 0
lazy_comments: 0
@@ -738,6 +744,7 @@
pass_rate_2: 72.9
percent_cases_well_formed: 97.7
error_outputs: 13
+ released: 2024-07-24
num_malformed_responses: 3
num_with_malformed_responses: 3
user_asks: 1
@@ -763,6 +770,7 @@
error_outputs: 3
num_malformed_responses: 0
num_with_malformed_responses: 0
+ released: 2024-07-24
user_asks: 3
lazy_comments: 0
syntax_errors: 1
@@ -785,6 +793,7 @@
percent_cases_well_formed: 100.0
error_outputs: 27
num_malformed_responses: 0
+ released: 2024-07-23
num_with_malformed_responses: 0
user_asks: 23
lazy_comments: 8
@@ -810,6 +819,7 @@
num_malformed_responses: 0
num_with_malformed_responses: 0
user_asks: 0
+ released: 2024-07-23
lazy_comments: 0
syntax_errors: 0
indentation_errors: 0
@@ -838,9 +848,34 @@
indentation_errors: 2
exhausted_context_windows: 0
test_timeouts: 5
+ released: 2024-08-06
command: aider --model openai/gpt-4o-2024-08-06
date: 2024-08-06
versions: 0.48.1-dev
seconds_per_case: 6.5
total_cost: 0.0000
+
+- dirname: 2024-08-14-13-07-12--chatgpt-4o-latest-diff
+ test_cases: 133
+ model: chatgpt-4o-latest
+ edit_format: diff
+ commit_hash: b1c3769
+ pass_rate_1: 53.4
+ pass_rate_2: 69.2
+ percent_cases_well_formed: 97.7
+ error_outputs: 27
+ num_malformed_responses: 5
+ num_with_malformed_responses: 3
+ user_asks: 7
+ lazy_comments: 0
+ syntax_errors: 0
+ indentation_errors: 0
+ exhausted_context_windows: 0
+ test_timeouts: 0
+ command: aider --model openai/chatgpt-4o-latest
+ date: 2024-08-14
+ released: 2024-08-08
+ versions: 0.50.2-dev
+ seconds_per_case: 26.3
+ total_cost: 3.6113
\ No newline at end of file
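
The scattered `released:` dates added above feed the plotting script changed below. A sketch of the consumption side, assuming PyYAML and running from the repo root:

```python
import yaml

with open("aider/website/_data/edit_leaderboard.yml") as f:
    entries = yaml.safe_load(f)  # a list of dicts, one per benchmark run

# over_time.py plots only entries that carry both fields:
plottable = [e for e in entries if "released" in e and "pass_rate_2" in e]
for e in plottable:
    print(e["model"], e["released"], e["pass_rate_2"])
```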
diff --git a/aider/website/assets/models-over-time.svg b/aider/website/assets/models-over-time.svg
index 994dab6ae..a4fe87061 100644
--- a/aider/website/assets/models-over-time.svg
+++ b/aider/website/assets/models-over-time.svg
diff --git a/aider/website/docs/usage/images-urls.md b/aider/website/docs/usage/images-urls.md
index 2cecfdef6..b3fb6797a 100644
--- a/aider/website/docs/usage/images-urls.md
+++ b/aider/website/docs/usage/images-urls.md
@@ -23,7 +23,7 @@ You can add images to the chat just like you would
add any other file:
- Use `/add ` from within the chat
-- Use `/add-clipboard-image` to paste an image from your clipboard into the chat.
+- Use `/clipboard` to paste an image from your clipboard into the chat.
- Launch aider with image filenames on the command line: `aider ` along with any other command line arguments you need.
## Web pages
diff --git a/benchmark/benchmark.py b/benchmark/benchmark.py
index 6261e00f9..61f9bf1c4 100755
--- a/benchmark/benchmark.py
+++ b/benchmark/benchmark.py
@@ -28,8 +28,6 @@ from aider.coders import Coder
from aider.dump import dump # noqa: F401
from aider.io import InputOutput
-load_dotenv()
-
BENCHMARK_DNAME = Path(os.environ.get("AIDER_BENCHMARK_DIR", "tmp.benchmarks"))
EXERCISES_DIR_DEFAULT = "exercism-python"
@@ -39,6 +37,8 @@ app = typer.Typer(add_completion=False, pretty_exceptions_enable=False)
NUM_TESTS = (89, 133)
+load_dotenv(override=True)
+
def show_stats(dirnames, graphs):
raw_rows = []
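
Moving the call and adding `override=True` changes precedence: python-dotenv normally leaves already-exported variables alone, so shell values would shadow the benchmark's `.env`. With `override=True` the `.env` file wins. A self-contained demonstration (`OPENAI_API_KEY` is just an illustrative name):

```python
import os
from dotenv import load_dotenv

os.environ["OPENAI_API_KEY"] = "from-shell"
load_dotenv()                          # default: existing env vars are kept
print(os.environ["OPENAI_API_KEY"])    # still "from-shell"
load_dotenv(override=True)             # .env value replaces it, if the file defines one
```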
@@ -378,7 +378,7 @@ def summarize_results(dirname):
pass_rate = 100 * passed_tests[i] / res.completed_tests
percents[i] = pass_rate
# console.print(f"{pass_rate:.1f}% correct after try {i+1}")
- setattr(res, f"pass_rate_{i+1}", f"{pass_rate:.1f}")
+ setattr(res, f"pass_rate_{i + 1}", f"{pass_rate:.1f}")
print(f"- dirname: {dirname.name}")
style = None if res.completed_tests in NUM_TESTS else "red"
@@ -393,10 +393,10 @@ def summarize_results(dirname):
console.print(f" {key}: {val}", style=style)
for i in range(tries):
- print(f" pass_rate_{i+1}: {percents[i]:.1f}")
+ print(f" pass_rate_{i + 1}: {percents[i]:.1f}")
pct_well_formed = 1.0 - res.num_with_malformed_responses / res.completed_tests
- print(f" percent_cases_well_formed: {pct_well_formed*100:.1f}")
+ print(f" percent_cases_well_formed: {pct_well_formed * 100:.1f}")
show("error_outputs")
show("num_malformed_responses")
@@ -564,7 +564,6 @@ def run_test_real(
fnames=fnames,
use_git=False,
stream=False,
- pretty=False,
verbose=verbose,
)
coder.max_apply_update_errors = max_apply_update_errors
@@ -591,7 +590,7 @@ def run_test_real(
coder.apply_updates()
else:
- response = coder.run(with_message=instructions)
+ response = coder.run(with_message=instructions, preproc=False)
dur += time.time() - start
if not no_aider:
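
A hedged reading of the new `preproc=False` argument: `Coder.run()` appears to massage incoming messages (slash-commands, URLs) before sending, and the benchmark wants its instructions delivered verbatim. The class below is an illustrative stand-in, not aider's implementation:

```python
class ToyCoder:
    def preprocess(self, text: str) -> str:
        return text.strip()  # stand-in for aider's real message preprocessing

    def run(self, with_message: str, preproc: bool = True) -> str:
        message = self.preprocess(with_message) if preproc else with_message
        return f"sent: {message!r}"

print(ToyCoder().run("  fix the tests  "))                 # preprocessed
print(ToyCoder().run("  fix the tests  ", preproc=False))  # verbatim
```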
diff --git a/benchmark/over_time.py b/benchmark/over_time.py
index 0ea641d64..565038a8e 100644
--- a/benchmark/over_time.py
+++ b/benchmark/over_time.py
@@ -3,6 +3,8 @@ import yaml
from imgcat import imgcat
from matplotlib import rc
+from aider.dump import dump # noqa: 401
+
def plot_over_time(yaml_file):
with open(yaml_file, "r") as file:
@@ -12,49 +14,97 @@ def plot_over_time(yaml_file):
pass_rates = []
models = []
+ print("Debug: Raw data from YAML file:")
+ print(data)
+
for entry in data:
if "released" in entry and "pass_rate_2" in entry:
dates.append(entry["released"])
pass_rates.append(entry["pass_rate_2"])
models.append(entry["model"].split("(")[0].strip())
+ print("Debug: Processed data:")
+ print("Dates:", dates)
+ print("Pass rates:", pass_rates)
+ print("Models:", models)
+
+ if not dates or not pass_rates:
+ print(
+ "Error: No data to plot. Check if the YAML file is empty or if the data is in the"
+ " expected format."
+ )
+ return
+
plt.rcParams["hatch.linewidth"] = 0.5
plt.rcParams["hatch.color"] = "#444444"
rc("font", **{"family": "sans-serif", "sans-serif": ["Helvetica"], "size": 10})
plt.rcParams["text.color"] = "#444444"
- fig, ax = plt.subplots(figsize=(10, 5))
+ fig, ax = plt.subplots(figsize=(12, 6)) # Increase figure size for better visibility
+
+ print("Debug: Figure created. Plotting data...")
ax.grid(axis="y", zorder=0, lw=0.2)
for spine in ax.spines.values():
spine.set_edgecolor("#DDDDDD")
spine.set_linewidth(0.5)
colors = [
- "red" if "gpt-4" in model else "green" if "gpt-3.5" in model else "blue" for model in models
+ (
+ "purple"
+ if "-4o" in model and "gpt-4o-mini" not in model
+ else "red" if "gpt-4" in model else "green" if "gpt-3.5" in model else "lightblue"
+ )
+ for model in models
]
+
+ # Separate data points by color
+ purple_points = [(d, r) for d, r, c in zip(dates, pass_rates, colors) if c == "purple"]
+ red_points = [(d, r) for d, r, c in zip(dates, pass_rates, colors) if c == "red"]
+ green_points = [(d, r) for d, r, c in zip(dates, pass_rates, colors) if c == "green"]
+
+ # Plot lines for purple, red, and green points
+ if purple_points:
+ purple_dates, purple_rates = zip(*sorted(purple_points))
+ ax.plot(purple_dates, purple_rates, c="purple", alpha=0.5, linewidth=1)
+ if red_points:
+ red_dates, red_rates = zip(*sorted(red_points))
+ ax.plot(red_dates, red_rates, c="red", alpha=0.5, linewidth=1)
+ if green_points:
+ green_dates, green_rates = zip(*sorted(green_points))
+ ax.plot(green_dates, green_rates, c="green", alpha=0.5, linewidth=1)
+
+ # Plot all points
ax.scatter(dates, pass_rates, c=colors, alpha=0.5, s=120)
for i, model in enumerate(models):
ax.annotate(
model,
(dates[i], pass_rates[i]),
- fontsize=12,
+ fontsize=8,
alpha=0.75,
xytext=(5, 5),
textcoords="offset points",
)
ax.set_xlabel("Model release date", fontsize=18, color="#555")
- ax.set_ylabel("Aider code editing benchmark,\npercent completed correctly", fontsize=18, color="#555")
+ ax.set_ylabel(
+ "Aider code editing benchmark,\npercent completed correctly", fontsize=18, color="#555"
+ )
ax.set_title("LLM code editing skill by model release date", fontsize=20)
- ax.set_ylim(0, 30)
- plt.xticks(fontsize=14)
+ ax.set_ylim(0, 100) # Adjust y-axis limit to accommodate higher values
+ plt.xticks(fontsize=14, rotation=45, ha="right") # Rotate x-axis labels for better readability
plt.tight_layout(pad=3.0)
+
+ print("Debug: Saving figures...")
plt.savefig("tmp_over_time.png")
plt.savefig("tmp_over_time.svg")
+
+ print("Debug: Displaying figure with imgcat...")
imgcat(fig)
+ print("Debug: Figure generation complete.")
+
# Example usage
-plot_over_time("_data/edit_leaderboard.yml")
+plot_over_time("aider/website/_data/edit_leaderboard.yml")
diff --git a/tests/basic/test_main.py b/tests/basic/test_main.py
index 1560894a0..1f0d467b5 100644
--- a/tests/basic/test_main.py
+++ b/tests/basic/test_main.py
@@ -226,9 +226,10 @@ class TestMain(TestCase):
def test_main_exit_calls_version_check(self):
with GitTemporaryDirectory():
- with patch("aider.main.check_version") as mock_check_version, patch(
- "aider.main.InputOutput"
- ) as mock_input_output:
+ with (
+ patch("aider.main.check_version") as mock_check_version,
+ patch("aider.main.InputOutput") as mock_input_output,
+ ):
main(["--exit"], input=DummyInput(), output=DummyOutput())
mock_check_version.assert_called_once()
mock_input_output.assert_called_once()
@@ -373,6 +374,67 @@ class TestMain(TestCase):
self.assertRegex(relevant_output, r"AIDER_DARK_MODE:\s+on")
self.assertRegex(relevant_output, r"dark_mode:\s+True")
+ def test_yaml_config_file_loading(self):
+ with GitTemporaryDirectory() as git_dir:
+ git_dir = Path(git_dir)
+
+ # Create fake home directory
+ fake_home = git_dir / "fake_home"
+ fake_home.mkdir()
+ os.environ["HOME"] = str(fake_home)
+
+ # Create subdirectory as current working directory
+ cwd = git_dir / "subdir"
+ cwd.mkdir()
+ os.chdir(cwd)
+
+ # Create .aider.conf.yml files in different locations
+ home_config = fake_home / ".aider.conf.yml"
+ git_config = git_dir / ".aider.conf.yml"
+ cwd_config = cwd / ".aider.conf.yml"
+ named_config = git_dir / "named.aider.conf.yml"
+
+ cwd_config.write_text("model: gpt-4-32k\nmap-tokens: 4096\n")
+ git_config.write_text("model: gpt-4\nmap-tokens: 2048\n")
+ home_config.write_text("model: gpt-3.5-turbo\nmap-tokens: 1024\n")
+ named_config.write_text("model: gpt-4-1106-preview\nmap-tokens: 8192\n")
+
+ with (
+ patch("pathlib.Path.home", return_value=fake_home),
+ patch("aider.coders.Coder.create") as MockCoder,
+ ):
+ # Test loading from specified config file
+ main(
+ ["--yes", "--exit", "--config", str(named_config)],
+ input=DummyInput(),
+ output=DummyOutput(),
+ )
+ _, kwargs = MockCoder.call_args
+ self.assertEqual(kwargs["main_model"].name, "gpt-4-1106-preview")
+ self.assertEqual(kwargs["map_tokens"], 8192)
+
+ # Test loading from current working directory
+ main(["--yes", "--exit"], input=DummyInput(), output=DummyOutput())
+ _, kwargs = MockCoder.call_args
+ print("kwargs:", kwargs) # Add this line for debugging
+ self.assertIn("main_model", kwargs, "main_model key not found in kwargs")
+ self.assertEqual(kwargs["main_model"].name, "gpt-4-32k")
+ self.assertEqual(kwargs["map_tokens"], 4096)
+
+ # Test loading from git root
+ cwd_config.unlink()
+ main(["--yes", "--exit"], input=DummyInput(), output=DummyOutput())
+ _, kwargs = MockCoder.call_args
+ self.assertEqual(kwargs["main_model"].name, "gpt-4")
+ self.assertEqual(kwargs["map_tokens"], 2048)
+
+ # Test loading from home directory
+ git_config.unlink()
+ main(["--yes", "--exit"], input=DummyInput(), output=DummyOutput())
+ _, kwargs = MockCoder.call_args
+ self.assertEqual(kwargs["main_model"].name, "gpt-3.5-turbo")
+ self.assertEqual(kwargs["map_tokens"], 1024)
+
def test_map_tokens_option(self):
with GitTemporaryDirectory():
with patch("aider.coders.base_coder.RepoMap") as MockRepoMap: