Mirror of https://github.com/Aider-AI/aider.git, synced 2025-06-03 19:24:59 +00:00

Commit 23c9d9c34d: Merge branch 'Aider-AI:main' into main
3 changed files with 66 additions and 7 deletions
@@ -1626,10 +1626,6 @@ class Coder:
         mentioned_rel_fnames = set()
         fname_to_rel_fnames = {}
         for rel_fname in addable_rel_fnames:
-            # Skip files that share a basename with files already in chat
-            if os.path.basename(rel_fname) in existing_basenames:
-                continue
-
             normalized_rel_fname = rel_fname.replace("\\", "/")
             normalized_words = set(word.replace("\\", "/") for word in words)
             if normalized_rel_fname in normalized_words:
@@ -1644,6 +1640,10 @@ class Coder:
                 fname_to_rel_fnames[fname].append(rel_fname)
 
         for fname, rel_fnames in fname_to_rel_fnames.items():
+            # If the basename is already in chat, don't add based on a basename mention
+            if fname in existing_basenames:
+                continue
+            # If the basename mention is unique among addable files and present in the text
             if len(rel_fnames) == 1 and fname in words:
                 mentioned_rel_fnames.add(rel_fnames[0])
 
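Taken together, the two hunks above move the duplicate-basename guard out of the per-file loop and into the basename-mention loop: a full path mention now always matches, while a bare basename mention is ignored when that basename is already in chat or is ambiguous among the addable files. Below is a minimal standalone sketch of that behavior, simplified from the hunks above; it omits the enclosing get_file_mentions method and the guard that filters out plain-word basenames, and the sample paths are illustrative only.

    import os

    def resolve_mentions(words, addable_rel_fnames, existing_basenames):
        # Sketch only: mirrors the post-change flow shown in the hunks above.
        mentioned = set()
        fname_to_rel_fnames = {}
        normalized_words = {w.replace("\\", "/") for w in words}
        for rel_fname in addable_rel_fnames:
            # Full (normalized) path mentions always match.
            if rel_fname.replace("\\", "/") in normalized_words:
                mentioned.add(rel_fname)
            fname = os.path.basename(rel_fname)
            fname_to_rel_fnames.setdefault(fname, []).append(rel_fname)

        for fname, rel_fnames in fname_to_rel_fnames.items():
            if fname in existing_basenames:
                continue  # basename already in chat: require a full-path mention
            if len(rel_fnames) == 1 and fname in words:
                mentioned.add(rel_fnames[0])  # unique, unambiguous basename mention
        return mentioned

    # "file.txt" is already in chat, so the bare basename mention is ignored,
    # but the full path "dir2/file.txt" and the unique "unique.txt" still match.
    print(resolve_mentions(
        {"Check", "dir2/file.txt", "unique.txt"},
        {"dir1/file.txt", "dir2/file.txt", "dir3/unique.txt"},
        {"file.txt"},
    ))  # contents: {'dir2/file.txt', 'dir3/unique.txt'}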
@@ -5,7 +5,27 @@ nav_order: 28
 
 # Models and API keys
 
-You need to tell aider which LLM to use and provide an API key.
+Aider needs to know which LLM model you would like to work with and which keys
+to provide when accessing it via API.
+
+## Defaults
+
+If you don't explicitly name a model, aider will try to select a model
+for you to work with.
+
+First, aider will check which
+[keys you have provided via the environment, config files, or command line arguments](https://aider.chat/docs/config/api-keys.html).
+Based on the available keys, aider will select the best model to use.
+
+If you have not provided any keys, aider will offer to help you connect to
+[OpenRouter](http://openrouter.ai)
+which provides both free and paid access to most popular LLMs.
+Once connected, aider will select the best model available on OpenRouter
+based on whether you have a free or paid account there.
+
+## Specifying model & key
+
+You can also tell aider which LLM to use and provide an API key.
 The easiest way is to use the `--model` and `--api-key`
 command line arguments, like this:
 
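The concrete invocation that "like this:" introduces lies just outside this hunk. Purely as an illustration of the flags named above (the model alias and provider below are placeholders, not taken from the diff), such a call looks roughly like:

    aider --model sonnet --api-key anthropic=<your-key>

For the "Defaults" flow described earlier, you would instead export a provider key (for example OPENROUTER_API_KEY) before running aider without a --model argument, and let it pick a model based on the keys it finds.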
@@ -194,8 +194,8 @@ class TestCoder(unittest.TestCase):
             mock.return_value = set([str(fname1), str(fname2), str(fname3)])
             coder.repo.get_tracked_files = mock
 
-            # Check that file mentions skip files with duplicate basenames
-            mentioned = coder.get_file_mentions(f"Check {fname2} and {fname3}")
+            # Check that file mentions of a pure basename skips files with duplicate basenames
+            mentioned = coder.get_file_mentions(f"Check {fname2.name} and {fname3}")
             self.assertEqual(mentioned, {str(fname3)})
 
             # Add a read-only file with same basename
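The only behavioral change in this hunk is the switch from {fname2} to {fname2.name}: the mention becomes a bare basename rather than a full path, which is exactly the case the mention logic now skips when that basename is ambiguous among addable files or already in chat, while {fname3} stays a full path and is still picked up. For reference, Path.name keeps only the final component; the concrete paths below are illustrative, since the test's setup lines fall outside this hunk.

    from pathlib import Path

    fname2 = Path("dir2") / "file.txt"
    print(fname2)       # dir2/file.txt on POSIX (a full path mention)
    print(fname2.name)  # file.txt (a bare basename mention)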
@@ -366,6 +366,45 @@ class TestCoder(unittest.TestCase):
                     f"Failed to extract mentions from: {content}",
                 )
 
+    def test_get_file_mentions_multiline_backticks(self):
+        with GitTemporaryDirectory():
+            io = InputOutput(pretty=False, yes=True)
+            coder = Coder.create(self.GPT35, None, io)
+
+            # Create test files
+            test_files = [
+                "swebench/harness/test_spec/python.py",
+                "swebench/harness/test_spec/javascript.py",
+            ]
+            for fname in test_files:
+                fpath = Path(fname)
+                fpath.parent.mkdir(parents=True, exist_ok=True)
+                fpath.touch()
+
+            # Mock get_addable_relative_files to return our test files
+            coder.get_addable_relative_files = MagicMock(return_value=set(test_files))
+
+            # Input text with multiline backticked filenames
+            content = """
+Could you please **add the following files to the chat**?
+
+1. `swebench/harness/test_spec/python.py`
+2. `swebench/harness/test_spec/javascript.py`
+
+Once I have these, I can show you precisely how to do the thing.
+"""
+            expected_mentions = {
+                "swebench/harness/test_spec/python.py",
+                "swebench/harness/test_spec/javascript.py",
+            }
+
+            mentioned_files = coder.get_file_mentions(content)
+            self.assertEqual(
+                mentioned_files,
+                expected_mentions,
+                f"Failed to extract mentions from multiline backticked content: {content}",
+            )
+
     def test_get_file_mentions_path_formats(self):
         with GitTemporaryDirectory():
             io = InputOutput(pretty=False, yes=True)
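To run the changed and added tests locally, something like the following should work, assuming the test class lives at tests/basic/test_coder.py as in the aider repository layout (the diff view above does not show filenames):

    python -m pytest tests/basic/test_coder.py -k mentions

The -k expression selects the mention-related tests by name, including the new test_get_file_mentions_multiline_backticks.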