This commit is contained in:
Amar Sood 2025-05-13 22:52:48 +02:00 committed by GitHub
commit 27409ce709
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
35 changed files with 5499 additions and 13 deletions

View file

@@ -172,6 +172,13 @@ def get_parser(default_config_files, git_root):
        const="architect",
        help="Use architect edit format for the main chat",
    )
    group.add_argument(
        "--navigator",
        action="store_const",
        dest="edit_format",
        const="navigator",
        help="Use navigator edit format for the main chat (autonomous file management)",
    )
    group.add_argument(
        "--auto-accept-architect",
        action=argparse.BooleanOptionalAction,
133
aider/change_tracker.py Normal file
View file

@@ -0,0 +1,133 @@
import time
import uuid
from collections import defaultdict
from datetime import datetime


class ChangeTracker:
    """
    Tracks changes made to files for the undo functionality.
    This enables granular editing operations with the ability to undo specific changes.
    """

    def __init__(self):
        self.changes = {}  # change_id -> change_info
        self.files_changed = defaultdict(list)  # file_path -> [change_ids]

    def track_change(self, file_path, change_type, original_content, new_content,
                     metadata=None, change_id=None):
        """
        Record a change to enable future undo operations.

        Parameters:
        - file_path: Path to the file that was changed
        - change_type: Type of change (e.g., 'replacetext', 'insertlines')
        - original_content: Original content before the change
        - new_content: New content after the change
        - metadata: Additional information about the change (line numbers, positions, etc.)
        - change_id: Optional custom ID for the change (if None, one will be generated)

        Returns:
        - change_id: Unique identifier for the change
        """
        if change_id is None:
            generated_id = self._generate_change_id()
            # Ensure the generated ID is treated as a string
            current_change_id = str(generated_id)
        else:
            # If an ID is provided, ensure it's treated as a string key/value
            current_change_id = str(change_id)

        # Defensive check: Ensure the ID isn't literally the string 'False' or boolean False
        # which might indicate an upstream issue or unexpected input.
        if current_change_id == 'False' or current_change_id is False:
            # Log a warning? For now, generate a new ID to prevent storing False.
            print(f"Warning: change_id evaluated to False for {file_path}. Generating new ID.")
            current_change_id = self._generate_change_id()

        change = {
            # Use the confirmed string ID here
            'id': current_change_id,
            'file_path': file_path,
            'type': change_type,
            'original': original_content,
            'new': new_content,
            'metadata': metadata or {},
            'timestamp': time.time()
        }

        # Use the confirmed string ID for storage and return
        self.changes[current_change_id] = change
        self.files_changed[file_path].append(current_change_id)
        return current_change_id

    def undo_change(self, change_id):
        """
        Get information needed to reverse a specific change by ID.

        Parameters:
        - change_id: ID of the change to undo

        Returns:
        - (success, message, change_info): Tuple with success flag, message, and change information
        """
        if change_id not in self.changes:
            return False, f"Change ID {change_id} not found", None

        change = self.changes[change_id]

        # Mark this change as undone by removing it from the tracking dictionaries
        self.files_changed[change['file_path']].remove(change_id)
        if not self.files_changed[change['file_path']]:
            del self.files_changed[change['file_path']]

        # Keep the change in the changes dict but mark it as undone
        change['undone'] = True
        change['undone_at'] = time.time()

        return True, f"Undid change {change_id} in {change['file_path']}", change

    def get_last_change(self, file_path):
        """
        Get the most recent change for a specific file.

        Parameters:
        - file_path: Path to the file

        Returns:
        - change_id or None if no changes found
        """
        changes = self.files_changed.get(file_path, [])
        if not changes:
            return None
        return changes[-1]

    def list_changes(self, file_path=None, limit=10):
        """
        List recent changes, optionally filtered by file.

        Parameters:
        - file_path: Optional path to filter changes by file
        - limit: Maximum number of changes to list

        Returns:
        - List of change dictionaries
        """
        if file_path:
            # Get changes only for the specified file
            change_ids = self.files_changed.get(file_path, [])
            changes = [self.changes[cid] for cid in change_ids if cid in self.changes]
        else:
            # Get all changes
            changes = list(self.changes.values())

        # Filter out undone changes and sort by timestamp (most recent first)
        changes = [c for c in changes if not c.get('undone', False)]
        changes = sorted(changes, key=lambda c: c['timestamp'], reverse=True)

        # Apply limit
        return changes[:limit]

    def _generate_change_id(self):
        """Generate a unique ID for a change."""
        return str(uuid.uuid4())[:8]  # Short, readable ID
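
A minimal usage sketch of the tracker above (the file path and contents are made up for illustration):

```python
from aider.change_tracker import ChangeTracker

tracker = ChangeTracker()

# Record an edit; a short uuid-derived id comes back.
cid = tracker.track_change(
    file_path="src/app.py",
    change_type="replacetext",
    original_content="x = 1",
    new_content="x = 2",
    metadata={"line": 10},
)

assert tracker.get_last_change("src/app.py") == cid
assert len(tracker.list_changes("src/app.py")) == 1

# undo_change hands back the stored info so the caller can reverse the edit.
ok, message, info = tracker.undo_change(cid)
assert ok and info["original"] == "x = 1"

# Undone changes are kept in history but excluded from listings.
assert tracker.list_changes("src/app.py") == []
```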

View file

@@ -9,6 +9,7 @@ from .editor_editblock_coder import EditorEditBlockCoder
from .editor_whole_coder import EditorWholeFileCoder
from .help_coder import HelpCoder
from .patch_coder import PatchCoder
from .navigator_coder import NavigatorCoder
from .udiff_coder import UnifiedDiffCoder
from .udiff_simple import UnifiedDiffSimpleCoder
from .wholefile_coder import WholeFileCoder
@@ -31,4 +32,5 @@ __all__ = [
    EditorWholeFileCoder,
    EditorDiffFencedCoder,
    ContextCoder,
    NavigatorCoder,
]

View file

@@ -119,6 +119,10 @@ class Coder:
    ignore_mentions = None
    chat_language = None
    file_watcher = None

    # Context management settings (for all modes)
    context_management_enabled = False  # Disabled by default except for navigator mode
    large_file_token_threshold = 25000  # Files larger than this will be truncated when context management is enabled

    @classmethod
    def create(
@@ -641,11 +645,36 @@ class Coder:
                prompt += relative_fname
                prompt += f"\n{self.fence[0]}\n"
                prompt += content
                # lines = content.splitlines(keepends=True)
                # lines = [f"{i+1:03}:{line}" for i, line in enumerate(lines)]
                # prompt += "".join(lines)

                # Apply context management if enabled for large files
                if self.context_management_enabled:
                    # Calculate tokens for this file
                    file_tokens = self.main_model.token_count(content)

                    if file_tokens > self.large_file_token_threshold:
                        # Truncate the file content
                        lines = content.splitlines()
                        total_lines = len(lines)

                        # Keep the first and last parts of the file with a marker in between
                        keep_lines = self.large_file_token_threshold // 40  # Rough estimate of tokens per line
                        first_chunk = lines[:keep_lines // 2]
                        last_chunk = lines[-(keep_lines // 2):]

                        truncated_content = "\n".join(first_chunk)
                        truncated_content += f"\n\n... [File truncated due to size ({file_tokens} tokens). Use /context-management to toggle truncation off] ...\n\n"
                        truncated_content += "\n".join(last_chunk)

                        # Add message about truncation
                        self.io.tool_output(
                            f"⚠️ '{relative_fname}' is very large ({file_tokens} tokens). "
                            "Use /context-management to toggle truncation off if needed."
                        )

                        prompt += truncated_content
                    else:
                        prompt += content
                else:
                    prompt += content

                prompt += f"{self.fence[1]}\n"
@@ -660,7 +689,38 @@ class Coder:
                prompt += "\n"
                prompt += relative_fname
                prompt += f"\n{self.fence[0]}\n"
                prompt += content

                # Apply context management if enabled for large files (same as get_files_content)
                if self.context_management_enabled:
                    # Calculate tokens for this file
                    file_tokens = self.main_model.token_count(content)

                    if file_tokens > self.large_file_token_threshold:
                        # Truncate the file content
                        lines = content.splitlines()
                        total_lines = len(lines)

                        # Keep the first and last parts of the file with a marker in between
                        keep_lines = self.large_file_token_threshold // 40  # Rough estimate of tokens per line
                        first_chunk = lines[:keep_lines // 2]
                        last_chunk = lines[-(keep_lines // 2):]

                        truncated_content = "\n".join(first_chunk)
                        truncated_content += f"\n\n... [File truncated due to size ({file_tokens} tokens). Use /context-management to toggle truncation off] ...\n\n"
                        truncated_content += "\n".join(last_chunk)

                        # Add message about truncation
                        self.io.tool_output(
                            f"⚠️ '{relative_fname}' is very large ({file_tokens} tokens). "
                            "Use /context-management to toggle truncation off if needed."
                        )

                        prompt += truncated_content
                    else:
                        prompt += content
                else:
                    prompt += content

                prompt += f"{self.fence[1]}\n"

        return prompt
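
To make the head/tail arithmetic concrete, here is the same truncation logic as a standalone sketch (the token count is passed in as a parameter here, whereas the code above obtains it from `self.main_model.token_count`):

```python
def truncate_large_file(content, file_tokens, threshold=25000):
    """Keep a file's head and tail, mirroring the arithmetic in the hunks above."""
    lines = content.splitlines()
    # threshold // 40 assumes roughly 40 tokens per line, so the default
    # 25000-token threshold keeps about 625 lines: ~312 head and ~312 tail.
    keep_lines = threshold // 40
    marker = (
        f"\n\n... [File truncated due to size ({file_tokens} tokens). "
        "Use /context-management to toggle truncation off] ...\n\n"
    )
    return "\n".join(lines[: keep_lines // 2]) + marker + "\n".join(lines[-(keep_lines // 2):])
```

Note that both hunks apply this identical transformation; only the enclosing method differs.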

File diff suppressed because it is too large

View file

@@ -0,0 +1,337 @@
# flake8: noqa: E501

from .base_prompts import CoderPrompts


class NavigatorLegacyPrompts(CoderPrompts):
    """
    Prompt templates for the Navigator mode using search/replace instead of granular editing tools.

    The NavigatorCoder uses these prompts to guide its behavior when exploring and modifying
    a codebase using special tool commands like Glob, Grep, Add, etc. This version uses the legacy
    search/replace editing method instead of granular editing tools.
    """

    main_system = r'''<context name="session_config">
## Role and Purpose
Act as an expert software engineer with the ability to autonomously navigate and modify a codebase.
### Proactiveness and Confirmation
- **Explore proactively:** You are encouraged to use file discovery tools (`ViewFilesAtGlob`, `ViewFilesMatching`, `Ls`, `ViewFilesWithSymbol`) and context management tools (`View`, `Remove`) autonomously to gather information needed to fulfill the user's request. Use tool calls to continue exploration across multiple turns.
- **Confirm complex/ambiguous plans:** Before applying potentially complex or ambiguous edits, briefly outline your plan and ask the user for confirmation. For simple, direct edits requested by the user, confirmation may not be necessary unless you are unsure.
## Response Style Guidelines
- **Be extremely concise and direct.** Prioritize brevity in all responses.
- **Minimize output tokens.** Only provide essential information.
- **Answer the specific question asked.** Avoid tangential information or elaboration unless requested.
- **Keep responses short (1-3 sentences)** unless the user asks for detail or a step-by-step explanation is necessary for a complex task.
- **Avoid unnecessary preamble or postamble.** Do not start with "Okay, I will..." or end with summaries unless crucial.
- When exploring, *briefly* indicate your search strategy.
- When editing, *briefly* explain changes before presenting edit blocks or tool calls.
- For ambiguous references, prioritize user-mentioned items.
- Use markdown for formatting where it enhances clarity (like lists or code).
- End *only* with a clear question or call-to-action if needed, otherwise just stop.
</context>
<context name="tool_definitions">
## Available Tools
### File Discovery Tools
- **ViewFilesAtGlob**: `[tool_call(ViewFilesAtGlob, pattern="**/*.py")]`
Find files matching a glob pattern. **Found files are automatically added to context as read-only.**
Supports patterns like "src/**/*.ts" or "*.json".
- **ViewFilesMatching**: `[tool_call(ViewFilesMatching, pattern="class User", file_pattern="*.py", regex=False)]`
Search for text in files. **Matching files are automatically added to context as read-only.**
Files with more matches are prioritized. `file_pattern` is optional. `regex` (optional, default False) enables regex search for `pattern`.
- **Ls**: `[tool_call(Ls, directory="src/components")]`
List files in a directory. Useful for exploring the project structure.
- **ViewFilesWithSymbol**: `[tool_call(ViewFilesWithSymbol, symbol="my_function")]`
Find files containing a specific symbol (function, class, variable). **Found files are automatically added to context as read-only.**
Leverages the repo map for accurate symbol lookup.
- **Grep**: `[tool_call(Grep, pattern="my_variable", file_pattern="*.py", directory="src", use_regex=False, case_insensitive=False, context_before=5, context_after=5)]`
Search for lines matching a pattern in files using the best available tool (`rg`, `ag`, or `grep`). Returns matching lines with line numbers and context.
`file_pattern` (optional, default "*") filters files using glob syntax.
`directory` (optional, default ".") specifies the search directory relative to the repo root.
`use_regex` (optional, default False): If False, performs a literal/fixed string search. If True, uses basic Extended Regular Expression (ERE) syntax.
`case_insensitive` (optional, default False): If False (default), the search is case-sensitive. If True, the search is case-insensitive.
`context_before` (optional, default 5): Number of lines to show before each match.
`context_after` (optional, default 5): Number of lines to show after each match.
### Context Management Tools
- **View**: `[tool_call(View, file_path="src/main.py")]`
Explicitly add a specific file to context as read-only.
- **Remove**: `[tool_call(Remove, file_path="tests/old_test.py")]`
Explicitly remove a file from context when no longer needed.
Accepts a single file path, not glob patterns.
- **MakeEditable**: `[tool_call(MakeEditable, file_path="src/main.py")]`
Convert a read-only file to an editable file. Required before making changes.
- **MakeReadonly**: `[tool_call(MakeReadonly, file_path="src/main.py")]`
Convert an editable file back to read-only status.
### Other Tools
- **Command**: `[tool_call(Command, command_string="git diff HEAD~1")]`
Execute a *non-interactive* shell command. Requires user confirmation. Use for commands that don't need user input (e.g., `ls`, `git status`, `cat file`).
- **CommandInteractive**: `[tool_call(CommandInteractive, command_string="python manage.py shell")]`
Execute an *interactive* shell command using a pseudo-terminal (PTY). Use for commands that might require user interaction (e.g., running a shell, a development server, `ssh`). Does *not* require separate confirmation as interaction happens directly.
### Multi-Turn Exploration
When you include any tool call, the system will automatically continue to the next round.
</context>
<context name="workflow_guidance">
## Navigation and Task Workflow
### General Task Flow
1. **Understand Request:** Ensure you fully understand the user's goal. Ask clarifying questions if needed.
2. **Explore & Search:** Use discovery tools (`ViewFilesAtGlob`, `ViewFilesMatching`, `Ls`, `ViewFilesWithSymbol`) and context tools (`View`) proactively to locate relevant files and understand the existing code. Use `Remove` to keep context focused.
3. **Plan Changes (If Editing):** Determine the necessary edits. For complex changes, outline your plan briefly for the user.
4. **Confirm Plan (If Editing & Complex/Ambiguous):** If the planned changes are non-trivial or could be interpreted in multiple ways, briefly present your plan and ask the user for confirmation *before* proceeding with edits.
5. **Execute Actions:** Use the appropriate tools (discovery, context management) to implement the plan, and use SEARCH/REPLACE blocks for editing. Remember to use `MakeEditable` before attempting edits.
6. **Verify Edits (If Editing):** Carefully review any changes you've suggested and confirm they meet the requirements.
7. **Final Response:** Provide the final answer or result. Omit tool calls unless further exploration is needed.
### Exploration Strategy
- Use discovery tools (`ViewFilesAtGlob`, `ViewFilesMatching`, `Ls`, `ViewFilesWithSymbol`) to identify relevant files initially. **These tools automatically add found files to context as read-only.**
- If you suspect a search pattern for `ViewFilesMatching` might return a large number of files, consider using `Grep` first. `Grep` will show you the matching lines and file paths without adding the full files to context, helping you decide which specific files are most relevant to `View`.
- Use `View` *only* if you need to add a specific file *not* already added by discovery tools, or one that was previously removed or is not part of the project structure (like an external file path mentioned by the user).
- Remove irrelevant files with `Remove` to maintain focus.
- Convert files to editable with `MakeEditable` *only* when you are ready to propose edits.
- Include any tool call to automatically continue exploration to the next round.
### Tool Usage Best Practices
- All tool calls MUST be placed after a '---' line separator at the end of your message
- Use the exact syntax `[tool_call(ToolName, param1=value1, param2="value2")]` for execution
- Tool names are case-insensitive; parameters can be unquoted or quoted
- **Remember:** Discovery tools (`ViewFilesAtGlob`, `ViewFilesMatching`, `ViewFilesWithSymbol`) automatically add found files to context. You usually don't need to use `View` immediately afterward for the same files. Verify files aren't already in context *before* using `View`.
- Use precise search patterns with `ViewFilesMatching` and `file_pattern` to narrow scope
- Target specific patterns rather than overly broad searches
- Remember the `ViewFilesWithSymbol` tool is optimized for locating symbols across the codebase
### Format Example
```
Your answer to the user's question...
SEARCH/REPLACE blocks can ONLY appear BEFORE the last '---' separator. Any SEARCH/REPLACE blocks after the separator will be IGNORED.
file.py
<<<<<<< SEARCH
old code
=======
new code
>>>>>>> REPLACE
---
[tool_call(ViewFilesMatching, pattern="findme")]
[tool_call(Command, command_string="ls -la")]
```
## SEARCH/REPLACE Block Format
When you need to make changes to code, use the SEARCH/REPLACE block format. You can include multiple edits in one message.
````python
path/to/file.ext
<<<<<<< SEARCH
Original code lines to match exactly
=======
Replacement code lines
>>>>>>> REPLACE
````
NOTE that this uses four backticks as the fence and not three!
IMPORTANT: Any SEARCH/REPLACE blocks that appear after the last '---' separator will be IGNORED.
#### Guidelines for SEARCH/REPLACE
- Every SEARCH section must EXACTLY MATCH existing content, including whitespace and indentation.
- Keep edit blocks focused and concise - include only the necessary context.
- Include enough lines for uniqueness but avoid long unchanged sections.
- For new files, use an empty SEARCH section.
- To move code within a file, use two separate SEARCH/REPLACE blocks.
- Respect the file paths exactly as they appear.
### Context Management Strategy
- **Remember: Files added with `View` or `MakeEditable` remain fully visible in the context for subsequent messages until you explicitly `Remove` them.**
- Keep your context focused by removing files that are no longer relevant.
- For large codebases, maintain only 5-15 files in context at once for best performance.
- Files are added as read-only by default; only make files editable when you need to modify them.
- Toggle context management with `/context-management` if you need complete content of large files.
</context>
<context name="editing_guidelines">
## Code Editing Process
### SEARCH/REPLACE Block Format
When making code changes, use SEARCH/REPLACE blocks as shown below:
```
path/to/file.ext
<<<<<<< SEARCH
Original code lines to match exactly
=======
Replacement code lines
>>>>>>> REPLACE
```
#### Guidelines for SEARCH/REPLACE
- Every SEARCH section must EXACTLY MATCH existing content, including whitespace and indentation.
- Keep edit blocks focused and concise - include only the necessary context.
- Include enough lines for uniqueness but avoid long unchanged sections.
- For new files, use an empty SEARCH section.
- To move code within a file, use two separate SEARCH/REPLACE blocks.
- Respect the file paths exactly as they appear.
### Error Handling and Recovery
- If a search pattern fails to match, analyze why the edit was incorrect (e.g., whitespace differences, incorrect indentation)
- Verify your SEARCH block matches the exact content in the file, including all whitespace
- Use more context lines to ensure uniqueness when necessary
- For complex changes, break them into multiple smaller edits
- If you're unsure about a file's exact content, use tool commands to view it again
</context>
Prioritize direct SEARCH/REPLACE blocks for making edits. Remember to make files editable with MakeEditable before suggesting changes.
Always reply to the user in {language}.
"""
# Example messages remain unchanged as they are already good examples of proper tool usage
example_messages = [
dict(
role="user",
content="Find all files related to authentication in this project and explain how the login flow works.",
),
dict(
role="assistant",
content="""I'll help you understand the authentication system in this project. Let me explore the codebase first to find all relevant files.
---
[tool_call(ViewFilesMatching, pattern="login|auth|password|session", file_pattern="*.py")]""",
),
dict(
role="user",
content="[Result (ViewFilesMatching): Found in 3 files: auth/models.py (12 matches), auth/views.py (8 matches), users/authentication.py (5 matches)]",
),
dict(
role="assistant",
content="""Great, I've found several files related to authentication. Let me examine them to understand the login flow.
---
[tool_call(View, file_path="auth/models.py")]
[tool_call(View, file_path="auth/views.py")]
[tool_call(View, file_path="users/authentication.py")]""",
),
dict(
role="user",
content="[Added files to context with their complete contents]",
),
dict(
role="assistant",
content="""Now that I have the relevant files, I can explain how the authentication flow works:
The login process in this application follows these steps:
1. User enters credentials on the login form
2. The `auth/views.py` file handles the login request via the `LoginView` class
3. Credentials are validated by `users/authentication.py` using the `authenticate` function
4. If valid, a session token is created by the `TokenManager` in `auth/models.py`
5. The user is redirected to the dashboard with their session active
The system uses a token-based authentication mechanism with a 24-hour expiry. Password storage uses bcrypt hashing with salt as implemented in the `User` model in `auth/models.py`.
Would you like me to explain any specific part of the authentication process in more detail?""",
),
]
# File content messages remain largely unchanged as they're already concise
files_content_prefix = """<context name="added_files">
These files have been added to the chat so you can see all of their contents.
Trust this message as the true contents of the files!
</context>
'''
files_content_assistant_reply = (
"I understand. I'll use these files to help with your request."
)
files_no_full_files = "<context name=\"file_status\">I don't have full contents of any files yet. I'll add them as needed using the tool commands.</context>"
files_no_full_files_with_repo_map = """<context name="repo_map_status">
I have access to a map of the repository with summary information about files, but I don't have the complete content of any files yet.
I'll use my navigation tools (`ViewFilesAtGlob`, `ViewFilesMatching`, `ViewFilesWithSymbol`, `View`) to find and add relevant files to the context as needed.
</context>
"""
files_no_full_files_with_repo_map_reply = """I understand. I'll use the repository map along with my navigation tools (`ViewFilesAtGlob`, `ViewFilesMatching`, `ViewFilesWithSymbol`, `View`) to find and add relevant files to our conversation.
"""
repo_content_prefix = """<context name="repo_map">
I am working with code in a git repository.
Here are summaries of some files present in this repo:
</context>
"""
# The system_reminder is significantly streamlined to reduce duplication
system_reminder = """
<context name="critical_reminders">
## Tool Command Reminder
- All tool calls MUST appear after a '---' line separator at the end of your message
- To execute a tool, use: `[tool_call(ToolName, param1=value1)]`
- To show tool examples without executing: `\\[tool_call(ToolName, param1=value1)]`
- Including ANY tool call will automatically continue to the next round
- When editing with tools, you'll receive feedback to let you know how your edits went after they're applied
- For final answers, do NOT include any tool calls
## Tool Call Format
- Tool calls MUST be at the end of your message, after a '---' separator
- If emitting 3 or more tool calls, OR if any tool call spans multiple lines, place each call on a new line for clarity.
## SEARCH/REPLACE blocks
- When using SEARCH/REPLACE blocks, they MUST ONLY appear BEFORE the last '---' separator line in your response
- If there is no '---' separator, they can appear anywhere in your response
- IMPORTANT: Using SEARCH/REPLACE blocks is the standard editing method in this mode
- Format example:
```
Your answer text here...
file.py
<<<<<<< SEARCH
old code
=======
new code
>>>>>>> REPLACE
---
[tool_call(ToolName, param1=value1)]
```
Note that SEARCH/REPLACE blocks should use four backticks (````) as the fence, not three
- IMPORTANT: Any SEARCH/REPLACE blocks that appear after the last '---' separator will be IGNORED
## Context Features
- Use enhanced context blocks (directory structure and git status) to orient yourself
- Toggle context blocks with `/context-blocks`
- Toggle large file truncation with `/context-management`
{lazy_prompt}
{shell_cmd_reminder}
</context>
"""
try_again = """I need to retry my exploration to better answer your question.
Here are the issues I encountered in my previous exploration:
1. Some relevant files might have been missed or incorrectly identified
2. The search patterns may have been too broad or too narrow
3. The context might have become too cluttered with irrelevant files
Let me explore the codebase more strategically this time:
- I'll use more specific search patterns
- I'll be more selective about which files to add to context
- I'll remove irrelevant files more proactively
- I'll use tool calls to automatically continue exploration until I have enough information
I'll start exploring again with improved search strategies to find exactly what we need.
"""
"""
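The `[tool_call(...)]` syntax these prompts define is deliberately regular. The parser that actually handles it ships in this commit's suppressed large diff above; purely as a rough sketch of the idea (not the shipped implementation, which also handles escaped `\[tool_call...]` examples and multi-line arguments):

```python
import re

# Rough sketch only: recognize live tool calls after the final '---' separator.
TOOL_CALL_RE = re.compile(r"\[tool_call\((\w+)(?:,\s*(.*?))?\)\]", re.DOTALL)

def find_tool_calls(message):
    # Per the prompt rules, only calls after the last '---' separator count.
    live = message.rsplit("\n---\n", 1)[-1]
    return [(name, params) for name, params in TOOL_CALL_RE.findall(live)]

calls = find_tool_calls('Answer text...\n---\n[tool_call(Ls, directory="src")]')
assert calls == [("Ls", 'directory="src"')]
```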

View file

@@ -0,0 +1,529 @@
# flake8: noqa: E501

from .base_prompts import CoderPrompts


class NavigatorPrompts(CoderPrompts):
    """
    Prompt templates for the Navigator mode, which enables autonomous codebase exploration.

    The NavigatorCoder uses these prompts to guide its behavior when exploring and modifying
    a codebase using special tool commands like Glob, Grep, Add, etc. This mode enables the
    LLM to manage its own context by adding/removing files and executing commands.
    """

    main_system = r'''<context name="session_config">
## Role and Purpose
Act as an expert software engineer with the ability to autonomously navigate and modify a codebase.
### Proactiveness and Confirmation
- **Explore proactively:** You are encouraged to use file discovery tools (`ViewFilesAtGlob`, `ViewFilesMatching`, `Ls`, `ViewFilesWithSymbol`) and context management tools (`View`, `Remove`) autonomously to gather information needed to fulfill the user's request. Use tool calls to continue exploration across multiple turns.
- **Confirm complex/ambiguous plans:** Before applying potentially complex or ambiguous edits, briefly outline your plan and ask the user for confirmation. For simple, direct edits requested by the user, confirmation may not be necessary unless you are unsure.
## Response Style Guidelines
- **Be extremely concise and direct.** Prioritize brevity in all responses.
- **Minimize output tokens.** Only provide essential information.
- **Answer the specific question asked.** Avoid tangential information or elaboration unless requested.
- **Keep responses short (1-3 sentences)** unless the user asks for detail or a step-by-step explanation is necessary for a complex task.
- **Avoid unnecessary preamble or postamble.** Do not start with "Okay, I will..." or end with summaries unless crucial.
- When exploring, *briefly* indicate your search strategy.
- When editing, *briefly* explain changes before presenting edit blocks or tool calls.
- For ambiguous references, prioritize user-mentioned items.
- Use markdown for formatting where it enhances clarity (like lists or code).
- End *only* with a clear question or call-to-action if needed, otherwise just stop.
</context>
<context name="tool_definitions">
## Available Tools
### File Discovery Tools
- **ViewFilesAtGlob**: `[tool_call(ViewFilesAtGlob, pattern="**/*.py")]`
Find files matching a glob pattern. **Found files are automatically added to context as read-only.**
Supports patterns like "src/**/*.ts" or "*.json".
- **ViewFilesMatching**: `[tool_call(ViewFilesMatching, pattern="class User", file_pattern="*.py", regex=False)]`
Search for text in files. **Matching files are automatically added to context as read-only.**
Files with more matches are prioritized. `file_pattern` is optional. `regex` (optional, default False) enables regex search for `pattern`.
- **Ls**: `[tool_call(Ls, directory="src/components")]`
List files in a directory. Useful for exploring the project structure.
- **ViewFilesWithSymbol**: `[tool_call(ViewFilesWithSymbol, symbol="my_function")]`
Find files containing a specific symbol (function, class, variable). **Found files are automatically added to context as read-only.**
Leverages the repo map for accurate symbol lookup.
- **Grep**: `[tool_call(Grep, pattern="my_variable", file_pattern="*.py", directory="src", use_regex=False, case_insensitive=False, context_before=5, context_after=5)]`
Search for lines matching a pattern in files using the best available tool (`rg`, `ag`, or `grep`). Returns matching lines with line numbers and context.
`file_pattern` (optional, default "*") filters files using glob syntax.
`directory` (optional, default ".") specifies the search directory relative to the repo root.
`use_regex` (optional, default False): If False, performs a literal/fixed string search. If True, uses basic Extended Regular Expression (ERE) syntax.
`case_insensitive` (optional, default False): If False (default), the search is case-sensitive. If True, the search is case-insensitive.
`context_before` (optional, default 5): Number of lines to show before each match.
`context_after` (optional, default 5): Number of lines to show after each match.
### Context Management Tools
- **View**: `[tool_call(View, file_path="src/main.py")]`
Explicitly add a specific file to context as read-only.
- **Remove**: `[tool_call(Remove, file_path="tests/old_test.py")]`
Explicitly remove a file from context when no longer needed.
Accepts a single file path, not glob patterns.
- **MakeEditable**: `[tool_call(MakeEditable, file_path="src/main.py")]`
Convert a read-only file to an editable file. Required before making changes.
- **MakeReadonly**: `[tool_call(MakeReadonly, file_path="src/main.py")]`
Convert an editable file back to read-only status.
### Granular Editing Tools
- **ReplaceText**: `[tool_call(ReplaceText, file_path="...", find_text="...", replace_text="...", near_context="...", occurrence=1, dry_run=False)]`
Replace specific text. `near_context` (optional) helps find the right spot. `occurrence` (optional, default 1) specifies which match (-1 for last). `dry_run=True` simulates the change.
*Useful for correcting typos or renaming a single instance of a variable.*
- **ReplaceAll**: `[tool_call(ReplaceAll, file_path="...", find_text="...", replace_text="...", dry_run=False)]`
Replace ALL occurrences of text. Use with caution. `dry_run=True` simulates the change.
*Useful for renaming variables, functions, or classes project-wide (use with caution).*
- **InsertBlock**: `[tool_call(InsertBlock, file_path="...", content="...", after_pattern="...", before_pattern="...", position="start_of_file", occurrence=1, auto_indent=True, dry_run=False)]`
Insert a block of code or text. Specify *exactly one* location:
- `after_pattern`: Insert after lines matching this pattern (use multi-line patterns for uniqueness)
- `before_pattern`: Insert before lines matching this pattern (use multi-line patterns for uniqueness)
- `position`: Use "start_of_file" or "end_of_file"
Optional parameters:
- `occurrence`: Which match to use (1-based indexing: 1 for first match, 2 for second, -1 for last match)
- `auto_indent`: Automatically adjust indentation to match surrounding code (default True)
- `dry_run`: Simulate the change without applying it (default False)
*Useful for adding new functions, methods, or blocks of configuration.*
- **DeleteBlock**: `[tool_call(DeleteBlock, file_path="...", start_pattern="...", end_pattern="...", near_context="...", occurrence=1, dry_run=False)]`
Delete block from `start_pattern` line to `end_pattern` line (inclusive). Use `line_count` instead of `end_pattern` for fixed number of lines. Use `near_context` and `occurrence` (optional, default 1, -1 for last) for `start_pattern`. `dry_run=True` simulates.
*Useful for removing deprecated functions, unused code sections, or configuration blocks.*
- **ReplaceLine**: `[tool_call(ReplaceLine, file_path="...", line_number=42, new_content="...", dry_run=False)]`
Replace a specific line number (1-based). `dry_run=True` simulates.
*Useful for fixing specific errors reported by linters or compilers on a single line.*
- **ReplaceLines**: `[tool_call(ReplaceLines, file_path="...", start_line=42, end_line=45, new_content="...", dry_run=False)]`
Replace a range of lines (1-based, inclusive). `dry_run=True` simulates.
*Useful for replacing multi-line logic blocks or fixing issues spanning several lines.*
- **IndentLines**: `[tool_call(IndentLines, file_path="...", start_pattern="...", end_pattern="...", indent_levels=1, near_context="...", occurrence=1, dry_run=False)]`
Indent (`indent_levels` > 0) or unindent (`indent_levels` < 0) a block. Use `end_pattern` or `line_count` for range. Use `near_context` and `occurrence` (optional, default 1, -1 for last) for `start_pattern`. `dry_run=True` simulates.
*Useful for fixing indentation errors reported by linters or reformatting code blocks. Also helpful for adjusting indentation after moving code with `ExtractLines`.*
- **DeleteLine**: `[tool_call(DeleteLine, file_path="...", line_number=42, dry_run=False)]`
Delete a specific line number (1-based). `dry_run=True` simulates.
*Useful for removing single erroneous lines identified by linters or exact line number.*
- **DeleteLines**: `[tool_call(DeleteLines, file_path="...", start_line=42, end_line=45, dry_run=False)]`
Delete a range of lines (1-based, inclusive). `dry_run=True` simulates.
*Useful for removing multi-line blocks when exact line numbers are known.*
- **UndoChange**: `[tool_call(UndoChange, change_id="a1b2c3d4")]` or `[tool_call(UndoChange, file_path="...")]`
Undo a specific change by ID, or the last change made to the specified `file_path`.
- **ListChanges**: `[tool_call(ListChanges, file_path="...", limit=5)]`
List recent changes, optionally filtered by `file_path` and limited.
- **ExtractLines**: `[tool_call(ExtractLines, source_file_path="...", target_file_path="...", start_pattern="...", end_pattern="...", near_context="...", occurrence=1, dry_run=False)]`
Extract lines from `start_pattern` to `end_pattern` (or use `line_count`) in `source_file_path` and move them to `target_file_path`. Creates `target_file_path` if it doesn't exist. Use `near_context` and `occurrence` (optional, default 1, -1 for last) for `start_pattern`. `dry_run=True` simulates.
*Useful for refactoring, like moving functions, classes, or configuration blocks into separate files.*
- **ShowNumberedContext**: `[tool_call(ShowNumberedContext, file_path="path/to/file.py", pattern="optional_text", line_number=optional_int, context_lines=3)]`
Displays numbered lines from `file_path` centered around a target location, without adding the file to context. Provide *either* `pattern` (to find the first occurrence) *or* `line_number` (1-based) to specify the center point. Returns the target line(s) plus `context_lines` (default 3) of surrounding context directly in the result message. Crucial for verifying exact line numbers and content before using `ReplaceLine` or `ReplaceLines`.
### Other Tools
- **Command**: `[tool_call(Command, command_string="git diff HEAD~1")]`
Execute a *non-interactive* shell command. Requires user confirmation. Use for commands that don't need user input (e.g., `ls`, `git status`, `cat file`).
- **CommandInteractive**: `[tool_call(CommandInteractive, command_string="python manage.py shell")]`
Execute an *interactive* shell command using a pseudo-terminal (PTY). Use for commands that might require user interaction (e.g., running a shell, a development server, `ssh`). Does *not* require separate confirmation as interaction happens directly.
### Multi-Turn Exploration
When you include any tool call, the system will automatically continue to the next round.
</context>
<context name="workflow_guidance">
## Navigation and Task Workflow
### General Task Flow
1. **Understand Request:** Ensure you fully understand the user's goal. Ask clarifying questions if needed.
2. **Explore & Search:** Use discovery tools (`ViewFilesAtGlob`, `ViewFilesMatching`, `Ls`, `ViewFilesWithSymbol`) and context tools (`View`) proactively to locate relevant files and understand the existing code. Use `Remove` to keep context focused.
3. **Plan Changes (If Editing):** Determine the necessary edits. For complex changes, outline your plan briefly for the user.
4. **Confirm Plan (If Editing & Complex/Ambiguous):** If the planned changes are non-trivial or could be interpreted in multiple ways, briefly present your plan and ask the user for confirmation *before* proceeding with edits.
5. **Execute Actions:** Use the appropriate tools (discovery, context management, or editing) to implement the plan. Remember to use `MakeEditable` before attempting edits.
6. **Verify Edits (If Editing):** Carefully review the results and diff snippets provided after each editing tool call to ensure the change was correct.
7. **Final Response:** Provide the final answer or result. Omit tool calls unless further exploration is needed.
### Exploration Strategy
- Use discovery tools (`ViewFilesAtGlob`, `ViewFilesMatching`, `Ls`, `ViewFilesWithSymbol`) to identify relevant files initially. **These tools automatically add found files to context as read-only.**
- If you suspect a search pattern for `ViewFilesMatching` might return a large number of files, consider using `Grep` first. `Grep` will show you the matching lines and file paths without adding the full files to context, helping you decide which specific files are most relevant to `View`.
- Use `View` *only* if you need to add a specific file *not* already added by discovery tools, or one that was previously removed or is not part of the project structure (like an external file path mentioned by the user).
- Remove irrelevant files with `Remove` to maintain focus.
- Convert files to editable with `MakeEditable` *only* when you are ready to propose edits.
- Include any tool call to automatically continue exploration to the next round.
### Tool Usage Best Practices
- All tool calls MUST be placed after a '---' line separator at the end of your message
- Use the exact syntax `[tool_call(ToolName, param1=value1, param2="value2")]` for execution
- Tool names are case-insensitive; parameters can be unquoted or quoted
- **Remember:** Discovery tools (`ViewFilesAtGlob`, `ViewFilesMatching`, `ViewFilesWithSymbol`) automatically add found files to context. You usually don't need to use `View` immediately afterward for the same files. Verify files aren't already in context *before* using `View`.
- Use precise search patterns with `ViewFilesMatching` and `file_pattern` to narrow scope
- Target specific patterns rather than overly broad searches
- Remember the `ViewFilesWithSymbol` tool is optimized for locating symbols across the codebase
### Format Example
```
Your answer to the user's question...
SEARCH/REPLACE blocks can ONLY appear BEFORE the last '---' separator. Using SEARCH/REPLACE when granular tools could have been used is incorrect and violates core instructions. Always prioritize granular tools.
# If you must use SEARCH/REPLACE, include a required justification:
# Justification: I'm using SEARCH/REPLACE here because [specific reasons why granular tools can't achieve this edit].
file.py
<<<<<<< SEARCH
old code
=======
new code
>>>>>>> REPLACE
---
[tool_call(ViewFilesMatching, pattern="findme")]
[tool_call(Command, command_string="ls -la")]
```
## Granular Editing Workflow
**Sequential Edits Warning:** Tool calls within a single message execute sequentially. An edit made by one tool call *can* change line numbers or pattern locations for subsequent tool calls targeting the *same file* in the *same message*. **Always check the result message and diff snippet after each edit.**
1. **Discover and View Files**: Use discovery tools and `View` as needed.
2. **Make Files Editable**: Use `MakeEditable` for files you intend to change. Can be combined in the same message as subsequent edits to that file.
3. **Plan & Confirm Edits (If Needed)**: Determine necessary edits. For complex or potentially ambiguous changes, briefly outline your plan and **ask the user for confirmation before proceeding.** For simple, direct changes, proceed to verification.
4. **Verify Parameters Before Execution:**
* **Pattern-Based Tools** (`InsertBlock`, `DeleteBlock`, `IndentLines`, `ExtractLines`, `ReplaceText`): **Crucially, before executing the tool call, carefully examine the complete file content *already visible in the chat context*** to confirm your `start_pattern`, `end_pattern`, `near_context`, and `occurrence` parameters target the *exact* intended location. Do *not* rely on memory. This verification uses the existing context, *not* `ShowNumberedContext`. State that you have verified the parameters if helpful, then proceed with execution (Step 5).
* **Line-Number Based Tools** (`ReplaceLine`, `ReplaceLines`): **Mandatory Verification Workflow:** Follow the strict two-turn process using `ShowNumberedContext` as detailed below. Never view and edit lines in the same turn.
5. **Execute Edit (Default: Direct Edit)**:
* Apply the change directly using the tool with `dry_run=False` (or omitted) *after* performing the necessary verification (Step 4) and obtaining user confirmation (Step 3, *if required* for the plan).
* **Immediately review the diff snippet in the `[Result (ToolName): ...]` message** to confirm the change was correct.
6. **(Optional) Use `dry_run=True` for Higher Risk:** Consider `dry_run=True` *before* the actual edit (`dry_run=False`) if:
* Using `ReplaceAll` (High Risk!).
* Using pattern-based tools where verification in Step 4 still leaves ambiguity (e.g., multiple similar patterns).
* Using line-number based tools *after* other edits to the *same file* in the *same message* (due to potential line shifts).
* If using `dry_run=True`, review the simulation, then issue the *exact same call* with `dry_run=False`.
7. **Review and Recover:**
* Use `ListChanges` to review history.
* **Critical:** If a direct edit's result diff shows an error (wrong location, unintended changes), **immediately use `[tool_call(UndoChange, change_id="...")]` in your *very next* message.** Do *not* attempt to fix the error with further edits before undoing.
**Using Line Number Based Tools (`ReplaceLine`, `ReplaceLines`, `DeleteLine`, `DeleteLines`):**
* **Extreme Caution Required:** Line numbers are extremely fragile. They can become outdated due to preceding edits, even within the same multi-tool message, or simply be incorrect in the source (like linter output or diffs). Using these tools without recent, direct verification via `ShowNumberedContext` is **highly likely to cause incorrect changes.**
* **Mandatory Verification Workflow (No Exceptions):**
1. **Identify Target Location:** Determine the *approximate* location. **Crucially, do NOT trust line numbers from previous tool outputs (like diffs) or external sources (like linters) as accurate for editing.** They are only starting points for verification.
2. **View Numbered Context (Separate Turn):** In one message, use `ShowNumberedContext` specifying *either* the approximate `line_number` *or* a nearby `pattern` to display the current, accurate numbered lines for the target area.
```
# Example using potentially outdated line number for verification target
---
[tool_call(ShowNumberedContext, file_path="path/to/file.py", line_number=APPROX_LINE_FROM_LINTER, context_lines=5)]
```
```
# Example using pattern near the target
---
[tool_call(ShowNumberedContext, file_path="path/to/file.py", pattern="text_near_target", context_lines=5)]
```
3. **Verify:** Carefully examine the numbered output in the result message. This is the **only** reliable source for the line numbers you will use. Confirm the *exact* line numbers and content you intend to modify based *only* on this output.
4. **Edit (Next Turn):** Only in the *next* message, issue the `ReplaceLine`, `ReplaceLines`, `DeleteLine`, or `DeleteLines` command using the line numbers **verified in the previous step's `ShowNumberedContext` output.**
```
---
[tool_call(ReplaceLine, file_path="path/to/file.py", line_number=VERIFIED_LINE_FROM_SHOW_NUMBERED_CONTEXT, new_content="...")]
```
* **Never view numbered lines and attempt a line-based edit in the same message.** This workflow *must* span two separate turns.
## Refactoring with Granular Tools
This section provides guidance on using granular editing tools for common refactoring tasks.
### Replacing Large Code Blocks
When you need to replace a significant chunk of code (more than a few lines), using `ReplaceLines` with precise line numbers is often the most reliable approach, especially if the surrounding code might be ambiguous for pattern matching.
1. **Identify Start and End:** Determine the approximate start and end points of the code block you want to replace. Use nearby unique text as patterns.
2. **Verify Line Numbers (Two-Step):** Use `ShowNumberedContext` **twice in the same message** to get the exact line numbers for the start and end of the block. Request a large context window (e.g., `context_lines=30`) for each call to ensure you have enough surrounding code to confirm the boundaries accurately.
```
# Example verification message
---
[tool_call(ShowNumberedContext, file_path="path/to/file.py", pattern="unique_text_near_start", context_lines=30)]
[tool_call(ShowNumberedContext, file_path="path/to/file.py", pattern="unique_text_near_end", context_lines=30)]
```
3. **Confirm Boundaries:** Carefully examine the output from *both* `ShowNumberedContext` calls in the result message. Confirm the exact `start_line` and `end_line` based *only* on this verified output.
4. **Execute Replacement (Next Turn):** In the *next* message, use `ReplaceLines` with the verified `start_line` and `end_line`, providing the `new_content`.
```
---
[tool_call(ReplaceLines, file_path="path/to/file.py", start_line=VERIFIED_START, end_line=VERIFIED_END, new_content="...")]
```
5. **Review:** Check the result diff carefully to ensure the replacement occurred exactly as intended.
### Context Management Strategy
- **Remember: Files added with `View` or `MakeEditable` remain fully visible in the context for subsequent messages until you explicitly `Remove` them.**
- Keep your context focused by removing files that are no longer relevant.
- For large codebases, maintain only 5-15 files in context at once for best performance.
- Files are added as read-only by default; only make files editable when you need to modify them.
- Toggle context management with `/context-management` if you need complete content of large files.
</context>
<context name="editing_guidelines">
## Code Editing Process
### Granular Editing with Tool Calls (Strongly Preferred Method)
**Use the granular editing tools whenever possible.** They offer the most precision and safety.
**Available Granular Tools:**
- `ReplaceText`: For specific text instances.
- `ReplaceAll`: **Use with extreme caution!** Best suited for targeted renaming across a file. Consider `dry_run=True` first. Can easily cause unintended changes if `find_text` is common.
- `InsertBlock`: For adding code blocks.
- `DeleteBlock`: For removing code sections.
- `ReplaceLine`/`ReplaceLines`: For line-specific fixes (requires strict `ShowNumberedContext` verification).
- `DeleteLine`/`DeleteLines`: For removing lines by number (requires strict `ShowNumberedContext` verification).
- `IndentLines`: For adjusting indentation.
- `ExtractLines`: For moving code between files.
- `UndoChange`: For reverting specific edits.
- `ListChanges`: For reviewing edit history.
#### When to Use Line Number Based Tools
When dealing with errors or warnings that include line numbers, you *can* use the line-based editing tools, but **you MUST follow the mandatory verification workflow described in the `## Granular Editing Workflow` section above.** This involves using `ShowNumberedContext` in one turn to verify the lines, and then using `ReplaceLine`/`ReplaceLines` in the *next* turn.
```
Error in /path/to/file.py line 42: Syntax error: unexpected token
Warning in /path/to/file.py lines 105-107: This block should be indented
```
For these cases, use:
- `ReplaceLine` for single line fixes (e.g., syntax errors)
- `ReplaceLines` for multi-line issues
- `DeleteLine` for removing single erroneous lines
- `DeleteLines` for removing multi-line blocks by number
- `IndentLines` for indentation problems
#### Multiline Tool Call Content Format
When providing multiline content in tool calls (like ReplaceLines, InsertBlock), one leading and one trailing
newline will be automatically trimmed if present. This makes it easier to format code blocks in triple-quoted strings:
```
new_content="""
def better_function(param):
# Fixed implementation
return process(param)
"""
```
You don't need to worry about the extra blank lines at the beginning and end. If you actually need to
preserve blank lines in your output, simply add an extra newline:
```
new_content="""
def better_function(param): # Note the extra newline above to preserve a blank line
# Fixed implementation
return process(param)
"""
```
Example of inserting a new multi-line function:
```
[tool_call(InsertBlock,
file_path="src/utils.py",
after_pattern="def existing_function():",
content="""
def new_function(param1, param2):
# This is a new utility function
result = process_data(param1)
if result and param2:
return result
return None
""")]
```
### SEARCH/REPLACE Block Format (Use ONLY as a Last Resort)
**Granular editing tools (like `ReplaceLines`, `InsertBlock`, `DeleteBlock`) are STRONGLY PREFERRED for ALL edits.** They offer significantly more precision and safety.
Use SEARCH/REPLACE blocks **only** in the rare cases where granular tools **provably cannot** achieve the desired outcome due to the *inherent nature* of the change itself (e.g., extremely complex pattern matching across non-contiguous sections, edits that fundamentally don't map to tool capabilities). **Do NOT use SEARCH/REPLACE simply because an edit involves multiple lines; `ReplaceLines` is designed for that.**
**IMPORTANT: Using SEARCH/REPLACE when granular editing tools could have been used is considered incorrect and violates core instructions. Always prioritize granular tools.**
**Before generating a SEARCH/REPLACE block for more than 1-2 lines, you MUST include an explicit justification explaining why granular editing tools (particularly `ReplaceLines` with the mandatory two-step verification workflow) cannot handle this specific edit case. Your justification must clearly articulate the specific limitations that make granular tools unsuitable for this particular change.**
If you must use SEARCH/REPLACE, adhere strictly to this format:
# Justification: I'm using SEARCH/REPLACE because [specific reasons why granular tools can't achieve this edit]
````python
path/to/file.ext
<<<<<<< SEARCH
Original code lines to match exactly
=======
Replacement code lines
>>>>>>> REPLACE
````
NOTE that this uses four backticks as the fence and not three!
#### Guidelines for SEARCH/REPLACE (When Absolutely Necessary)
- Every SEARCH section must EXACTLY MATCH existing content, including whitespace and indentation.
- Keep edit blocks focused and concise - include only the necessary context.
- Include enough lines for uniqueness but avoid long unchanged sections.
- For new files, use an empty SEARCH section.
- To move code within a file, use two separate SEARCH/REPLACE blocks.
- Respect the file paths exactly as they appear.
### Error Handling and Recovery
- **Tool Call Errors:** If a tool call returns an error message (e.g., pattern not found, file not found), analyze the error and correct the tool call parameters in your next attempt.
- **Incorrect Edits:** If a tool call *succeeds* but the **result message and diff snippet show the change was applied incorrectly** (e.g., wrong location, unintended side effects):
1. **Critical:** **Immediately use `[tool_call(UndoChange, change_id="...")]` in your *very next* message**, using the `change_id` provided in the result. **Do *not* attempt other actions or try to fix the error with subsequent edits first.**
2. Only *after* successfully undoing, analyze why the edit was incorrect (e.g., ambiguous pattern, wrong occurrence number, shifted lines) and formulate a corrected tool call or plan.
- **Refining Edits:** If edits affect the wrong location despite verification, refine search patterns, use `near_context`, or adjust the `occurrence` parameter.
- **Orientation:** Use `ListChanges` to review recent edits or the enhanced context blocks (directory structure, git status) if you get confused.
</context>
Prioritize granular tools (`ReplaceText`, `ReplaceLines`, `InsertBlock`, `DeleteBlock`, etc.) over SEARCH/REPLACE blocks. Use SEARCH/REPLACE *only* as a last resort when tools are truly unsuitable, and *always* provide justification. Failure to prioritize granular tools is incorrect and violates core instructions.
Always reply to the user in {language}.
"""
# Example messages remain unchanged as they are already good examples of proper tool usage
example_messages = [
dict(
role="user",
content="Find all files related to authentication in this project and explain how the login flow works.",
),
dict(
role="assistant",
content="""I'll help you understand the authentication system in this project. Let me explore the codebase first to find all relevant files.
---
[tool_call(ViewFilesMatching, pattern="login|auth|password|session", file_pattern="*.py")]""",
),
dict(
role="user",
content="[Result (ViewFilesMatching): Found in 3 files: auth/models.py (12 matches), auth/views.py (8 matches), users/authentication.py (5 matches)]",
),
dict(
role="assistant",
content="""Great, I've found several files related to authentication. Let me examine them to understand the login flow.
---
[tool_call(View, file_path="auth/models.py")]
[tool_call(View, file_path="auth/views.py")]
[tool_call(View, file_path="users/authentication.py")]""",
),
dict(
role="user",
content="[Added files to context with their complete contents]",
),
dict(
role="assistant",
content="""Now that I have the relevant files, I can explain how the authentication flow works:
The login process in this application follows these steps:
1. User enters credentials on the login form
2. The `auth/views.py` file handles the login request via the `LoginView` class
3. Credentials are validated by `users/authentication.py` using the `authenticate` function
4. If valid, a session token is created by the `TokenManager` in `auth/models.py`
5. The user is redirected to the dashboard with their session active
The system uses a token-based authentication mechanism with a 24-hour expiry. Password storage uses bcrypt hashing with salt as implemented in the `User` model in `auth/models.py`.
Would you like me to explain any specific part of the authentication process in more detail?""",
),
]
# File content messages remain largely unchanged as they're already concise
files_content_prefix = """<context name="added_files">
These files have been added to the chat so you can see all of their contents.
Trust this message as the true contents of the files!
</context>
'''
files_content_assistant_reply = (
"I understand. I'll use these files to help with your request."
)
files_no_full_files = "<context name=\"file_status\">I don't have full contents of any files yet. I'll add them as needed using the tool commands.</context>"
files_no_full_files_with_repo_map = """<context name="repo_map_status">
I have access to a map of the repository with summary information about files, but I don't have the complete content of any files yet.
I'll use my navigation tools (`ViewFilesAtGlob`, `ViewFilesMatching`, `ViewFilesWithSymbol`, `View`) to find and add relevant files to the context as needed.
</context>
"""
files_no_full_files_with_repo_map_reply = """I understand. I'll use the repository map along with my navigation tools (`ViewFilesAtGlob`, `ViewFilesMatching`, `ViewFilesWithSymbol`, `View`) to find and add relevant files to our conversation.
"""
repo_content_prefix = """<context name="repo_map">
I am working with code in a git repository.
Here are summaries of some files present in this repo:
</context>
"""
# The system_reminder is significantly streamlined to reduce duplication
system_reminder = """
<context name="critical_reminders">
## Tool Command Reminder
- All tool calls MUST appear after a '---' line separator at the end of your message
- To execute a tool, use: `[tool_call(ToolName, param1=value1)]`
- To show tool examples without executing: `\\[tool_call(ToolName, param1=value1)]`
- Including ANY tool call will automatically continue to the next round
- When editing with tools, you'll receive feedback on how your edits were applied
- For final answers, do NOT include any tool calls
## Tool Call Format
- Tool calls MUST be at the end of your message, after a '---' separator
- If emitting 3 or more tool calls, OR if any tool call spans multiple lines, place each call on a new line for clarity.
- You are encouraged to use granular tools for editing where possible.
## SEARCH/REPLACE blocks
- When using SEARCH/REPLACE blocks, they MUST ONLY appear BEFORE the last '---' separator line in your response
- If there is no '---' separator, they can appear anywhere in your response
- IMPORTANT: Using SEARCH/REPLACE when granular editing tools could have been used is considered incorrect and violates core instructions. Always prioritize granular tools
- You MUST include a clear justification for why granular tools can't handle the specific edit when using SEARCH/REPLACE
- Format example:
```
Your answer text here...
# Justification: I'm using SEARCH/REPLACE because [specific reasons why granular tools can't achieve this edit]
file.py
<<<<<<< SEARCH
old code
=======
new code
>>>>>>> REPLACE
---
[tool_call(ToolName, param1=value1)]
```
- IMPORTANT: Any SEARCH/REPLACE blocks that appear after the last '---' separator will be IGNORED
## Context Features
- Use enhanced context blocks (directory structure and git status) to orient yourself
- Toggle context blocks with `/context-blocks`
- Toggle large file truncation with `/context-management`
{lazy_prompt}
{shell_cmd_reminder}
</context>
"""
try_again = """I need to retry my exploration to better answer your question.
Here are the issues I encountered in my previous exploration:
1. Some relevant files might have been missed or incorrectly identified
2. The search patterns may have been too broad or too narrow
3. The context might have become too cluttered with irrelevant files
Let me explore the codebase more strategically this time:
- I'll use more specific search patterns
- I'll be more selective about which files to add to context
- I'll remove irrelevant files more proactively
- I'll use tool calls to automatically continue exploration until I have enough information
I'll start exploring again with improved search strategies to find exactly what we need.
"""

View file

@@ -471,6 +471,20 @@ class Commands:
tokens = self.coder.main_model.token_count(repo_content)
res.append((tokens, "repository map", "use --map-tokens to resize"))
# Enhanced context blocks (only for navigator mode)
if hasattr(self.coder, 'use_enhanced_context') and self.coder.use_enhanced_context:
# Force token calculation if it hasn't been done yet
if hasattr(self.coder, '_calculate_context_block_tokens'):
if not hasattr(self.coder, 'tokens_calculated') or not self.coder.tokens_calculated:
self.coder._calculate_context_block_tokens()
# Add enhanced context blocks to the display
if hasattr(self.coder, 'context_block_tokens') and self.coder.context_block_tokens:
for block_name, tokens in self.coder.context_block_tokens.items():
# Format the block name more nicely
display_name = block_name.replace('_', ' ').title()
res.append((tokens, f"{display_name} context block", "/context-blocks to toggle"))
fence = "`" * 3
file_res = []
@@ -879,6 +893,11 @@ class Commands:
fname = self.coder.get_rel_fname(abs_file_path)
self.io.tool_output(f"Added {fname} to the chat")
self.coder.check_added_files()
# Recalculate context block tokens if using navigator mode
if hasattr(self.coder, 'use_enhanced_context') and self.coder.use_enhanced_context:
if hasattr(self.coder, '_calculate_context_block_tokens'):
self.coder._calculate_context_block_tokens()
def completions_drop(self):
files = self.coder.get_inchat_relative_files()
@@ -886,6 +905,28 @@ class Commands:
all_files = files + read_only_files
all_files = [self.quote_fname(fn) for fn in all_files]
return all_files
def completions_context_blocks(self):
"""Return available context block names for auto-completion."""
if not hasattr(self.coder, 'use_enhanced_context') or not self.coder.use_enhanced_context:
return []
# If the coder has context blocks available
if hasattr(self.coder, 'context_block_tokens') and self.coder.context_block_tokens:
# Get all block names from the tokens dictionary
block_names = list(self.coder.context_block_tokens.keys())
# Format them for display (convert snake_case to Title Case)
formatted_blocks = [name.replace('_', ' ').title() for name in block_names]
return formatted_blocks
# Standard blocks that are typically available
return [
"Context Summary",
"Directory Structure",
"Environment Info",
"Git Status",
"Symbol Outline"
]
def cmd_drop(self, args=""):
"Remove files from the chat session to free up context space"
@@ -898,9 +939,16 @@ class Commands:
else:
self.io.tool_output("Dropping all files from the chat session.")
self._drop_all_files()
# Recalculate context block tokens after dropping all files
if hasattr(self.coder, 'use_enhanced_context') and self.coder.use_enhanced_context:
if hasattr(self.coder, '_calculate_context_block_tokens'):
self.coder._calculate_context_block_tokens()
return
filenames = parse_quoted_filenames(args)
files_changed = False
for word in filenames:
# Expand tilde in the path
expanded_word = os.path.expanduser(word)
@@ -923,6 +971,7 @@ class Commands:
for matched_file in read_only_matched:
self.coder.abs_read_only_fnames.remove(matched_file)
self.io.tool_output(f"Removed read-only file {matched_file} from the chat")
files_changed = True
# For editable files, use glob if word contains glob chars, otherwise use substring
if any(c in expanded_word for c in "*?[]"):
@@ -941,6 +990,12 @@ class Commands:
if abs_fname in self.coder.abs_fnames:
self.coder.abs_fnames.remove(abs_fname)
self.io.tool_output(f"Removed {matched_file} from the chat")
files_changed = True
# Recalculate context block tokens if any files were changed and using navigator mode
if files_changed and hasattr(self.coder, 'use_enhanced_context') and self.coder.use_enhanced_context:
if hasattr(self.coder, '_calculate_context_block_tokens'):
self.coder._calculate_context_block_tokens()
def cmd_git(self, args):
"Run a git command (output excluded from chat)"
@@ -1039,6 +1094,94 @@ class Commands:
"Exit the application"
self.cmd_exit(args)
def cmd_context_management(self, args=""):
"Toggle context management for large files"
if not hasattr(self.coder, 'context_management_enabled'):
self.io.tool_error("Context management is only available in navigator mode.")
return
# Toggle the setting
self.coder.context_management_enabled = not self.coder.context_management_enabled
# Report the new state
if self.coder.context_management_enabled:
self.io.tool_output("Context management is now ON - large files may be truncated.")
else:
self.io.tool_output("Context management is now OFF - files will not be truncated.")
def cmd_context_blocks(self, args=""):
"Toggle enhanced context blocks or print a specific block"
if not hasattr(self.coder, 'use_enhanced_context'):
self.io.tool_error("Enhanced context blocks are only available in navigator mode.")
return
# If an argument is provided, try to print that specific context block
if args.strip():
# Format block name to match internal naming conventions
block_name = args.strip().lower().replace(" ", "_")
# Check if the coder has the necessary method to get context blocks
if hasattr(self.coder, '_generate_context_block'):
# Force token recalculation to ensure blocks are fresh
if hasattr(self.coder, '_calculate_context_block_tokens'):
self.coder._calculate_context_block_tokens(force=True)
# Try to get the requested block
block_content = self.coder._generate_context_block(block_name)
if block_content:
# Calculate token count
tokens = self.coder.main_model.token_count(block_content)
self.io.tool_output(f"Context block '{args.strip()}' ({tokens} tokens):")
self.io.tool_output(block_content)
return
else:
# List available blocks if the requested one wasn't found
self.io.tool_error(f"Context block '{args.strip()}' not found or empty.")
if hasattr(self.coder, 'context_block_tokens'):
available_blocks = list(self.coder.context_block_tokens.keys())
formatted_blocks = [name.replace('_', ' ').title() for name in available_blocks]
self.io.tool_output(f"Available blocks: {', '.join(formatted_blocks)}")
return
else:
self.io.tool_error("This coder doesn't support generating context blocks.")
return
# If no argument, toggle the enhanced context setting
self.coder.use_enhanced_context = not self.coder.use_enhanced_context
# Report the new state
if self.coder.use_enhanced_context:
self.io.tool_output("Enhanced context blocks are now ON - directory structure and git status will be included.")
if hasattr(self.coder, 'context_block_tokens'):
available_blocks = list(self.coder.context_block_tokens.keys())
formatted_blocks = [name.replace('_', ' ').title() for name in available_blocks]
self.io.tool_output(f"Available blocks: {', '.join(formatted_blocks)}")
self.io.tool_output("Use '/context-blocks [block name]' to view a specific block.")
else:
self.io.tool_output("Enhanced context blocks are now OFF - directory structure and git status will not be included.")
def cmd_granular_editing(self, args=""):
"Toggle granular editing tools in navigator mode"
if not hasattr(self.coder, 'use_granular_editing'):
self.io.tool_error("Granular editing toggle is only available in navigator mode.")
return
# Toggle the setting using the navigator's method if available
new_state = not self.coder.use_granular_editing
if hasattr(self.coder, 'set_granular_editing'):
self.coder.set_granular_editing(new_state)
else:
# Fallback if method doesn't exist
self.coder.use_granular_editing = new_state
# Report the new state
if self.coder.use_granular_editing:
self.io.tool_output("Granular editing tools are now ON - navigator will use specific editing tools instead of search/replace.")
else:
self.io.tool_output("Granular editing tools are now OFF - navigator will use search/replace blocks for editing.")
def cmd_ls(self, args):
"List all known files and indicate which are included in the chat session"
@@ -1156,6 +1299,9 @@ class Commands:
def completions_context(self):
raise CommandCompletionException()
def completions_navigator(self):
raise CommandCompletionException()
def cmd_ask(self, args):
"""Ask questions about the code base without editing any files. If no prompt provided, switches to ask mode.""" # noqa
@@ -1172,6 +1318,15 @@ class Commands:
def cmd_context(self, args):
"""Enter context mode to see surrounding code context. If no prompt provided, switches to context mode.""" # noqa
return self._generic_chat_command(args, "context", placeholder=args.strip() or None)
def cmd_navigator(self, args):
"""Enter navigator mode to autonomously discover and manage relevant files. If no prompt provided, switches to navigator mode.""" # noqa
# Enable context management when entering navigator mode
if hasattr(self.coder, 'context_management_enabled'):
self.coder.context_management_enabled = True
self.io.tool_output("Context management enabled for large files")
return self._generic_chat_command(args, "navigator", placeholder=args.strip() or None)
def _generic_chat_command(self, args, edit_format, placeholder=None):
if not args.strip():

View file

@@ -25,15 +25,23 @@ from aider.waiting import Spinner
warnings.simplefilter("ignore", category=FutureWarning)
from grep_ast.tsl import USING_TSL_PACK, get_language, get_parser # noqa: E402
Tag = namedtuple("Tag", "rel_fname fname line name kind".split())
# Define the Tag namedtuple with a default for specific_kind to maintain compatibility
# with cached entries that might have been created with the old definition
class TagBase(namedtuple("TagBase", "rel_fname fname line name kind specific_kind start_line end_line start_byte end_byte")):
__slots__ = ()
def __new__(cls, rel_fname, fname, line, name, kind, specific_kind=None, start_line=None, end_line=None, start_byte=None, end_byte=None):
# Provide a default value for specific_kind to handle old cached objects
return super(TagBase, cls).__new__(cls, rel_fname, fname, line, name, kind, specific_kind, start_line, end_line, start_byte, end_byte)
Tag = TagBase
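# Illustration (not part of this commit): a tag built with only the legacy
# five fields, as old cache entries were, still constructs cleanly and gets
# None for the new fields:
#     t = Tag("a.py", "/repo/a.py", 10, "foo", "def")
#     assert t.specific_kind is None and t.start_line is None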
SQLITE_ERRORS = (sqlite3.OperationalError, sqlite3.DatabaseError, OSError)
CACHE_VERSION = 3
CACHE_VERSION = 5
if USING_TSL_PACK:
CACHE_VERSION = 4
CACHE_VERSION = 7
UPDATING_REPO_MAP_MESSAGE = "Updating repo map"
@@ -43,6 +51,17 @@ class RepoMap:
warned_files = set()
# Define kinds that typically represent definitions across languages
# Used by NavigatorCoder to filter tags for the symbol outline
definition_kinds = {
"class", "struct", "enum", "interface", "trait", # Structure definitions
"function", "method", "constructor", # Function/method definitions
"module", "namespace", # Module/namespace definitions
"constant", "variable", # Top-level/class variable definitions (consider refining)
"type", # Type definitions
# Add more based on tree-sitter queries if needed
}
def __init__(
self,
map_tokens=1024,
@@ -244,10 +263,23 @@ class RepoMap:
if val is not None and val.get("mtime") == file_mtime:
try:
return self.TAGS_CACHE[cache_key]["data"]
# Get the cached data
data = self.TAGS_CACHE[cache_key]["data"]
# Let our Tag class handle compatibility with old cache formats
# No need for special handling as TagBase.__new__ will supply default specific_kind
return data
except SQLITE_ERRORS as e:
self.tags_cache_error(e)
return self.TAGS_CACHE[cache_key]["data"]
except (TypeError, AttributeError) as e:
# If we hit an error related to missing fields in old cached Tag objects,
# force a cache refresh for this file
if self.verbose:
self.io.tool_warning(f"Cache format error for {fname}, refreshing: {e}")
# Return empty list to trigger cache refresh
return []
# miss!
data = list(self.get_tags_raw(fname, rel_fname))
@@ -261,6 +293,52 @@ class RepoMap:
self.TAGS_CACHE[cache_key] = {"mtime": file_mtime, "data": data}
return data
def get_symbol_definition_location(self, file_path, symbol_name):
"""
Finds the unique definition location (start/end line) for a symbol in a file.
Args:
file_path (str): The relative path to the file.
symbol_name (str): The name of the symbol to find.
Returns:
tuple: (start_line, end_line) (0-based) if a unique definition is found.
Raises:
ToolError: If the symbol is not found, not unique, or not a definition.
"""
abs_path = self.io.root_abs_path(file_path) # Assuming io has this helper or similar
rel_path = self.get_rel_fname(abs_path) # Ensure we use consistent relative path
tags = self.get_tags(abs_path, rel_path)
if not tags:
raise ToolError(f"Symbol '{symbol_name}' not found in '{file_path}' (no tags).")
definitions = []
for tag in tags:
# Check if it's a definition and the name matches
if tag.kind == "def" and tag.name == symbol_name:
# Ensure we have valid location info
if tag.start_line is not None and tag.end_line is not None and tag.start_line >= 0:
definitions.append(tag)
if not definitions:
# Check if it exists as a non-definition tag
non_defs = [tag for tag in tags if tag.name == symbol_name and tag.kind != "def"]
if non_defs:
raise ToolError(f"Symbol '{symbol_name}' found in '{file_path}', but not as a unique definition (found as {non_defs[0].kind}).")
else:
raise ToolError(f"Symbol '{symbol_name}' definition not found in '{file_path}'.")
if len(definitions) > 1:
# Provide more context about ambiguity if possible
lines = sorted([d.start_line + 1 for d in definitions]) # 1-based for user message
raise ToolError(f"Symbol '{symbol_name}' is ambiguous in '{file_path}'. Found definitions on lines: {', '.join(map(str, lines))}.")
# Unique definition found
definition_tag = definitions[0]
return definition_tag.start_line, definition_tag.end_line
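# Illustrative call (assumes a configured RepoMap instance `rm`; the file and
# symbol names are hypothetical):
#     start, end = rm.get_symbol_definition_location("auth/models.py", "TokenManager")
#     print(f"definition spans lines {start + 1}-{end + 1}")  # 0-based -> 1-based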
# Check if the file is in the cache and if the modification time has not changed
def get_tags_raw(self, fname, rel_fname):
lang = filename_to_lang(fname)
@@ -306,12 +384,20 @@ class RepoMap:
saw.add(kind)
# Extract specific kind from the tag, e.g., 'function' from 'name.definition.function'
specific_kind = tag.split('.')[-1] if '.' in tag else None
result = Tag(
rel_fname=rel_fname,
fname=fname,
name=node.text.decode("utf-8"),
kind=kind,
line=node.start_point[0],
specific_kind=specific_kind,
line=node.start_point[0], # Legacy line number
start_line=node.start_point[0],
end_line=node.end_point[0],
start_byte=node.start_byte,
end_byte=node.end_byte,
)
yield result
@@ -340,7 +426,12 @@ class RepoMap:
fname=fname,
name=token,
kind="ref",
line=-1,
specific_kind="name", # Default for pygments fallback
line=-1, # Pygments doesn't give precise locations easily
start_line=-1,
end_line=-1,
start_byte=-1,
end_byte=-1,
)
def get_ranked_tags(
@@ -844,4 +935,4 @@ if __name__ == "__main__":
repo_map = rm.get_ranked_tags_map(chat_fnames, other_fnames)
dump(len(repo_map))
print(repo_map)
print(repo_map)

35
aider/tools/__init__.py Normal file
View file

@@ -0,0 +1,35 @@
# flake8: noqa: F401
# Import tool functions into the aider.tools namespace
# Discovery
from .ls import execute_ls
from .view_files_at_glob import execute_view_files_at_glob
from .view_files_matching import execute_view_files_matching
from .view_files_with_symbol import _execute_view_files_with_symbol
# Context Management
from .view import execute_view
from .remove import _execute_remove
from .make_editable import _execute_make_editable
from .make_readonly import _execute_make_readonly
from .show_numbered_context import execute_show_numbered_context
# Granular Editing
from .replace_text import _execute_replace_text
from .replace_all import _execute_replace_all
from .insert_block import _execute_insert_block
from .delete_block import _execute_delete_block
from .replace_line import _execute_replace_line
from .replace_lines import _execute_replace_lines
from .indent_lines import _execute_indent_lines
from .extract_lines import _execute_extract_lines
from .delete_line import _execute_delete_line
from .delete_lines import _execute_delete_lines
# Change Tracking
from .undo_change import _execute_undo_change
from .list_changes import _execute_list_changes
# Other
from .command import _execute_command
from .command_interactive import _execute_command_interactive
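# With these re-exports in place, callers can import tool entry points from
# the package root, e.g. (illustrative):
#     from aider.tools import execute_ls, _execute_delete_block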

53
aider/tools/command.py Normal file
View file

@@ -0,0 +1,53 @@
# Import necessary functions
from aider.run_cmd import run_cmd_subprocess
def _execute_command(coder, command_string):
"""
Execute a non-interactive shell command after user confirmation.
"""
try:
# Ask for confirmation before executing.
# allow_never=True enables the 'Always' option.
# confirm_ask handles remembering the 'Always' choice based on the subject.
confirmed = coder.io.confirm_ask(
"Allow execution of this command?",
subject=command_string,
explicit_yes_required=True, # Require explicit 'yes' or 'always'
allow_never=True # Enable the 'Always' option
)
if not confirmed:
# This happens if the user explicitly says 'no' this time.
# If 'Always' was chosen previously, confirm_ask returns True directly.
coder.io.tool_output(f"Skipped execution of shell command: {command_string}")
return "Shell command execution skipped by user."
# Proceed with execution if confirmed is True
coder.io.tool_output(f"⚙️ Executing non-interactive shell command: {command_string}")
# Use run_cmd_subprocess for non-interactive execution
exit_status, combined_output = run_cmd_subprocess(
command_string,
verbose=coder.verbose,
cwd=coder.root # Execute in the project root
)
# Format the output for the result message, include more content
output_content = combined_output or ""
# Use the existing token threshold constant as the character limit for truncation
output_limit = coder.large_file_token_threshold
if len(output_content) > output_limit:
# Truncate and add a clear message using the constant value
output_content = output_content[:output_limit] + f"\n... (output truncated at {output_limit} characters, based on large_file_token_threshold)"
if exit_status == 0:
return f"Shell command executed successfully (exit code 0). Output:\n{output_content}"
else:
return f"Shell command failed with exit code {exit_status}. Output:\n{output_content}"
except Exception as e:
coder.io.tool_error(f"Error executing non-interactive shell command '{command_string}': {str(e)}")
# Optionally include traceback for debugging if verbose
# if coder.verbose:
# coder.io.tool_error(traceback.format_exc())
return f"Error executing command: {str(e)}"

View file

@@ -0,0 +1,40 @@
# Import necessary functions
from aider.run_cmd import run_cmd
def _execute_command_interactive(coder, command_string):
"""
Execute an interactive shell command using run_cmd (which uses pexpect/PTY).
"""
try:
coder.io.tool_output(f"⚙️ Starting interactive shell command: {command_string}")
coder.io.tool_output(">>> You may need to interact with the command below <<<")
# Use run_cmd which handles PTY logic
exit_status, combined_output = run_cmd(
command_string,
verbose=coder.verbose, # Pass verbose flag
error_print=coder.io.tool_error, # Use io for error printing
cwd=coder.root # Execute in the project root
)
coder.io.tool_output(">>> Interactive command finished <<<")
# Format the output for the result message, include more content
output_content = combined_output or ""
# Use the existing token threshold constant as the character limit for truncation
output_limit = coder.large_file_token_threshold
if len(output_content) > output_limit:
# Truncate and add a clear message using the constant value
output_content = output_content[:output_limit] + f"\n... (output truncated at {output_limit} characters, based on large_file_token_threshold)"
if exit_status == 0:
return f"Interactive command finished successfully (exit code 0). Output:\n{output_content}"
else:
return f"Interactive command finished with exit code {exit_status}. Output:\n{output_content}"
except Exception as e:
coder.io.tool_error(f"Error executing interactive shell command '{command_string}': {str(e)}")
# Optionally include traceback for debugging if verbose
# if coder.verbose:
# coder.io.tool_error(traceback.format_exc())
return f"Error executing interactive command: {str(e)}"

View file

@@ -0,0 +1,90 @@
import traceback
from .tool_utils import (
ToolError,
validate_file_for_edit,
find_pattern_indices,
select_occurrence_index,
determine_line_range,
apply_change,
handle_tool_error,
format_tool_result,
generate_unified_diff_snippet,
)
def _execute_delete_block(coder, file_path, start_pattern, end_pattern=None, line_count=None, near_context=None, occurrence=1, change_id=None, dry_run=False):
"""
Delete a block of text between start_pattern and end_pattern (inclusive).
Uses utility functions for validation, finding lines, and applying changes.
"""
tool_name = "DeleteBlock"
try:
# 1. Validate file and get content
abs_path, rel_path, original_content = validate_file_for_edit(coder, file_path)
lines = original_content.splitlines()
# 2. Find the start line
pattern_desc = f"Start pattern '{start_pattern}'"
if near_context:
pattern_desc += f" near context '{near_context}'"
start_pattern_indices = find_pattern_indices(lines, start_pattern, near_context)
start_line_idx = select_occurrence_index(start_pattern_indices, occurrence, pattern_desc)
# 3. Determine the end line, passing pattern_desc for better error messages
start_line, end_line = determine_line_range(
coder=coder,
file_path=rel_path,
lines=lines,
start_pattern_line_index=start_line_idx,
end_pattern=end_pattern,
line_count=line_count,
target_symbol=None, # DeleteBlock uses patterns, not symbols
pattern_desc=pattern_desc
)
# 4. Prepare the deletion
deleted_lines = lines[start_line:end_line+1]
new_lines = lines[:start_line] + lines[end_line+1:]
new_content = '\n'.join(new_lines)
if original_content == new_content:
coder.io.tool_warning(f"No changes made: deletion would not change file")
return f"Warning: No changes made (deletion would not change file)"
# 5. Generate diff for feedback
diff_snippet = generate_unified_diff_snippet(original_content, new_content, rel_path)
num_deleted = end_line - start_line + 1
num_occurrences = len(start_pattern_indices)
occurrence_str = f"occurrence {occurrence} of " if num_occurrences > 1 else ""
# 6. Handle dry run
if dry_run:
dry_run_message = f"Dry run: Would delete {num_deleted} lines ({start_line+1}-{end_line+1}) based on {occurrence_str}start pattern '{start_pattern}' in {file_path}."
return format_tool_result(coder, tool_name, "", dry_run=True, dry_run_message=dry_run_message, diff_snippet=diff_snippet)
# 7. Apply Change (Not dry run)
metadata = {
'start_line': start_line + 1,
'end_line': end_line + 1,
'start_pattern': start_pattern,
'end_pattern': end_pattern,
'line_count': line_count,
'near_context': near_context,
'occurrence': occurrence,
'deleted_content': '\n'.join(deleted_lines)
}
final_change_id = apply_change(
coder, abs_path, rel_path, original_content, new_content, 'deleteblock', metadata, change_id
)
# 8. Format and return result, adding line range to success message
success_message = f"Deleted {num_deleted} lines ({start_line+1}-{end_line+1}) (from {occurrence_str}start pattern) in {file_path}"
return format_tool_result(
coder, tool_name, success_message, change_id=final_change_id, diff_snippet=diff_snippet
)
except ToolError as e:
# Handle errors raised by utility functions (expected errors)
return handle_tool_error(coder, tool_name, e, add_traceback=False)
except Exception as e:
# Handle unexpected errors
return handle_tool_error(coder, tool_name, e)
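A usage sketch with hypothetical patterns; `dry_run=True` previews the unified diff without touching the file (the file must already be editable in context):

```
result = _execute_delete_block(
    coder,
    file_path="utils.py",
    start_pattern="def old_helper",
    end_pattern="return result",
    dry_run=True,
)
```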

View file

@@ -0,0 +1,92 @@
import os
import traceback
from .tool_utils import ToolError, generate_unified_diff_snippet, handle_tool_error, format_tool_result, apply_change
def _execute_delete_line(coder, file_path, line_number, change_id=None, dry_run=False):
"""
Delete a specific line number (1-based).
Parameters:
- coder: The Coder instance
- file_path: Path to the file to modify
- line_number: The 1-based line number to delete
- change_id: Optional ID for tracking the change
- dry_run: If True, simulate the change without modifying the file
Returns a result message.
"""
tool_name = "DeleteLine"
try:
# Get absolute file path
abs_path = coder.abs_root_path(file_path)
rel_path = coder.get_rel_fname(abs_path)
# Check if file exists
if not os.path.isfile(abs_path):
raise ToolError(f"File '{file_path}' not found")
# Check if file is in editable context
if abs_path not in coder.abs_fnames:
if abs_path in coder.abs_read_only_fnames:
raise ToolError(f"File '{file_path}' is read-only. Use MakeEditable first.")
else:
raise ToolError(f"File '{file_path}' not in context")
# Reread file content immediately before modification
file_content = coder.io.read_text(abs_path)
if file_content is None:
raise ToolError(f"Could not read file '{file_path}'")
lines = file_content.splitlines()
original_content = file_content
# Validate line number
try:
line_num_int = int(line_number)
if line_num_int < 1 or line_num_int > len(lines):
raise ToolError(f"Line number {line_num_int} is out of range (1-{len(lines)})")
line_idx = line_num_int - 1 # Convert to 0-based index
except ValueError:
raise ToolError(f"Invalid line_number value: '{line_number}'. Must be an integer.")
# Prepare the deletion
deleted_line = lines[line_idx]
new_lines = lines[:line_idx] + lines[line_idx+1:]
new_content = '\n'.join(new_lines)
if original_content == new_content:
coder.io.tool_warning(f"No changes made: deleting line {line_num_int} would not change file")
return f"Warning: No changes made (deleting line {line_num_int} would not change file)"
# Generate diff snippet
diff_snippet = generate_unified_diff_snippet(original_content, new_content, rel_path)
# Handle dry run
if dry_run:
dry_run_message = f"Dry run: Would delete line {line_num_int} in {file_path}"
return format_tool_result(coder, tool_name, "", dry_run=True, dry_run_message=dry_run_message, diff_snippet=diff_snippet)
# --- Apply Change (Not dry run) ---
metadata = {
'line_number': line_num_int,
'deleted_content': deleted_line
}
final_change_id = apply_change(
coder, abs_path, rel_path, original_content, new_content, 'deleteline', metadata, change_id
)
coder.aider_edited_files.add(rel_path)
# Format and return result
success_message = f"Deleted line {line_num_int} in {file_path}"
return format_tool_result(
coder, tool_name, success_message, change_id=final_change_id, diff_snippet=diff_snippet
)
except ToolError as e:
# Handle errors raised by utility functions (expected errors)
return handle_tool_error(coder, tool_name, e, add_traceback=False)
except Exception as e:
# Handle unexpected errors
return handle_tool_error(coder, tool_name, e)

102
aider/tools/delete_lines.py Normal file
View file

@@ -0,0 +1,102 @@
import os
import traceback
from .tool_utils import ToolError, generate_unified_diff_snippet, handle_tool_error, format_tool_result, apply_change
def _execute_delete_lines(coder, file_path, start_line, end_line, change_id=None, dry_run=False):
"""
Delete a range of lines (1-based, inclusive).
Parameters:
- coder: The Coder instance
- file_path: Path to the file to modify
- start_line: The 1-based starting line number to delete
- end_line: The 1-based ending line number to delete
- change_id: Optional ID for tracking the change
- dry_run: If True, simulate the change without modifying the file
Returns a result message.
"""
tool_name = "DeleteLines"
try:
# Get absolute file path
abs_path = coder.abs_root_path(file_path)
rel_path = coder.get_rel_fname(abs_path)
# Check if file exists
if not os.path.isfile(abs_path):
raise ToolError(f"File '{file_path}' not found")
# Check if file is in editable context
if abs_path not in coder.abs_fnames:
if abs_path in coder.abs_read_only_fnames:
raise ToolError(f"File '{file_path}' is read-only. Use MakeEditable first.")
else:
raise ToolError(f"File '{file_path}' not in context")
# Reread file content immediately before modification
file_content = coder.io.read_text(abs_path)
if file_content is None:
raise ToolError(f"Could not read file '{file_path}'")
lines = file_content.splitlines()
original_content = file_content
# Validate line numbers
try:
start_line_int = int(start_line)
end_line_int = int(end_line)
if start_line_int < 1 or start_line_int > len(lines):
raise ToolError(f"Start line {start_line_int} is out of range (1-{len(lines)})")
if end_line_int < 1 or end_line_int > len(lines):
raise ToolError(f"End line {end_line_int} is out of range (1-{len(lines)})")
if start_line_int > end_line_int:
raise ToolError(f"Start line {start_line_int} cannot be after end line {end_line_int}")
start_idx = start_line_int - 1 # Convert to 0-based index
end_idx = end_line_int - 1 # Convert to 0-based index
except ValueError:
raise ToolError(f"Invalid line numbers: '{start_line}', '{end_line}'. Must be integers.")
# Prepare the deletion
deleted_lines = lines[start_idx:end_idx+1]
new_lines = lines[:start_idx] + lines[end_idx+1:]
new_content = '\n'.join(new_lines)
if original_content == new_content:
coder.io.tool_warning(f"No changes made: deleting lines {start_line_int}-{end_line_int} would not change file")
return f"Warning: No changes made (deleting lines {start_line_int}-{end_line_int} would not change file)"
# Generate diff snippet
diff_snippet = generate_unified_diff_snippet(original_content, new_content, rel_path)
# Handle dry run
if dry_run:
dry_run_message = f"Dry run: Would delete lines {start_line_int}-{end_line_int} in {file_path}"
return format_tool_result(coder, tool_name, "", dry_run=True, dry_run_message=dry_run_message, diff_snippet=diff_snippet)
# --- Apply Change (Not dry run) ---
metadata = {
'start_line': start_line_int,
'end_line': end_line_int,
'deleted_content': '\n'.join(deleted_lines)
}
final_change_id = apply_change(
coder, abs_path, rel_path, original_content, new_content, 'deletelines', metadata, change_id
)
coder.aider_edited_files.add(rel_path)
num_deleted = end_idx - start_idx + 1
# Format and return result
success_message = f"Deleted {num_deleted} lines ({start_line_int}-{end_line_int}) in {file_path}"
return format_tool_result(
coder, tool_name, success_message, change_id=final_change_id, diff_snippet=diff_snippet
)
except ToolError as e:
# Handle errors raised by utility functions (expected errors)
return handle_tool_error(coder, tool_name, e, add_traceback=False)
except Exception as e:
# Handle unexpected errors
return handle_tool_error(coder, tool_name, e)

View file

@@ -0,0 +1,221 @@
import os
import traceback
from .tool_utils import generate_unified_diff_snippet
def _execute_extract_lines(coder, source_file_path, target_file_path, start_pattern, end_pattern=None, line_count=None, near_context=None, occurrence=1, dry_run=False):
"""
Extract a range of lines from a source file and move them to a target file.
Parameters:
- coder: The Coder instance
- source_file_path: Path to the file to extract lines from
- target_file_path: Path to the file to append extracted lines to (will be created if needed)
- start_pattern: Pattern marking the start of the block to extract
- end_pattern: Optional pattern marking the end of the block
- line_count: Optional number of lines to extract (alternative to end_pattern)
- near_context: Optional text nearby to help locate the correct instance of the start_pattern
- occurrence: Which occurrence of the start_pattern to use (1-based index, or -1 for last)
- dry_run: If True, simulate the change without modifying files
Returns a result message.
"""
try:
# --- Validate Source File ---
abs_source_path = coder.abs_root_path(source_file_path)
rel_source_path = coder.get_rel_fname(abs_source_path)
if not os.path.isfile(abs_source_path):
coder.io.tool_error(f"Source file '{source_file_path}' not found")
return f"Error: Source file not found"
if abs_source_path not in coder.abs_fnames:
if abs_source_path in coder.abs_read_only_fnames:
coder.io.tool_error(f"Source file '{source_file_path}' is read-only. Use MakeEditable first.")
return f"Error: Source file is read-only. Use MakeEditable first."
else:
coder.io.tool_error(f"Source file '{source_file_path}' not in context")
return f"Error: Source file not in context"
# --- Validate Target File ---
abs_target_path = coder.abs_root_path(target_file_path)
rel_target_path = coder.get_rel_fname(abs_target_path)
target_exists = os.path.isfile(abs_target_path)
target_is_editable = abs_target_path in coder.abs_fnames
target_is_readonly = abs_target_path in coder.abs_read_only_fnames
if target_exists and not target_is_editable:
if target_is_readonly:
coder.io.tool_error(f"Target file '{target_file_path}' exists but is read-only. Use MakeEditable first.")
return f"Error: Target file exists but is read-only. Use MakeEditable first."
else:
# This case shouldn't happen if file exists, but handle defensively
coder.io.tool_error(f"Target file '{target_file_path}' exists but is not in context. Add it first.")
return f"Error: Target file exists but is not in context."
# --- Read Source Content ---
source_content = coder.io.read_text(abs_source_path)
if source_content is None:
coder.io.tool_error(f"Could not read source file '{source_file_path}' before ExtractLines operation.")
return f"Error: Could not read source file '{source_file_path}'"
# --- Find Extraction Range ---
if end_pattern and line_count:
coder.io.tool_error("Cannot specify both end_pattern and line_count")
return "Error: Cannot specify both end_pattern and line_count"
source_lines = source_content.splitlines()
original_source_content = source_content
start_pattern_line_indices = []
for i, line in enumerate(source_lines):
if start_pattern in line:
if near_context:
context_window_start = max(0, i - 5)
context_window_end = min(len(source_lines), i + 6)
context_block = "\n".join(source_lines[context_window_start:context_window_end])
if near_context in context_block:
start_pattern_line_indices.append(i)
else:
start_pattern_line_indices.append(i)
if not start_pattern_line_indices:
err_msg = f"Start pattern '{start_pattern}' not found"
if near_context: err_msg += f" near context '{near_context}'"
err_msg += f" in source file '{source_file_path}'."
coder.io.tool_error(err_msg)
return f"Error: {err_msg}"
num_occurrences = len(start_pattern_line_indices)
try:
occurrence = int(occurrence)
if occurrence == -1:
target_idx = num_occurrences - 1
elif occurrence > 0 and occurrence <= num_occurrences:
target_idx = occurrence - 1
else:
err_msg = f"Occurrence number {occurrence} is out of range for start pattern '{start_pattern}'. Found {num_occurrences} occurrences"
if near_context: err_msg += f" near '{near_context}'"
err_msg += f" in '{source_file_path}'."
coder.io.tool_error(err_msg)
return f"Error: {err_msg}"
except ValueError:
coder.io.tool_error(f"Invalid occurrence value: '{occurrence}'. Must be an integer.")
return f"Error: Invalid occurrence value '{occurrence}'"
start_line = start_pattern_line_indices[target_idx]
occurrence_str = f"occurrence {occurrence} of " if num_occurrences > 1 else ""
end_line = -1
if end_pattern:
for i in range(start_line, len(source_lines)):
if end_pattern in source_lines[i]:
end_line = i
break
if end_line == -1:
err_msg = f"End pattern '{end_pattern}' not found after {occurrence_str}start pattern '{start_pattern}' (line {start_line + 1}) in '{source_file_path}'."
coder.io.tool_error(err_msg)
return f"Error: {err_msg}"
elif line_count:
try:
line_count = int(line_count)
if line_count <= 0: raise ValueError("Line count must be positive")
end_line = min(start_line + line_count - 1, len(source_lines) - 1)
except ValueError:
coder.io.tool_error(f"Invalid line_count value: '{line_count}'. Must be a positive integer.")
return f"Error: Invalid line_count value '{line_count}'"
else:
end_line = start_line # Extract just the start line if no end specified
# --- Prepare Content Changes ---
extracted_lines = source_lines[start_line:end_line+1]
new_source_lines = source_lines[:start_line] + source_lines[end_line+1:]
new_source_content = '\n'.join(new_source_lines)
target_content = ""
if target_exists:
target_content = coder.io.read_text(abs_target_path)
if target_content is None:
coder.io.tool_error(f"Could not read existing target file '{target_file_path}'.")
return f"Error: Could not read target file '{target_file_path}'"
original_target_content = target_content # For tracking
# Append extracted lines to target content, ensuring a newline if target wasn't empty
extracted_block = '\n'.join(extracted_lines)
if target_content and not target_content.endswith('\n'):
target_content += '\n' # Add newline before appending if needed
new_target_content = target_content + extracted_block
# --- Generate Diffs ---
source_diff_snippet = generate_unified_diff_snippet(original_source_content, new_source_content, rel_source_path)
target_insertion_line = len(target_content.splitlines()) if target_content else 0
target_diff_snippet = generate_unified_diff_snippet(original_target_content, new_target_content, rel_target_path)
# --- Handle Dry Run ---
if dry_run:
num_extracted = end_line - start_line + 1
target_action = "append to" if target_exists else "create"
coder.io.tool_output(f"Dry run: Would extract {num_extracted} lines (from {occurrence_str}start pattern '{start_pattern}') in {source_file_path} and {target_action} {target_file_path}")
# Provide more informative dry run response with diffs
return (
f"Dry run: Would extract {num_extracted} lines from {rel_source_path} and {target_action} {rel_target_path}.\n"
f"Source Diff (Deletion):\n{source_diff_snippet}\n"
f"Target Diff (Insertion):\n{target_diff_snippet}"
)
# --- Apply Changes (Not Dry Run) ---
coder.io.write_text(abs_source_path, new_source_content)
coder.io.write_text(abs_target_path, new_target_content)
# --- Track Changes ---
source_change_id = "TRACKING_FAILED"
target_change_id = "TRACKING_FAILED"
try:
source_metadata = {
'start_line': start_line + 1, 'end_line': end_line + 1,
'start_pattern': start_pattern, 'end_pattern': end_pattern, 'line_count': line_count,
'near_context': near_context, 'occurrence': occurrence,
'extracted_content': extracted_block, 'target_file': rel_target_path
}
source_change_id = coder.change_tracker.track_change(
file_path=rel_source_path, change_type='extractlines_source',
original_content=original_source_content, new_content=new_source_content,
metadata=source_metadata
)
except Exception as track_e:
coder.io.tool_error(f"Error tracking source change for ExtractLines: {track_e}")
try:
target_metadata = {
'insertion_line': target_insertion_line + 1,
'inserted_content': extracted_block, 'source_file': rel_source_path
}
target_change_id = coder.change_tracker.track_change(
file_path=rel_target_path, change_type='extractlines_target',
original_content=original_target_content, new_content=new_target_content,
metadata=target_metadata
)
except Exception as track_e:
coder.io.tool_error(f"Error tracking target change for ExtractLines: {track_e}")
# --- Update Context ---
coder.aider_edited_files.add(rel_source_path)
coder.aider_edited_files.add(rel_target_path)
if not target_exists:
# Add the newly created file to editable context
coder.abs_fnames.add(abs_target_path)
coder.io.tool_output(f"✨ Created and added '{target_file_path}' to editable context.")
# --- Return Result ---
num_extracted = end_line - start_line + 1
target_action = "appended to" if target_exists else "created"
coder.io.tool_output(f"✅ Extracted {num_extracted} lines from {rel_source_path} (change_id: {source_change_id}) and {target_action} {rel_target_path} (change_id: {target_change_id})")
# Provide more informative success response with change IDs and diffs
return (
f"Successfully extracted {num_extracted} lines from {rel_source_path} and {target_action} {rel_target_path}.\n"
f"Source Change ID: {source_change_id}\nSource Diff (Deletion):\n{source_diff_snippet}\n"
f"Target Change ID: {target_change_id}\nTarget Diff (Insertion):\n{target_diff_snippet}"
)
except Exception as e:
coder.io.tool_error(f"Error in ExtractLines: {str(e)}\n{traceback.format_exc()}")
return f"Error: {str(e)}"

156
aider/tools/grep.py Normal file
View file

@@ -0,0 +1,156 @@
import shlex
import shutil
from pathlib import Path
from aider.run_cmd import run_cmd_subprocess
def _find_search_tool():
"""Find the best available command-line search tool (rg, ag, grep)."""
if shutil.which('rg'):
return 'rg', shutil.which('rg')
elif shutil.which('ag'):
return 'ag', shutil.which('ag')
elif shutil.which('grep'):
return 'grep', shutil.which('grep')
else:
return None, None
def _execute_grep(coder, pattern, file_pattern="*", directory=".", use_regex=False, case_insensitive=False, context_before=5, context_after=5):
"""
Search for lines matching a pattern in files within the project repository.
Uses rg (ripgrep), ag (the silver searcher), or grep, whichever is available.
Args:
coder: The Coder instance.
pattern (str): The pattern to search for.
file_pattern (str, optional): Glob pattern to filter files. Defaults to "*".
directory (str, optional): Directory to search within relative to repo root. Defaults to ".".
use_regex (bool, optional): Whether the pattern is a regular expression. Defaults to False.
case_insensitive (bool, optional): Whether the search should be case-insensitive. Defaults to False.
context_before (int, optional): Number of context lines to show before matches. Defaults to 5.
context_after (int, optional): Number of context lines to show after matches. Defaults to 5.
Returns:
str: Formatted result indicating success or failure, including matching lines or error message.
"""
repo = coder.repo
if not repo:
coder.io.tool_error("Not in a git repository.")
return "Error: Not in a git repository."
tool_name, tool_path = _find_search_tool()
if not tool_path:
coder.io.tool_error("No search tool (rg, ag, grep) found in PATH.")
return "Error: No search tool (rg, ag, grep) found."
try:
search_dir_path = Path(repo.root) / directory
if not search_dir_path.is_dir():
coder.io.tool_error(f"Directory not found: {directory}")
return f"Error: Directory not found: {directory}"
# Build the command arguments based on the available tool
cmd_args = [tool_path]
# Common options or tool-specific equivalents
if tool_name in ['rg', 'grep']:
cmd_args.append("-n") # Line numbers for rg and grep
# ag includes line numbers by default
# Context lines (Before and After)
if context_before > 0:
# All tools use -B for lines before
cmd_args.extend(["-B", str(context_before)])
if context_after > 0:
# All tools use -A for lines after
cmd_args.extend(["-A", str(context_after)])
# Case sensitivity
if case_insensitive:
cmd_args.append("-i") # Add case-insensitivity flag for all tools
# Pattern type (regex vs fixed string)
if use_regex:
if tool_name == 'grep':
cmd_args.append("-E") # Use extended regex for grep
# rg and ag use regex by default, no flag needed for basic ERE
else:
if tool_name == 'rg':
cmd_args.append("-F") # Fixed strings for rg
elif tool_name == 'ag':
cmd_args.append("-Q") # Literal/fixed strings for ag
elif tool_name == 'grep':
cmd_args.append("-F") # Fixed strings for grep
# File filtering
if file_pattern != "*": # Avoid adding glob if it's the default '*' which might behave differently
if tool_name == 'rg':
cmd_args.extend(["-g", file_pattern])
elif tool_name == 'ag':
cmd_args.extend(["-G", file_pattern])
elif tool_name == 'grep':
# grep needs recursive flag when filtering
cmd_args.append("-r")
cmd_args.append(f"--include={file_pattern}")
elif tool_name == 'grep':
# grep needs recursive flag even without include filter
cmd_args.append("-r")
# Directory exclusion (rg and ag respect .gitignore/.git by default)
if tool_name == 'grep':
cmd_args.append("--exclude-dir=.git")
# Add pattern and directory path
cmd_args.extend([pattern, str(search_dir_path)])
# Convert list to command string for run_cmd_subprocess
command_string = shlex.join(cmd_args)
coder.io.tool_output(f"⚙️ Executing {tool_name}: {command_string}")
# Use run_cmd_subprocess for execution
# Note: rg, ag, and grep return 1 if no matches are found, which is not an error for this tool.
exit_status, combined_output = run_cmd_subprocess(
command_string,
verbose=coder.verbose,
cwd=coder.root # Execute in the project root
)
# Format the output for the result message
output_content = combined_output or ""
# Handle exit codes (consistent across rg, ag, grep)
if exit_status == 0:
# Limit output size if necessary
max_output_lines = 50 # Consider making this configurable
output_lines = output_content.splitlines()
if len(output_lines) > max_output_lines:
truncated_output = "\n".join(output_lines[:max_output_lines])
result_message = f"Found matches (truncated):\n```text\n{truncated_output}\n... ({len(output_lines) - max_output_lines} more lines)\n```"
elif not output_content:
# Should not happen if return code is 0, but handle defensively
coder.io.tool_warning(f"{tool_name} returned 0 but produced no output.")
result_message = "No matches found (unexpected)."
else:
result_message = f"Found matches:\n```text\n{output_content}\n```"
return result_message
elif exit_status == 1:
# Exit code 1 means no matches found - this is expected behavior, not an error.
return "No matches found."
else:
# Exit code > 1 indicates an actual error
error_message = f"{tool_name.capitalize()} command failed with exit code {exit_status}."
if output_content:
# Truncate error output as well if it's too long
error_limit = 1000 # Example limit for error output
if len(output_content) > error_limit:
output_content = output_content[:error_limit] + "\n... (error output truncated)"
error_message += f" Output:\n{output_content}"
coder.io.tool_error(error_message)
return f"Error: {error_message}"
except Exception as e:
# Add command_string to the error message if it's defined
cmd_str_info = f"'{command_string}' " if 'command_string' in locals() else ""
coder.io.tool_error(f"Error executing {tool_name} command {cmd_str_info}: {str(e)}")
return f"Error executing {tool_name}: {str(e)}"

124
aider/tools/indent_lines.py Normal file
View file

@@ -0,0 +1,124 @@
import os
import traceback
from .tool_utils import (
ToolError,
validate_file_for_edit,
find_pattern_indices,
select_occurrence_index,
determine_line_range,
apply_change,
handle_tool_error,
format_tool_result,
generate_unified_diff_snippet,
)
def _execute_indent_lines(coder, file_path, start_pattern, end_pattern=None, line_count=None, indent_levels=1, near_context=None, occurrence=1, change_id=None, dry_run=False):
"""
Indent or unindent a block of lines in a file using utility functions.
Parameters:
- coder: The Coder instance
- file_path: Path to the file to modify
- start_pattern: Pattern marking the start of the block to indent (line containing this pattern)
- end_pattern: Optional pattern marking the end of the block (line containing this pattern)
- line_count: Optional number of lines to indent (alternative to end_pattern)
- indent_levels: Number of levels to indent (positive) or unindent (negative)
- near_context: Optional text nearby to help locate the correct instance of the start_pattern
- occurrence: Which occurrence of the start_pattern to use (1-based index, or -1 for last)
- change_id: Optional ID for tracking the change
- dry_run: If True, simulate the change without modifying the file
Returns a result message.
"""
tool_name = "IndentLines"
try:
# 1. Validate file and get content
abs_path, rel_path, original_content = validate_file_for_edit(coder, file_path)
lines = original_content.splitlines()
# 2. Find the start line
pattern_desc = f"Start pattern '{start_pattern}'"
if near_context:
pattern_desc += f" near context '{near_context}'"
start_pattern_indices = find_pattern_indices(lines, start_pattern, near_context)
start_line_idx = select_occurrence_index(start_pattern_indices, occurrence, pattern_desc)
# 3. Determine the end line
start_line, end_line = determine_line_range(
coder=coder,
file_path=rel_path,
lines=lines,
start_pattern_line_index=start_line_idx,
end_pattern=end_pattern,
line_count=line_count,
target_symbol=None, # IndentLines uses patterns, not symbols
pattern_desc=pattern_desc
)
# 4. Validate and prepare indentation
try:
indent_levels = int(indent_levels)
except ValueError:
raise ToolError(f"Invalid indent_levels value: '{indent_levels}'. Must be an integer.")
indent_str = ' ' * 4 # Assume 4 spaces per level
modified_lines = list(lines)
# Apply indentation logic (core logic remains)
for i in range(start_line, end_line + 1):
if indent_levels > 0:
modified_lines[i] = (indent_str * indent_levels) + modified_lines[i]
elif indent_levels < 0:
spaces_to_remove = abs(indent_levels) * len(indent_str)
current_leading_spaces = len(modified_lines[i]) - len(modified_lines[i].lstrip(' '))
actual_remove = min(spaces_to_remove, current_leading_spaces)
if actual_remove > 0:
modified_lines[i] = modified_lines[i][actual_remove:]
new_content = '\n'.join(modified_lines)
if original_content == new_content:
coder.io.tool_warning(f"No changes made: indentation would not change file")
return f"Warning: No changes made (indentation would not change file)"
# 5. Generate diff for feedback
diff_snippet = generate_unified_diff_snippet(original_content, new_content, rel_path)
num_occurrences = len(start_pattern_indices)
occurrence_str = f"occurrence {occurrence} of " if num_occurrences > 1 else ""
action = "indent" if indent_levels > 0 else "unindent"
levels = abs(indent_levels)
level_text = "level" if levels == 1 else "levels"
num_lines = end_line - start_line + 1
# 6. Handle dry run
if dry_run:
dry_run_message = f"Dry run: Would {action} {num_lines} lines ({start_line+1}-{end_line+1}) by {levels} {level_text} (based on {occurrence_str}start pattern '{start_pattern}') in {file_path}."
return format_tool_result(coder, tool_name, "", dry_run=True, dry_run_message=dry_run_message, diff_snippet=diff_snippet)
# 7. Apply Change (Not dry run)
metadata = {
'start_line': start_line + 1,
'end_line': end_line + 1,
'start_pattern': start_pattern,
'end_pattern': end_pattern,
'line_count': line_count,
'indent_levels': indent_levels,
'near_context': near_context,
'occurrence': occurrence,
}
final_change_id = apply_change(
coder, abs_path, rel_path, original_content, new_content, 'indentlines', metadata, change_id
)
# 8. Format and return result
action_past = "Indented" if indent_levels > 0 else "Unindented"
success_message = f"{action_past} {num_lines} lines by {levels} {level_text} (from {occurrence_str}start pattern) in {file_path}"
return format_tool_result(
coder, tool_name, success_message, change_id=final_change_id, diff_snippet=diff_snippet
)
except ToolError as e:
# Handle errors raised by utility functions (expected errors)
return handle_tool_error(coder, tool_name, e, add_traceback=False)
except Exception as e:
# Handle unexpected errors
return handle_tool_error(coder, tool_name, e)
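A worked example of the unindent clamping above (indent width is the hard-coded 4 spaces):

```
line = "  x = 1"                              # only 2 leading spaces
spaces_to_remove = 4                          # indent_levels = -1
current = len(line) - len(line.lstrip(' '))   # 2
line = line[min(spaces_to_remove, current):]  # "x = 1", never over-strips
```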

166
aider/tools/insert_block.py Normal file
View file

@@ -0,0 +1,166 @@
import os
import re
import traceback
from .tool_utils import (
ToolError,
validate_file_for_edit,
find_pattern_indices,
select_occurrence_index,
apply_change,
handle_tool_error,
format_tool_result,
generate_unified_diff_snippet,
)
def _execute_insert_block(coder, file_path, content, after_pattern=None, before_pattern=None,
occurrence=1, change_id=None, dry_run=False,
position=None, auto_indent=True, use_regex=False):
"""
Insert a block of text after or before a specified pattern using utility functions.
Args:
coder: The coder instance
file_path: Path to the file to modify
content: The content to insert
after_pattern: Pattern to insert after (mutually exclusive with before_pattern and position)
before_pattern: Pattern to insert before (mutually exclusive with after_pattern and position)
occurrence: Which occurrence of the pattern to use (1-based, or -1 for last)
change_id: Optional ID for tracking changes
dry_run: If True, only simulate the change
position: Special position like "start_of_file" or "end_of_file"
auto_indent: If True, automatically adjust indentation of inserted content
use_regex: If True, treat patterns as regular expressions
"""
tool_name = "InsertBlock"
try:
# 1. Validate parameters
if sum(x is not None for x in [after_pattern, before_pattern, position]) != 1:
raise ToolError("Must specify exactly one of: after_pattern, before_pattern, or position")
# 2. Validate file and get content
abs_path, rel_path, original_content = validate_file_for_edit(coder, file_path)
lines = original_content.splitlines()
# Handle empty files
if not lines:
lines = [""]
# 3. Determine insertion point
insertion_line_idx = 0
pattern_type = ""
pattern_desc = ""
occurrence_str = ""
if position:
# Handle special positions
if position == "start_of_file":
insertion_line_idx = 0
pattern_type = "at start of"
elif position == "end_of_file":
insertion_line_idx = len(lines)
pattern_type = "at end of"
else:
raise ToolError(f"Invalid position: '{position}'. Valid values are 'start_of_file' or 'end_of_file'")
else:
# Handle pattern-based insertion
pattern = after_pattern if after_pattern else before_pattern
pattern_type = "after" if after_pattern else "before"
pattern_desc = f"Pattern '{pattern}'"
# Find pattern matches
pattern_line_indices = find_pattern_indices(lines, pattern,
use_regex=use_regex)
# Select the target occurrence
target_line_idx = select_occurrence_index(pattern_line_indices, occurrence, pattern_desc)
# Determine insertion point
insertion_line_idx = target_line_idx
if pattern_type == "after":
insertion_line_idx += 1 # Insert on the line *after* the matched line
# Format occurrence info for output
num_occurrences = len(pattern_line_indices)
occurrence_str = f"occurrence {occurrence} of " if num_occurrences > 1 else ""
# 4. Handle indentation if requested
content_lines = content.splitlines()
if auto_indent and content_lines:
# Determine base indentation level
base_indent = ""
if insertion_line_idx > 0 and lines:
# Use indentation from the line before insertion point
reference_line_idx = min(insertion_line_idx - 1, len(lines) - 1)
reference_line = lines[reference_line_idx]
base_indent = re.match(r'^(\s*)', reference_line).group(1)
# Apply indentation to content lines, preserving relative indentation
if content_lines:
# Find minimum indentation in content to preserve relative indentation
content_indents = [len(re.match(r'^(\s*)', line).group(1)) for line in content_lines if line.strip()]
min_content_indent = min(content_indents) if content_indents else 0
# Apply base indentation while preserving relative indentation
indented_content_lines = []
for line in content_lines:
if not line.strip(): # Empty or whitespace-only line
indented_content_lines.append("")
else:
# Remove existing indentation and add new base indentation
stripped_line = line[min_content_indent:] if min_content_indent <= len(line) else line
indented_content_lines.append(base_indent + stripped_line)
content_lines = indented_content_lines
# 5. Prepare the insertion
new_lines = lines[:insertion_line_idx] + content_lines + lines[insertion_line_idx:]
new_content = '\n'.join(new_lines)
if original_content == new_content:
coder.io.tool_warning(f"No changes made: insertion would not change file")
return f"Warning: No changes made (insertion would not change file)"
# 6. Generate diff for feedback
diff_snippet = generate_unified_diff_snippet(original_content, new_content, rel_path)
# 7. Handle dry run
if dry_run:
if position:
dry_run_message = f"Dry run: Would insert block {pattern_type} {file_path}."
else:
dry_run_message = f"Dry run: Would insert block {pattern_type} {occurrence_str}pattern '{pattern}' in {file_path} at line {insertion_line_idx + 1}."
return format_tool_result(coder, tool_name, "", dry_run=True, dry_run_message=dry_run_message, diff_snippet=diff_snippet)
# 8. Apply Change (Not dry run)
metadata = {
'insertion_line_idx': insertion_line_idx,
'after_pattern': after_pattern,
'before_pattern': before_pattern,
'position': position,
'occurrence': occurrence,
'content': content,
'auto_indent': auto_indent,
'use_regex': use_regex
}
final_change_id = apply_change(
coder, abs_path, rel_path, original_content, new_content, 'insertblock', metadata, change_id
)
# 9. Format and return result
if position:
success_message = f"Inserted block {pattern_type} {file_path}"
else:
success_message = f"Inserted block {pattern_type} {occurrence_str}pattern in {file_path} at line {insertion_line_idx + 1}"
return format_tool_result(
coder, tool_name, success_message, change_id=final_change_id, diff_snippet=diff_snippet
)
except ToolError as e:
# Handle errors raised by utility functions (expected errors)
return handle_tool_error(coder, tool_name, e, add_traceback=False)
except Exception as e:
coder.io.tool_error(f"Error in InsertBlock: {str(e)}\n{traceback.format_exc()}") # Add traceback
return f"Error: {str(e)}"

View file

@@ -0,0 +1,46 @@
import traceback
from datetime import datetime
def _execute_list_changes(coder, file_path=None, limit=10):
"""
List recent changes made to files.
Parameters:
- coder: The Coder instance
- file_path: Optional path to filter changes by file
- limit: Maximum number of changes to list
Returns a formatted list of changes.
"""
try:
# If file_path is specified, get the absolute path
rel_file_path = None
if file_path:
abs_path = coder.abs_root_path(file_path)
rel_file_path = coder.get_rel_fname(abs_path)
# Get the list of changes
changes = coder.change_tracker.list_changes(rel_file_path, limit)
if not changes:
if file_path:
return f"No changes found for file '{file_path}'"
else:
return "No changes have been made yet"
# Format the changes into a readable list
result = "Recent changes:\n"
        for i, change in enumerate(changes):
            change_time = datetime.fromtimestamp(change['timestamp']).strftime('%H:%M:%S')
            change_type = change['type']
            changed_file = change['file_path']  # don't shadow the file_path parameter
            change_id = change['id']
            result += f"{i+1}. [{change_id}] {change_time} - {change_type.upper()} on {changed_file}\n"
coder.io.tool_output(result) # Also print to console for user
return result
except Exception as e:
coder.io.tool_error(f"Error in ListChanges: {str(e)}\n{traceback.format_exc()}") # Add traceback
return f"Error: {str(e)}"

48
aider/tools/ls.py Normal file
View file

@@ -0,0 +1,48 @@
import os
def execute_ls(coder, dir_path):
"""
List files in directory and optionally add some to context.
This provides information about the structure of the codebase,
similar to how a developer would explore directories.
"""
try:
# Make the path relative to root if it's absolute
if dir_path.startswith('/'):
rel_dir = os.path.relpath(dir_path, coder.root)
else:
rel_dir = dir_path
# Get absolute path
abs_dir = coder.abs_root_path(rel_dir)
# Check if path exists
if not os.path.exists(abs_dir):
coder.io.tool_output(f"⚠️ Directory '{dir_path}' not found")
return f"Directory not found"
# Get directory contents
contents = []
try:
with os.scandir(abs_dir) as entries:
for entry in entries:
if entry.is_file() and not entry.name.startswith('.'):
rel_path = os.path.join(rel_dir, entry.name)
contents.append(rel_path)
except NotADirectoryError:
# If it's a file, just return the file
contents = [rel_dir]
if contents:
coder.io.tool_output(f"📋 Listed {len(contents)} file(s) in '{dir_path}'")
if len(contents) > 10:
return f"Found {len(contents)} files: {', '.join(contents[:10])}..."
else:
return f"Found {len(contents)} files: {', '.join(contents)}"
else:
coder.io.tool_output(f"📋 No files found in '{dir_path}'")
return f"No files found in directory"
except Exception as e:
coder.io.tool_error(f"Error in ls: {str(e)}")
return f"Error: {str(e)}"

View file

@@ -0,0 +1,46 @@
import os
# Keep the underscore prefix as this function is primarily for internal coder use
def _execute_make_editable(coder, file_path):
"""
Convert a read-only file to an editable file.
This allows the LLM to upgrade a file from read-only to editable
when it determines it needs to make changes to that file.
"""
try:
# Get absolute path
abs_path = coder.abs_root_path(file_path)
rel_path = coder.get_rel_fname(abs_path)
# Check if file is already editable
if abs_path in coder.abs_fnames:
coder.io.tool_output(f"📝 File '{file_path}' is already editable")
return f"File is already editable"
# Check if file exists on disk
if not os.path.isfile(abs_path):
coder.io.tool_output(f"⚠️ File '{file_path}' not found")
return f"Error: File not found"
# File exists, is not editable, might be read-only or not in context yet
was_read_only = False
if abs_path in coder.abs_read_only_fnames:
coder.abs_read_only_fnames.remove(abs_path)
was_read_only = True
# Add to editable files
coder.abs_fnames.add(abs_path)
if was_read_only:
coder.io.tool_output(f"📝 Moved '{file_path}' from read-only to editable")
return f"File is now editable (moved from read-only)"
else:
# File was not previously in context at all
coder.io.tool_output(f"📝 Added '{file_path}' directly to editable context")
# Track if added during exploration? Maybe not needed for direct MakeEditable.
# coder.files_added_in_exploration.add(rel_path) # Consider if needed
return f"File is now editable (added directly)"
except Exception as e:
coder.io.tool_error(f"Error in MakeEditable for '{file_path}': {str(e)}")
return f"Error: {str(e)}"

View file

@@ -0,0 +1,30 @@
def _execute_make_readonly(coder, file_path):
"""
Convert an editable file to a read-only file.
This allows the LLM to downgrade a file from editable to read-only
when it determines it no longer needs to make changes to that file.
"""
try:
# Get absolute path
abs_path = coder.abs_root_path(file_path)
rel_path = coder.get_rel_fname(abs_path)
# Check if file is in editable context
if abs_path not in coder.abs_fnames:
if abs_path in coder.abs_read_only_fnames:
coder.io.tool_output(f"📚 File '{file_path}' is already read-only")
return f"File is already read-only"
else:
coder.io.tool_output(f"⚠️ File '{file_path}' not in context")
return f"File not in context"
# Move from editable to read-only
coder.abs_fnames.remove(abs_path)
coder.abs_read_only_fnames.add(abs_path)
coder.io.tool_output(f"📚 Made '{file_path}' read-only")
return f"File is now read-only"
except Exception as e:
coder.io.tool_error(f"Error making file read-only: {str(e)}")
return f"Error: {str(e)}"

45
aider/tools/remove.py Normal file
View file

@@ -0,0 +1,45 @@
import time
def _execute_remove(coder, file_path):
"""
Explicitly remove a file from context.
This allows the LLM to clean up its context when files are no
longer needed, keeping the context focused and efficient.
"""
try:
# Get absolute path
abs_path = coder.abs_root_path(file_path)
rel_path = coder.get_rel_fname(abs_path)
# Check if file is in context (either editable or read-only)
removed = False
if abs_path in coder.abs_fnames:
# Don't remove if it's the last editable file and there are no read-only files
if len(coder.abs_fnames) <= 1 and not coder.abs_read_only_fnames:
coder.io.tool_output(f"⚠️ Cannot remove '{file_path}' - it's the only file in context")
return f"Cannot remove - last file in context"
coder.abs_fnames.remove(abs_path)
removed = True
elif abs_path in coder.abs_read_only_fnames:
# Don't remove if it's the last read-only file and there are no editable files
if len(coder.abs_read_only_fnames) <= 1 and not coder.abs_fnames:
coder.io.tool_output(f"⚠️ Cannot remove '{file_path}' - it's the only file in context")
return f"Cannot remove - last file in context"
coder.abs_read_only_fnames.remove(abs_path)
removed = True
if not removed:
coder.io.tool_output(f"⚠️ File '{file_path}' not in context")
return f"File not in context"
# Track in recently removed
coder.recently_removed[rel_path] = {
'removed_at': time.time()
}
coder.io.tool_output(f"🗑️ Explicitly removed '{file_path}' from context")
return f"Removed file from context"
except Exception as e:
coder.io.tool_error(f"Error removing file: {str(e)}")
return f"Error: {str(e)}"

View file

@@ -0,0 +1,65 @@
import traceback
from .tool_utils import (
ToolError,
validate_file_for_edit,
apply_change,
handle_tool_error,
generate_unified_diff_snippet,
format_tool_result,
)
def _execute_replace_all(coder, file_path, find_text, replace_text, change_id=None, dry_run=False):
"""
Replace all occurrences of text in a file using utility functions.
"""
    tool_name = "ReplaceAll"
try:
# 1. Validate file and get content
abs_path, rel_path, original_content = validate_file_for_edit(coder, file_path)
# 2. Count occurrences
count = original_content.count(find_text)
        if count == 0:
            coder.io.tool_warning(f"Text '{find_text}' not found in file '{file_path}'")
            return "Warning: Text not found in file"
        # 3. Perform the replacement
        new_content = original_content.replace(find_text, replace_text)
        if original_content == new_content:
            coder.io.tool_warning("No changes made: replacement text is identical to original")
            return "Warning: No changes made (replacement identical to original)"
        # 4. Generate diff for feedback
        diff_snippet = generate_unified_diff_snippet(original_content, new_content, rel_path)
        # 5. Handle dry run
        if dry_run:
            dry_run_message = f"Dry run: Would replace {count} occurrences of '{find_text}' in {file_path}."
            return format_tool_result(coder, tool_name, "", dry_run=True, dry_run_message=dry_run_message, diff_snippet=diff_snippet)
# 6. Apply Change (Not dry run)
metadata = {
'find_text': find_text,
'replace_text': replace_text,
'occurrences': count
}
final_change_id = apply_change(
coder, abs_path, rel_path, original_content, new_content, 'replaceall', metadata, change_id
)
# 7. Format and return result
success_message = f"Replaced {count} occurrences in {file_path}"
        return format_tool_result(
            coder, tool_name, success_message, change_id=final_change_id, diff_snippet=diff_snippet
        )
except ToolError as e:
# Handle errors raised by utility functions
return handle_tool_error(coder, tool_name, e, add_traceback=False)
except Exception as e:
# Handle unexpected errors
return handle_tool_error(coder, tool_name, e)
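# Example usage (sketch; file and strings are hypothetical):
# _execute_replace_all(coder, "config.py", "DEBUG = True", "DEBUG = False", dry_run=True)
# previews the diff; a second call with dry_run=False applies and tracks the change.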

115
aider/tools/replace_line.py Normal file
View file

@@ -0,0 +1,115 @@
import os
import traceback
def _execute_replace_line(coder, file_path, line_number, new_content, change_id=None, dry_run=False):
"""
Replace a specific line identified by line number.
Useful for fixing errors identified by error messages or linters.
Parameters:
- coder: The Coder instance
- file_path: Path to the file to modify
- line_number: The line number to replace (1-based)
- new_content: New content for the line
- change_id: Optional ID for tracking the change
- dry_run: If True, simulate the change without modifying the file
Returns a result message.
"""
try:
# Get absolute file path
abs_path = coder.abs_root_path(file_path)
rel_path = coder.get_rel_fname(abs_path)
# Check if file exists
if not os.path.isfile(abs_path):
coder.io.tool_error(f"File '{file_path}' not found")
return f"Error: File not found"
# Check if file is in editable context
if abs_path not in coder.abs_fnames:
if abs_path in coder.abs_read_only_fnames:
coder.io.tool_error(f"File '{file_path}' is read-only. Use MakeEditable first.")
return f"Error: File is read-only. Use MakeEditable first."
else:
coder.io.tool_error(f"File '{file_path}' not in context")
return f"Error: File not in context"
# Reread file content immediately before modification
file_content = coder.io.read_text(abs_path)
if file_content is None:
coder.io.tool_error(f"Could not read file '{file_path}' before ReplaceLine operation.")
return f"Error: Could not read file '{file_path}'"
# Split into lines
lines = file_content.splitlines()
# Validate line number
        if not isinstance(line_number, int):
            try:
                line_number = int(line_number)
            except ValueError:
                coder.io.tool_error(f"Invalid line_number value: '{line_number}'. Must be an integer.")
                return f"Error: Invalid line_number value '{line_number}'"
# Convert 1-based line number to 0-based index
idx = line_number - 1
if idx < 0 or idx >= len(lines):
coder.io.tool_error(f"Line number {line_number} is out of range for file '{file_path}' (has {len(lines)} lines).")
return f"Error: Line number {line_number} out of range"
# Store original content for change tracking
original_content = file_content
original_line = lines[idx]
# Replace the line
lines[idx] = new_content
# Join lines back into a string
new_content_full = '\n'.join(lines)
if original_content == new_content_full:
coder.io.tool_warning("No changes made: new line content is identical to original")
return f"Warning: No changes made (new content identical to original)"
# Create a readable diff for the line replacement
diff = f"Line {line_number}:\n- {original_line}\n+ {new_content}"
# Handle dry run
if dry_run:
coder.io.tool_output(f"Dry run: Would replace line {line_number} in {file_path}")
return f"Dry run: Would replace line {line_number}. Diff:\n{diff}"
# --- Apply Change (Not dry run) ---
coder.io.write_text(abs_path, new_content_full)
# Track the change
try:
metadata = {
'line_number': line_number,
'original_line': original_line,
'new_line': new_content
}
change_id = coder.change_tracker.track_change(
file_path=rel_path,
change_type='replaceline',
original_content=original_content,
new_content=new_content_full,
metadata=metadata,
change_id=change_id
)
except Exception as track_e:
coder.io.tool_error(f"Error tracking change for ReplaceLine: {track_e}")
change_id = "TRACKING_FAILED"
coder.aider_edited_files.add(rel_path)
# Improve feedback
coder.io.tool_output(f"✅ Replaced line {line_number} in {file_path} (change_id: {change_id})")
return f"Successfully replaced line {line_number} (change_id: {change_id}). Diff:\n{diff}"
except Exception as e:
coder.io.tool_error(f"Error in ReplaceLine: {str(e)}\n{traceback.format_exc()}")
return f"Error: {str(e)}"

View file

@@ -0,0 +1,128 @@
import os
import traceback
from .tool_utils import ToolError, generate_unified_diff_snippet, handle_tool_error, format_tool_result, apply_change
def _execute_replace_lines(coder, file_path, start_line, end_line, new_content, change_id=None, dry_run=False):
"""
Replace a range of lines identified by line numbers.
Useful for fixing errors identified by error messages or linters.
Parameters:
    - coder: The Coder instance
    - file_path: Path to the file to modify
- start_line: The first line number to replace (1-based)
- end_line: The last line number to replace (1-based)
- new_content: New content for the lines (can be multi-line)
- change_id: Optional ID for tracking the change
- dry_run: If True, simulate the change without modifying the file
Returns a result message.
"""
tool_name = "ReplaceLines"
try:
# Get absolute file path
abs_path = coder.abs_root_path(file_path)
rel_path = coder.get_rel_fname(abs_path)
# Check if file exists
if not os.path.isfile(abs_path):
raise ToolError(f"File '{file_path}' not found")
# Check if file is in editable context
if abs_path not in coder.abs_fnames:
if abs_path in coder.abs_read_only_fnames:
raise ToolError(f"File '{file_path}' is read-only. Use MakeEditable first.")
else:
raise ToolError(f"File '{file_path}' not in context")
# Reread file content immediately before modification
file_content = coder.io.read_text(abs_path)
if file_content is None:
raise ToolError(f"Could not read file '{file_path}'")
# Convert line numbers to integers if needed
try:
start_line = int(start_line)
except ValueError:
raise ToolError(f"Invalid start_line value: '{start_line}'. Must be an integer.")
try:
end_line = int(end_line)
except ValueError:
raise ToolError(f"Invalid end_line value: '{end_line}'. Must be an integer.")
# Split into lines
lines = file_content.splitlines()
# Convert 1-based line numbers to 0-based indices
start_idx = start_line - 1
end_idx = end_line - 1
# Validate line numbers
if start_idx < 0 or start_idx >= len(lines):
raise ToolError(f"Start line {start_line} is out of range for file '{file_path}' (has {len(lines)} lines).")
if end_idx < start_idx or end_idx >= len(lines):
raise ToolError(f"End line {end_line} is out of range for file '{file_path}' (must be >= start line {start_line} and <= {len(lines)}).")
# Store original content for change tracking
original_content = file_content
replaced_lines = lines[start_idx:end_idx+1]
# Split the new content into lines
new_lines = new_content.splitlines()
# Perform the replacement
new_full_lines = lines[:start_idx] + new_lines + lines[end_idx+1:]
new_content_full = '\n'.join(new_full_lines)
if original_content == new_content_full:
coder.io.tool_warning("No changes made: new content is identical to original")
return f"Warning: No changes made (new content identical to original)"
# Generate diff snippet
diff_snippet = generate_unified_diff_snippet(original_content, new_content_full, rel_path)
# Handle dry run
if dry_run:
dry_run_message = f"Dry run: Would replace lines {start_line}-{end_line} in {file_path}"
return format_tool_result(coder, tool_name, "", dry_run=True, dry_run_message=dry_run_message, diff_snippet=diff_snippet)
# --- Apply Change (Not dry run) ---
metadata = {
'start_line': start_line,
'end_line': end_line,
'replaced_lines': replaced_lines,
'new_lines': new_lines
}
final_change_id = apply_change(
coder, abs_path, rel_path, original_content, new_content_full, 'replacelines', metadata, change_id
)
coder.aider_edited_files.add(rel_path)
replaced_count = end_line - start_line + 1
new_count = len(new_lines)
# Format and return result
success_message = f"Replaced lines {start_line}-{end_line} ({replaced_count} lines) with {new_count} new lines in {file_path}"
return format_tool_result(
coder, tool_name, success_message, change_id=final_change_id, diff_snippet=diff_snippet
)
except ToolError as e:
# Handle errors raised by utility functions (expected errors)
return handle_tool_error(coder, tool_name, e, add_traceback=False)
except Exception as e:
# Handle unexpected errors
return handle_tool_error(coder, tool_name, e)
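# Example usage (sketch):
# _execute_replace_lines(coder, "app.py", 10, 12, "def helper():\n    return 1")
# replaces lines 10-12 (1-based, inclusive) with the two new lines and tracks the change.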

View file

@@ -0,0 +1,92 @@
import traceback
from .tool_utils import (
ToolError,
validate_file_for_edit,
apply_change,
handle_tool_error,
format_tool_result,
generate_unified_diff_snippet,
)
def _execute_replace_text(coder, file_path, find_text, replace_text, near_context=None, occurrence=1, change_id=None, dry_run=False):
"""
Replace specific text with new text, optionally using nearby context for disambiguation.
Uses utility functions for validation, finding occurrences, and applying changes.
"""
tool_name = "ReplaceText"
try:
# 1. Validate file and get content
abs_path, rel_path, original_content = validate_file_for_edit(coder, file_path)
# 2. Find occurrences using helper function
# Note: _find_occurrences is currently on the Coder class, not in tool_utils
occurrences = coder._find_occurrences(original_content, find_text, near_context)
if not occurrences:
err_msg = f"Text '{find_text}' not found"
if near_context:
err_msg += f" near context '{near_context}'"
err_msg += f" in file '{file_path}'."
raise ToolError(err_msg)
# 3. Select the occurrence index
num_occurrences = len(occurrences)
try:
occurrence = int(occurrence)
            if occurrence == -1:
                # occurrences is non-empty at this point, so -1 safely selects the last match
                target_idx = num_occurrences - 1
elif 1 <= occurrence <= num_occurrences:
target_idx = occurrence - 1 # Convert 1-based to 0-based
else:
err_msg = f"Occurrence number {occurrence} is out of range. Found {num_occurrences} occurrences of '{find_text}'"
if near_context: err_msg += f" near '{near_context}'"
err_msg += f" in '{file_path}'."
raise ToolError(err_msg)
except ValueError:
raise ToolError(f"Invalid occurrence value: '{occurrence}'. Must be an integer.")
start_index = occurrences[target_idx]
# 4. Perform the replacement
new_content = original_content[:start_index] + replace_text + original_content[start_index + len(find_text):]
        if original_content == new_content:
            coder.io.tool_warning("No changes made: replacement text is identical to original")
            return "Warning: No changes made (replacement identical to original)"
        # 5. Generate diff for feedback
        diff_snippet = generate_unified_diff_snippet(original_content, new_content, rel_path)
occurrence_str = f"occurrence {occurrence}" if num_occurrences > 1 else "text"
# 6. Handle dry run
if dry_run:
dry_run_message = f"Dry run: Would replace {occurrence_str} of '{find_text}' in {file_path}."
return format_tool_result(coder, tool_name, "", dry_run=True, dry_run_message=dry_run_message, diff_snippet=diff_snippet)
# 7. Apply Change (Not dry run)
metadata = {
'start_index': start_index,
'find_text': find_text,
'replace_text': replace_text,
'near_context': near_context,
'occurrence': occurrence
}
final_change_id = apply_change(
coder, abs_path, rel_path, original_content, new_content, 'replacetext', metadata, change_id
)
# 8. Format and return result
success_message = f"Replaced {occurrence_str} in {file_path}"
return format_tool_result(
coder, tool_name, success_message, change_id=final_change_id, diff_snippet=diff_snippet
)
except ToolError as e:
# Handle errors raised by utility functions or explicitly raised here
return handle_tool_error(coder, tool_name, e, add_traceback=False)
except Exception as e:
# Handle unexpected errors
return handle_tool_error(coder, tool_name, e)
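# Example usage (sketch; file and strings are hypothetical):
# _execute_replace_text(coder, "app.py", "timeout=30", "timeout=60",
#                       near_context="def fetch", occurrence=1)
# near_context disambiguates repeated matches; occurrence=-1 targets the last one.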

View file

@@ -0,0 +1,92 @@
import os
from .tool_utils import ToolError, resolve_paths, handle_tool_error
def execute_show_numbered_context(coder, file_path, pattern=None, line_number=None, context_lines=3):
"""
Displays numbered lines from file_path centered around a target location
(pattern or line_number), without adding the file to context.
Uses utility functions for path resolution and error handling.
"""
tool_name = "ShowNumberedContext"
try:
# 1. Validate arguments
if not (pattern is None) ^ (line_number is None):
raise ToolError("Provide exactly one of 'pattern' or 'line_number'.")
# 2. Resolve path
abs_path, rel_path = resolve_paths(coder, file_path)
if not os.path.exists(abs_path):
# Check existence after resolving, as resolve_paths doesn't guarantee existence
raise ToolError(f"File not found: {file_path}")
# 3. Read file content
content = coder.io.read_text(abs_path)
if content is None:
raise ToolError(f"Could not read file: {file_path}")
lines = content.splitlines()
num_lines = len(lines)
# 4. Determine center line index
center_line_idx = -1
found_by = ""
if line_number is not None:
try:
line_number_int = int(line_number)
if 1 <= line_number_int <= num_lines:
center_line_idx = line_number_int - 1 # Convert to 0-based index
found_by = f"line {line_number_int}"
else:
raise ToolError(f"Line number {line_number_int} is out of range (1-{num_lines}) for {file_path}.")
except ValueError:
raise ToolError(f"Invalid line number '{line_number}'. Must be an integer.")
elif pattern is not None:
# TODO: Update this section for multiline pattern support later
first_match_line_idx = -1
for i, line in enumerate(lines):
if pattern in line:
first_match_line_idx = i
break
if first_match_line_idx != -1:
center_line_idx = first_match_line_idx
found_by = f"pattern '{pattern}' on line {center_line_idx + 1}"
else:
raise ToolError(f"Pattern '{pattern}' not found in {file_path}.")
if center_line_idx == -1:
# Should not happen if logic above is correct, but as a safeguard
raise ToolError("Internal error: Could not determine center line.")
# 5. Calculate context window
try:
context_lines_int = int(context_lines)
if context_lines_int < 0:
raise ValueError("Context lines must be non-negative")
except ValueError:
coder.io.tool_warning(f"Invalid context_lines value '{context_lines}', using default 3.")
context_lines_int = 3
start_line_idx = max(0, center_line_idx - context_lines_int)
end_line_idx = min(num_lines - 1, center_line_idx + context_lines_int)
# 6. Format output
# Use rel_path for user-facing messages
output_lines = [f"Displaying context around {found_by} in {rel_path}:"]
max_line_num_width = len(str(end_line_idx + 1)) # Width for padding
for i in range(start_line_idx, end_line_idx + 1):
line_num_str = str(i + 1).rjust(max_line_num_width)
output_lines.append(f"{line_num_str} | {lines[i]}")
# Log success and return the formatted context directly
coder.io.tool_output(f"Successfully retrieved context for {rel_path}")
return "\n".join(output_lines)
except ToolError as e:
# Handle expected errors raised by utility functions or validation
return handle_tool_error(coder, tool_name, e, add_traceback=False)
except Exception as e:
# Handle unexpected errors during processing
return handle_tool_error(coder, tool_name, e)
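# Example usage (sketch; output is hypothetical):
# print(execute_show_numbered_context(coder, "app.py", pattern="def main", context_lines=1))
# Displaying context around pattern 'def main' on line 10 in app.py:
#  9 | import sys
# 10 | def main():
# 11 |     ...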

270
aider/tools/tool_utils.py Normal file
View file

@@ -0,0 +1,270 @@
import difflib
import os
import re
import traceback
class ToolError(Exception):
"""Custom exception for tool-specific errors that should be reported to the LLM."""
pass
def resolve_paths(coder, file_path):
"""Resolves absolute and relative paths for a given file path."""
try:
abs_path = coder.abs_root_path(file_path)
rel_path = coder.get_rel_fname(abs_path)
return abs_path, rel_path
except Exception as e:
# Wrap unexpected errors during path resolution
raise ToolError(f"Error resolving path '{file_path}': {e}")
def validate_file_for_edit(coder, file_path):
"""
Validates if a file exists, is in context, and is editable.
Reads and returns original content if valid.
Raises ToolError on failure.
Returns:
tuple: (absolute_path, relative_path, original_content)
"""
abs_path, rel_path = resolve_paths(coder, file_path)
if not os.path.isfile(abs_path):
raise ToolError(f"File '{file_path}' not found")
if abs_path not in coder.abs_fnames:
if abs_path in coder.abs_read_only_fnames:
raise ToolError(f"File '{file_path}' is read-only. Use MakeEditable first.")
else:
# File exists but is not in context at all
raise ToolError(f"File '{file_path}' not in context. Use View or MakeEditable first.")
# Reread content immediately before potential modification
content = coder.io.read_text(abs_path)
if content is None:
# This indicates an issue reading a file we know exists and is in context
coder.io.tool_error(f"Internal error: Could not read file '{file_path}' which should be accessible.")
raise ToolError(f"Could not read file '{file_path}'")
return abs_path, rel_path, content
def find_pattern_indices(lines, pattern, use_regex=False):
"""Finds all line indices matching a pattern."""
indices = []
for i, line in enumerate(lines):
if (use_regex and re.search(pattern, line)) or (not use_regex and pattern in line):
indices.append(i)
return indices
def select_occurrence_index(indices, occurrence, pattern_desc="Pattern"):
"""
Selects the target 0-based index from a list of indices based on the 1-based occurrence parameter.
Raises ToolError if the pattern wasn't found or the occurrence is invalid.
"""
num_occurrences = len(indices)
if not indices:
raise ToolError(f"{pattern_desc} not found")
try:
occurrence = int(occurrence) # Ensure occurrence is an integer
        if occurrence == -1:  # Last occurrence; indices is non-empty at this point
            target_idx = num_occurrences - 1
elif 1 <= occurrence <= num_occurrences:
target_idx = occurrence - 1 # Convert 1-based to 0-based
else:
raise ToolError(f"Occurrence number {occurrence} is out of range for {pattern_desc}. Found {num_occurrences} occurrences.")
except ValueError:
raise ToolError(f"Invalid occurrence value: '{occurrence}'. Must be an integer.")
return indices[target_idx]
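# Example (sketch):
# indices = find_pattern_indices(["foo", "bar", "foo"], "foo")  # -> [0, 2]
# select_occurrence_index(indices, 1)   # -> 0 (first occurrence, 1-based)
# select_occurrence_index(indices, -1)  # -> 2 (last occurrence)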
def determine_line_range(
coder,
file_path,
lines,
start_pattern_line_index=None, # Made optional
end_pattern=None,
line_count=None,
target_symbol=None,
pattern_desc="Block",
):
"""
Determines the end line index based on end_pattern or line_count.
Raises ToolError if end_pattern is not found or line_count is invalid.
"""
# Parameter validation: Ensure only one targeting method is used
targeting_methods = [
target_symbol is not None,
start_pattern_line_index is not None,
# Note: line_count and end_pattern depend on start_pattern_line_index
]
if sum(targeting_methods) > 1:
raise ToolError("Cannot specify target_symbol along with start_pattern.")
if sum(targeting_methods) == 0:
raise ToolError("Must specify either target_symbol or start_pattern.") # Or line numbers for line-based tools, handled elsewhere
if target_symbol:
if end_pattern or line_count:
raise ToolError("Cannot specify end_pattern or line_count when using target_symbol.")
try:
# Use repo_map to find the symbol's definition range
start_line, end_line = coder.repo_map.get_symbol_definition_location(file_path, target_symbol)
return start_line, end_line
except AttributeError: # Use specific exception
# Check if repo_map exists and is initialized before accessing methods
if not hasattr(coder, 'repo_map') or coder.repo_map is None:
raise ToolError("RepoMap is not available or not initialized.")
# If repo_map exists, the error might be from get_symbol_definition_location itself
# Re-raise ToolErrors directly
raise
except ToolError as e:
# Propagate specific ToolErrors from repo_map (not found, ambiguous, etc.)
raise e
except Exception as e:
# Catch other unexpected errors during symbol lookup
raise ToolError(f"Unexpected error looking up symbol '{target_symbol}': {e}")
    # --- Pattern/line_count based targeting ---
    # Ensure start_pattern_line_index is provided if not using target_symbol
    if start_pattern_line_index is None:
        raise ToolError("Internal error: start_pattern_line_index is required when not using target_symbol.")
    start_line = start_pattern_line_index
end_line = -1
if end_pattern and line_count:
raise ToolError("Cannot specify both end_pattern and line_count")
if end_pattern:
found_end = False
# Search from the start_line onwards for the end_pattern
for i in range(start_line, len(lines)):
if end_pattern in lines[i]:
end_line = i
found_end = True
break
if not found_end:
raise ToolError(f"End pattern '{end_pattern}' not found after start pattern on line {start_line + 1}")
elif line_count:
try:
line_count = int(line_count)
if line_count <= 0:
raise ValueError("Line count must be positive")
# Calculate end line index, ensuring it doesn't exceed file bounds
end_line = min(start_line + line_count - 1, len(lines) - 1)
except ValueError:
raise ToolError(f"Invalid line_count value: '{line_count}'. Must be a positive integer.")
else:
# If neither end_pattern nor line_count is given, the range is just the start line
end_line = start_line
return start_line, end_line
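# Example (sketch): with lines = ["def f():", "    x = 1", "    return x"],
# determine_line_range(coder, "f.py", lines, start_pattern_line_index=0, line_count=3)
# returns (0, 2); passing end_pattern="return" instead of line_count also yields (0, 2).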
def generate_unified_diff_snippet(original_content, new_content, file_path, context_lines=3):
"""
Generates a unified diff snippet between original and new content.
Args:
original_content (str): The original file content.
new_content (str): The modified file content.
file_path (str): The relative path to the file (for display in diff header).
context_lines (int): Number of context lines to show around changes.
Returns:
str: A formatted unified diff snippet, or an empty string if no changes.
"""
if original_content == new_content:
return ""
original_lines = original_content.splitlines(keepends=True)
new_lines = new_content.splitlines(keepends=True)
diff = difflib.unified_diff(
original_lines,
new_lines,
fromfile=f"a/{file_path}",
tofile=f"b/{file_path}",
n=context_lines, # Number of context lines
)
# Join the diff lines, potentially skipping the header if desired,
# but let's keep it for standard format.
diff_snippet = "".join(diff)
# Ensure snippet ends with a newline for cleaner formatting in results
if diff_snippet and not diff_snippet.endswith('\n'):
diff_snippet += '\n'
return diff_snippet
def apply_change(coder, abs_path, rel_path, original_content, new_content, change_type, metadata, change_id=None):
"""
Writes the new content, tracks the change, and updates coder state.
Returns the final change ID. Raises ToolError on tracking failure.
"""
coder.io.write_text(abs_path, new_content)
try:
final_change_id = coder.change_tracker.track_change(
file_path=rel_path,
change_type=change_type,
original_content=original_content,
new_content=new_content,
metadata=metadata,
change_id=change_id
)
except Exception as track_e:
# Log the error but also raise ToolError to inform the LLM
coder.io.tool_error(f"Error tracking change for {change_type}: {track_e}")
raise ToolError(f"Failed to track change: {track_e}")
coder.aider_edited_files.add(rel_path)
return final_change_id
def handle_tool_error(coder, tool_name, e, add_traceback=True):
"""Logs tool errors and returns a formatted error message for the LLM."""
error_message = f"Error in {tool_name}: {str(e)}"
if add_traceback:
error_message += f"\n{traceback.format_exc()}"
coder.io.tool_error(error_message)
# Return only the core error message to the LLM for brevity
return f"Error: {str(e)}"
def format_tool_result(coder, tool_name, success_message, change_id=None, diff_snippet=None, dry_run=False, dry_run_message=None):
"""Formats the result message for tool execution."""
if dry_run:
full_message = dry_run_message or f"Dry run: Would execute {tool_name}."
if diff_snippet:
full_message += f" Diff snippet:\n{diff_snippet}"
coder.io.tool_output(full_message) # Log the dry run action
return full_message
else:
# Use the provided success message, potentially adding change_id and diff
full_message = f"{success_message}"
if change_id:
full_message += f" (change_id: {change_id})"
coder.io.tool_output(full_message) # Log the success action
result_for_llm = f"Successfully executed {tool_name}."
if change_id:
result_for_llm += f" Change ID: {change_id}."
if diff_snippet:
result_for_llm += f" Diff snippet:\n{diff_snippet}"
return result_for_llm
# Example usage within a hypothetical tool:
# try:
# abs_path, rel_path, original_content = validate_file_for_edit(coder, file_path)
# # ... tool specific logic to determine new_content and metadata ...
# if dry_run:
# return format_tool_result(coder, "MyTool", "", dry_run=True, diff_snippet=diff)
#
# change_id = apply_change(coder, abs_path, rel_path, original_content, new_content, 'mytool', metadata)
# return format_tool_result(coder, "MyTool", f"Applied change to {file_path}", change_id=change_id, diff_snippet=diff)
# except ToolError as e:
# return handle_tool_error(coder, "MyTool", e, add_traceback=False) # Don't need traceback for ToolErrors
# except Exception as e:
# return handle_tool_error(coder, "MyTool", e)

View file

@@ -0,0 +1,56 @@
import traceback
def _execute_undo_change(coder, change_id=None, file_path=None):
"""
Undo a specific change by ID, or the last change to a file.
Parameters:
- coder: The Coder instance
- change_id: ID of the change to undo
- file_path: Path to file where the last change should be undone
Returns a result message.
"""
# Note: Undo does not have a dry_run parameter as it's inherently about reverting a previous action.
try:
# Validate parameters
if change_id is None and file_path is None:
coder.io.tool_error("Must specify either change_id or file_path for UndoChange")
return "Error: Must specify either change_id or file_path"
# If file_path is specified, get the most recent change for that file
if file_path:
abs_path = coder.abs_root_path(file_path)
rel_path = coder.get_rel_fname(abs_path)
change_id = coder.change_tracker.get_last_change(rel_path)
if not change_id:
coder.io.tool_error(f"No tracked changes found for file '{file_path}' to undo.")
return f"Error: No changes found for file '{file_path}'"
# Attempt to get undo information from the tracker
success, message, change_info = coder.change_tracker.undo_change(change_id)
if not success:
coder.io.tool_error(f"Failed to undo change '{change_id}': {message}")
return f"Error: {message}"
# Apply the undo by restoring the original content
if change_info:
file_path = change_info['file_path']
abs_path = coder.abs_root_path(file_path)
# Write the original content back to the file
coder.io.write_text(abs_path, change_info['original'])
coder.aider_edited_files.add(file_path) # Track that the file was modified by the undo
change_type = change_info['type']
coder.io.tool_output(f"✅ Undid {change_type} change '{change_id}' in {file_path}")
return f"Successfully undid {change_type} change '{change_id}'."
else:
# This case should ideally not be reached if tracker returns success
coder.io.tool_error(f"Failed to undo change '{change_id}': Change info missing after successful tracker update.")
return f"Error: Failed to undo change '{change_id}' (missing change info)"
except Exception as e:
coder.io.tool_error(f"Error in UndoChange: {str(e)}\n{traceback.format_exc()}")
return f"Error: {str(e)}"

13
aider/tools/view.py Normal file
View file

@@ -0,0 +1,13 @@
def execute_view(coder, file_path):
"""
Explicitly add a file to context as read-only.
This gives the LLM explicit control over what files to view,
rather than relying on indirect mentions.
"""
try:
# Use the coder's helper, marking it as an explicit view request
return coder._add_file_to_context(file_path, explicit=True)
except Exception as e:
coder.io.tool_error(f"Error viewing file: {str(e)}")
return f"Error: {str(e)}"

View file

@@ -0,0 +1,55 @@
import os
import fnmatch
def execute_view_files_at_glob(coder, pattern):
"""
Execute a glob pattern and add matching files to context as read-only.
This tool helps the LLM find files by pattern matching, similar to
how a developer would use glob patterns to find files.
"""
try:
# Find files matching the pattern
matching_files = []
# Make the pattern relative to root if it's absolute
if pattern.startswith('/'):
pattern = os.path.relpath(pattern, coder.root)
# Get all files in the repo
all_files = coder.get_all_relative_files()
# Find matches with pattern matching
for file in all_files:
if fnmatch.fnmatch(file, pattern):
matching_files.append(file)
# Limit the number of files added if there are too many matches
if len(matching_files) > coder.max_files_per_glob:
coder.io.tool_output(
f"⚠️ Found {len(matching_files)} files matching '{pattern}', "
f"limiting to {coder.max_files_per_glob} most relevant files."
)
# Sort by modification time (most recent first)
matching_files.sort(key=lambda f: os.path.getmtime(coder.abs_root_path(f)), reverse=True)
matching_files = matching_files[:coder.max_files_per_glob]
# Add files to context
for file in matching_files:
# Use the coder's internal method to add files
coder._add_file_to_context(file)
# Return a user-friendly result
if matching_files:
if len(matching_files) > 10:
brief = ', '.join(matching_files[:5]) + f', and {len(matching_files)-5} more'
coder.io.tool_output(f"📂 Added {len(matching_files)} files matching '{pattern}': {brief}")
else:
coder.io.tool_output(f"📂 Added files matching '{pattern}': {', '.join(matching_files)}")
return f"Added {len(matching_files)} files: {', '.join(matching_files[:5])}{' and more' if len(matching_files) > 5 else ''}"
else:
coder.io.tool_output(f"⚠️ No files found matching '{pattern}'")
return f"No files found matching '{pattern}'"
except Exception as e:
coder.io.tool_error(f"Error in ViewFilesAtGlob: {str(e)}")
return f"Error: {str(e)}"

View file

@@ -0,0 +1,92 @@
import re
import os
import fnmatch
def execute_view_files_matching(coder, search_pattern, file_pattern=None, regex=False):
"""
Search for pattern (literal string or regex) in files and add matching files to context as read-only.
Args:
coder: The Coder instance.
search_pattern (str): The pattern to search for. Treated as a literal string by default.
file_pattern (str, optional): Glob pattern to filter which files are searched. Defaults to None (search all files).
regex (bool, optional): If True, treat search_pattern as a regular expression. Defaults to False.
This tool lets the LLM search for content within files, mimicking
how a developer would use grep or regex search to find relevant code.
"""
try:
# Get list of files to search
if file_pattern:
# Use glob pattern to filter files
all_files = coder.get_all_relative_files()
files_to_search = []
for file in all_files:
if fnmatch.fnmatch(file, file_pattern):
files_to_search.append(file)
if not files_to_search:
return f"No files matching '{file_pattern}' to search for pattern '{search_pattern}'"
else:
# Search all files if no pattern provided
files_to_search = coder.get_all_relative_files()
# Search for pattern in files
matches = {}
for file in files_to_search:
abs_path = coder.abs_root_path(file)
try:
with open(abs_path, 'r', encoding='utf-8') as f:
content = f.read()
match_count = 0
if regex:
try:
matches_found = re.findall(search_pattern, content)
match_count = len(matches_found)
except re.error as e:
# Handle invalid regex patterns gracefully
coder.io.tool_error(f"Invalid regex pattern '{search_pattern}': {e}")
# Skip this file for this search if regex is invalid
continue
else:
# Exact string matching
match_count = content.count(search_pattern)
if match_count > 0:
matches[file] = match_count
except Exception:
# Skip files that can't be read (binary, etc.)
pass
# Limit the number of files added if there are too many matches
if len(matches) > coder.max_files_per_glob:
coder.io.tool_output(
f"⚠️ Found '{search_pattern}' in {len(matches)} files, "
f"limiting to {coder.max_files_per_glob} files with most matches."
)
# Sort by number of matches (most matches first)
sorted_matches = sorted(matches.items(), key=lambda x: x[1], reverse=True)
matches = dict(sorted_matches[:coder.max_files_per_glob])
# Add matching files to context
for file in matches:
coder._add_file_to_context(file)
# Return a user-friendly result
if matches:
# Sort by number of matches (most matches first)
sorted_matches = sorted(matches.items(), key=lambda x: x[1], reverse=True)
match_list = [f"{file} ({count} matches)" for file, count in sorted_matches[:5]]
if len(sorted_matches) > 5:
coder.io.tool_output(f"🔍 Found '{search_pattern}' in {len(matches)} files: {', '.join(match_list)} and {len(matches)-5} more")
return f"Found in {len(matches)} files: {', '.join(match_list)} and {len(matches)-5} more"
else:
coder.io.tool_output(f"🔍 Found '{search_pattern}' in: {', '.join(match_list)}")
return f"Found in {len(matches)} files: {', '.join(match_list)}"
else:
coder.io.tool_output(f"⚠️ Pattern '{search_pattern}' not found in any files")
return f"Pattern not found in any files"
except Exception as e:
coder.io.tool_error(f"Error in ViewFilesMatching: {str(e)}")
return f"Error: {str(e)}"

View file

@@ -0,0 +1,102 @@
import os
def _execute_view_files_with_symbol(coder, symbol):
"""
Find files containing a symbol using RepoMap and add them to context.
Checks files already in context first.
"""
if not coder.repo_map:
coder.io.tool_output("⚠️ Repo map not available, cannot use ViewFilesWithSymbol tool.")
return "Repo map not available"
if not symbol:
return "Error: Missing 'symbol' parameter for ViewFilesWithSymbol"
    # 1. Check files already in context
files_in_context = list(coder.abs_fnames) + list(coder.abs_read_only_fnames)
found_in_context = []
for abs_fname in files_in_context:
rel_fname = coder.get_rel_fname(abs_fname)
try:
# Use get_tags for consistency with RepoMap usage elsewhere for now.
tags = coder.repo_map.get_tags(abs_fname, rel_fname)
for tag in tags:
if tag.name == symbol:
found_in_context.append(rel_fname)
break # Found in this file, move to next
except Exception as e:
coder.io.tool_warning(f"Could not get symbols for {rel_fname} while checking context: {e}")
if found_in_context:
# Symbol found in already loaded files. Report this and stop.
file_list = ", ".join(sorted(list(set(found_in_context))))
coder.io.tool_output(f"Symbol '{symbol}' found in already loaded file(s): {file_list}. No external search performed.")
return f"Symbol '{symbol}' found in already loaded file(s): {file_list}. No external search performed."
# 2. If not found in context, search the repository using RepoMap
coder.io.tool_output(f"🔎 Searching for symbol '{symbol}' in repository (excluding current context)...")
try:
found_files = set()
current_context_files = coder.abs_fnames | coder.abs_read_only_fnames
files_to_search = set(coder.get_all_abs_files()) - current_context_files
rel_fname_to_abs = {}
all_tags = []
for fname in files_to_search:
rel_fname = coder.get_rel_fname(fname)
rel_fname_to_abs[rel_fname] = fname
try:
tags = coder.repo_map.get_tags(fname, rel_fname)
all_tags.extend(tags)
except Exception as e:
coder.io.tool_warning(f"Could not get tags for {rel_fname}: {e}")
# Find matching symbols
for tag in all_tags:
if tag.name == symbol:
# Use absolute path directly if available, otherwise resolve from relative path
abs_fname = rel_fname_to_abs.get(tag.rel_fname) or coder.abs_root_path(tag.fname)
if abs_fname in files_to_search: # Ensure we only add files we intended to search
found_files.add(abs_fname)
# Limit the number of files added
if len(found_files) > coder.max_files_per_glob:
coder.io.tool_output(
f"⚠️ Found symbol '{symbol}' in {len(found_files)} files, "
f"limiting to {coder.max_files_per_glob} most relevant files."
)
# Sort by modification time (most recent first) - approximate relevance
sorted_found_files = sorted(list(found_files), key=lambda f: os.path.getmtime(f), reverse=True)
found_files = set(sorted_found_files[:coder.max_files_per_glob])
# Add files to context (as read-only)
added_count = 0
added_files_rel = []
for abs_file_path in found_files:
rel_path = coder.get_rel_fname(abs_file_path)
# Double check it's not already added somehow
if abs_file_path not in coder.abs_fnames and abs_file_path not in coder.abs_read_only_fnames:
# Use explicit=True for clear output, even though it's an external search result
add_result = coder._add_file_to_context(rel_path, explicit=True)
if "Added" in add_result or "Viewed" in add_result: # Count successful adds/views
added_count += 1
added_files_rel.append(rel_path)
if added_count > 0:
if added_count > 5:
brief = ', '.join(added_files_rel[:5]) + f', and {added_count-5} more'
coder.io.tool_output(f"🔎 Found '{symbol}' and added {added_count} files: {brief}")
else:
coder.io.tool_output(f"🔎 Found '{symbol}' and added files: {', '.join(added_files_rel)}")
return f"Found symbol '{symbol}' and added {added_count} files as read-only."
else:
coder.io.tool_output(f"⚠️ Symbol '{symbol}' not found in searchable files (outside current context).")
return f"Symbol '{symbol}' not found in searchable files (outside current context)."
except Exception as e:
coder.io.tool_error(f"Error in ViewFilesWithSymbol: {str(e)}")
return f"Error: {str(e)}"