From e0da980c6e4c3a7484296ba60cff31f83edafa1e Mon Sep 17 00:00:00 2001 From: "Amar Sood (tekacs)" Date: Fri, 11 Apr 2025 08:23:09 -0400 Subject: [PATCH 01/63] Add Navigator --- aider/args.py | 7 + aider/coders/__init__.py | 2 + aider/coders/base_coder.py | 76 +- aider/coders/navigator_coder.py | 1246 +++++++++++++++++++++++++++++ aider/coders/navigator_prompts.py | 145 ++++ aider/commands.py | 42 + 6 files changed, 1512 insertions(+), 6 deletions(-) create mode 100644 aider/coders/navigator_coder.py create mode 100644 aider/coders/navigator_prompts.py diff --git a/aider/args.py b/aider/args.py index 6df19778b..cde44d3e8 100644 --- a/aider/args.py +++ b/aider/args.py @@ -158,6 +158,13 @@ def get_parser(default_config_files, git_root): const="architect", help="Use architect edit format for the main chat", ) + group.add_argument( + "--navigator", + action="store_const", + dest="edit_format", + const="navigator", + help="Use navigator edit format for the main chat (autonomous file management)", + ) group.add_argument( "--auto-accept-architect", action=argparse.BooleanOptionalAction, diff --git a/aider/coders/__init__.py b/aider/coders/__init__.py index 88bcddfaa..138540c61 100644 --- a/aider/coders/__init__.py +++ b/aider/coders/__init__.py @@ -9,6 +9,7 @@ from .editor_editblock_coder import EditorEditBlockCoder from .editor_whole_coder import EditorWholeFileCoder from .help_coder import HelpCoder from .patch_coder import PatchCoder +from .navigator_coder import NavigatorCoder from .udiff_coder import UnifiedDiffCoder from .udiff_simple import UnifiedDiffSimpleCoder from .wholefile_coder import WholeFileCoder @@ -31,4 +32,5 @@ __all__ = [ EditorWholeFileCoder, EditorDiffFencedCoder, ContextCoder, + NavigatorCoder, ] diff --git a/aider/coders/base_coder.py b/aider/coders/base_coder.py index 675570c60..551039f8f 100755 --- a/aider/coders/base_coder.py +++ b/aider/coders/base_coder.py @@ -43,6 +43,10 @@ from ..dump import dump # noqa: F401 from .chat_chunks import 
ChatChunks +# Pattern to detect fenced search/replace blocks +SEARCH_REPLACE_FENCE = re.compile(r"```search_replace\n", re.MULTILINE) + + class UnknownEditFormat(ValueError): def __init__(self, edit_format, valid_formats): self.edit_format = edit_format @@ -111,6 +115,10 @@ class Coder: ignore_mentions = None chat_language = None file_watcher = None + + # Context management settings (for all modes) + context_management_enabled = False # Disabled by default except for navigator mode + large_file_token_threshold = 25000 # Files larger than this will be truncated when context management is enabled @classmethod def create( @@ -615,11 +623,36 @@ class Coder: prompt += relative_fname prompt += f"\n{self.fence[0]}\n" - prompt += content - - # lines = content.splitlines(keepends=True) - # lines = [f"{i+1:03}:{line}" for i, line in enumerate(lines)] - # prompt += "".join(lines) + # Apply context management if enabled for large files + if self.context_management_enabled: + # Calculate tokens for this file + file_tokens = self.main_model.token_count(content) + + if file_tokens > self.large_file_token_threshold: + # Truncate the file content + lines = content.splitlines() + total_lines = len(lines) + + # Keep the first and last parts of the file with a marker in between + keep_lines = self.large_file_token_threshold // 40 # Rough estimate of tokens per line + first_chunk = lines[:keep_lines//2] + last_chunk = lines[-(keep_lines//2):] + + truncated_content = "\n".join(first_chunk) + truncated_content += f"\n\n... [File truncated due to size ({file_tokens} tokens). Use /context-management to toggle truncation off] ...\n\n" + truncated_content += "\n".join(last_chunk) + + # Add message about truncation + self.io.tool_output( + f"⚠️ '{relative_fname}' is very large ({file_tokens} tokens). " + "Use /context-management to toggle truncation off if needed." 
+ ) + + prompt += truncated_content + else: + prompt += content + else: + prompt += content prompt += f"{self.fence[1]}\n" @@ -634,7 +667,38 @@ class Coder: prompt += "\n" prompt += relative_fname prompt += f"\n{self.fence[0]}\n" - prompt += content + + # Apply context management if enabled for large files (same as get_files_content) + if self.context_management_enabled: + # Calculate tokens for this file + file_tokens = self.main_model.token_count(content) + + if file_tokens > self.large_file_token_threshold: + # Truncate the file content + lines = content.splitlines() + total_lines = len(lines) + + # Keep the first and last parts of the file with a marker in between + keep_lines = self.large_file_token_threshold // 40 # Rough estimate of tokens per line + first_chunk = lines[:keep_lines//2] + last_chunk = lines[-(keep_lines//2):] + + truncated_content = "\n".join(first_chunk) + truncated_content += f"\n\n... [File truncated due to size ({file_tokens} tokens). Use /context-management to toggle truncation off] ...\n\n" + truncated_content += "\n".join(last_chunk) + + # Add message about truncation + self.io.tool_output( + f"⚠️ '{relative_fname}' is very large ({file_tokens} tokens). " + "Use /context-management to toggle truncation off if needed." 
+ ) + + prompt += truncated_content + else: + prompt += content + else: + prompt += content + prompt += f"{self.fence[1]}\n" return prompt diff --git a/aider/coders/navigator_coder.py b/aider/coders/navigator_coder.py new file mode 100644 index 000000000..597ea590a --- /dev/null +++ b/aider/coders/navigator_coder.py @@ -0,0 +1,1246 @@ +import re +import fnmatch +import os +import time +import random +import subprocess +import traceback +from pathlib import Path +import xml.etree.ElementTree as ET +from xml.etree.ElementTree import ParseError + +from .base_coder import Coder +from .editblock_coder import find_original_update_blocks, do_replace, find_similar_lines +from .navigator_prompts import NavigatorPrompts +from aider.repo import ANY_GIT_ERROR +from aider import urls +# Import run_cmd_subprocess directly for non-interactive execution +from aider.run_cmd import run_cmd_subprocess + +class NavigatorCoder(Coder): + """Mode where the LLM autonomously manages which files are in context.""" + + edit_format = "navigator" + gpt_prompts = NavigatorPrompts() + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + + # Dictionary to track recently removed files + self.recently_removed = {} + + # Configuration parameters + self.max_tool_calls = 100 # Maximum number of tool calls per response + + # Context management parameters + self.large_file_token_threshold = 25000 # Files larger than this in tokens are considered large + self.max_files_per_glob = 50 # Maximum number of files to add at once via glob/grep + + # Enable context management by default only in navigator mode + self.context_management_enabled = True # Enabled by default for navigator mode + + + # Track files added during current exploration + self.files_added_in_exploration = set() + + # Counter for tool calls + self.tool_call_count = 0 + + # Set high max reflections to allow many exploration rounds + # This controls how many automatic iterations the LLM can do + self.max_reflections = 15 
+ + # Enable enhanced context blocks by default + self.use_enhanced_context = True + + def format_chat_chunks(self): + """ + Override parent's format_chat_chunks to include enhanced context blocks. + """ + # First get the normal chat chunks from the parent method + chunks = super().format_chat_chunks() + + # If enhanced context blocks are enabled, prepend them to the current messages + if self.use_enhanced_context: + # Create an enhanced context message if we have the directory structure or git status + context_blocks = [] + + # Get directory structure + dir_structure = self.get_directory_structure() + if dir_structure: + context_blocks.append(dir_structure) + + # Get git status + git_status = self.get_git_status() + if git_status: + context_blocks.append(git_status) + + # If we have any context blocks, prepend them to the current messages + if context_blocks: + context_message = "\n\n".join(context_blocks) + # Prepend to system context but don't overwrite existing system content + if chunks.system: + # If we already have system messages, append our context to the first one + original_content = chunks.system[0]["content"] + chunks.system[0]["content"] = context_message + "\n\n" + original_content + else: + # Otherwise, create a new system message + chunks.system = [dict(role="system", content=context_message)] + + return chunks + + def reply_completed(self): + """Process the completed response from the LLM. + + This is a key method that: + 1. Processes any tool commands in the response + 2. If tool commands were found, sets up for another automatic round + 3. Otherwise, completes the response normally + + This enables the "auto-exploration" workflow where the LLM can + iteratively discover and analyze relevant files before providing + a final answer to the user's question. 
+ """ + content = self.partial_response_content + if not content or not content.strip(): + return True + original_content = content # Keep the original response + + # Process tool commands: returns content with tool calls removed, results, and continue flag + processed_content, result_messages, continue_requested = self._process_tool_commands(content) + + # Since we are no longer suppressing, the partial_response_content IS the final content. + # We might want to update it to the processed_content (without tool calls) if we don't + # want the raw tool calls to remain in the final assistant message history. + # Let's update it for cleaner history. + self.partial_response_content = processed_content.strip() + + # Process implicit file mentions using the content *after* tool calls were removed + self._process_file_mentions(processed_content) + + # If continue was requested and we haven't exceeded reflection limits, set up for another iteration + if continue_requested and self.num_reflections < self.max_reflections: + # Reset tool counter for next iteration + self.tool_call_count = 0 + # Clear exploration files for the next round + self.files_added_in_exploration = set() + + # Get the original user question from the most recent user message + if self.cur_messages and len(self.cur_messages) >= 1: + for msg in reversed(self.cur_messages): + if msg["role"] == "user": + original_question = msg["content"] + break + else: + # Default if no user message found + original_question = "Please continue your exploration and provide a final answer." + + # Construct the message for the next turn, including tool results + next_prompt_parts = [] + next_prompt_parts.append( + "I have processed the results of the previous tool calls. " + "Let me analyze them and continue working towards your request." + ) + + if result_messages: + next_prompt_parts.append("\nResults from previous tool calls:") + # result_messages already have [Result (...): ...] 
format + next_prompt_parts.extend(result_messages) + next_prompt_parts.append("\nBased on these results and the updated file context, I will proceed.") + else: + next_prompt_parts.append("\nNo specific results were returned from the previous tool calls, but the file context may have been updated. I will proceed based on the current context.") + + next_prompt_parts.append(f"\nYour original question was: {original_question}") + + self.reflected_message = "\n".join(next_prompt_parts) + + self.io.tool_output("Continuing exploration...") + return False # Indicate that we need another iteration + else: + # Exploration finished for this turn. + # Append results to the content that will be stored in history. + if result_messages: + results_block = "\n\n" + "\n".join(result_messages) + # Append results to the cleaned content + self.partial_response_content += results_block + + # Check if the content contains the SEARCH/REPLACE markers + has_search = "<<<<<<< SEARCH" in self.partial_response_content + has_divider = "=======" in self.partial_response_content + has_replace = ">>>>>>> REPLACE" in self.partial_response_content + edit_match = has_search and has_divider and has_replace + + if edit_match: + self.io.tool_output("Detected edit blocks, applying changes within Navigator...") + edited_files = self._apply_edits_from_response() + # If _apply_edits_from_response set a reflected_message (due to errors), + # return False to trigger a reflection loop. + if self.reflected_message: + return False + else: + # No edits detected. + pass + + # After applying edits OR determining no edits were needed (and no reflection needed), + # the turn is complete. Reset counters and finalize history. 
+ self.tool_call_count = 0 + self.files_added_in_exploration = set() + # Move cur_messages to done_messages + self.move_back_cur_messages(None) # Pass None as we handled commit message earlier if needed + return True # Indicate exploration is finished for this round + + def _process_tool_commands(self, content): + """ + Process tool commands in the `[tool_call(name, param=value)]` format within the content. + """ + result_messages = [] + modified_content = content # Start with original content + continue_requested = False + call_count = 0 + max_calls = self.max_tool_calls + + # Regex to find tool calls: [tool_call(name, key=value, key="value", ...)] + # It captures the tool name and the arguments string. + # It handles quoted and unquoted values. + tool_call_pattern = re.compile( + r"\[tool_call\(\s*([a-zA-Z_][a-zA-Z0-9_]*)\s*" # Tool name + r"(?:,\s*(.*?))?\s*\)\]" # Optional arguments string (non-greedy) + ) + + # Regex to parse key=value pairs within the arguments string + # Handles key=value, key="value", key='value' + # Allows values to contain commas if quoted + args_pattern = re.compile( + r"([a-zA-Z_][a-zA-Z0-9_]*)\s*=\s*" # Key + r"(?:\"(.*?)\"|\'(.*?)\'|([^,\s\'\"]*))" # Value (quoted or unquoted) + ) + + processed_indices = set() # Keep track of processed match ranges + + for match in tool_call_pattern.finditer(content): + # Skip overlapping matches if a previous match already covered this area + if any(match.start() >= start and match.end() <= end for start, end in processed_indices): + continue + + call_count += 1 + if call_count > max_calls: + self.io.tool_warning(f"Exceeded maximum tool calls ({max_calls}). 
Skipping remaining calls.") + break + + tool_name = match.group(1) + args_str = match.group(2) or "" + full_match_str = match.group(0) + + # Handle Continue separately + if tool_name.lower() == 'continue': + continue_requested = True + # Remove this specific call from the content + modified_content = modified_content.replace(full_match_str, "", 1) + processed_indices.add((match.start(), match.end())) + continue # Don't process further, just note the request + + # Extract parameters + params = {} + suppressed_arg_values = ["..."] # Values to ignore during parsing + try: + for arg_match in args_pattern.finditer(args_str): + key = arg_match.group(1) + # Value can be in group 2 (double quotes), 3 (single quotes), or 4 (unquoted) + value = arg_match.group(2) or arg_match.group(3) or arg_match.group(4) + + # Check if the value is suppressed + if value in suppressed_arg_values: + self.io.tool_warning(f"Skipping suppressed argument value '{value}' for key '{key}' in tool '{tool_name}'") + continue # Skip this argument + + params[key] = value if value is not None else "" + except Exception as e: + result_messages.append(f"[Result ({tool_name}): Error parsing arguments '{args_str}': {e}]") + # Remove the malformed call from the content + modified_content = modified_content.replace(full_match_str, "", 1) + processed_indices.add((match.start(), match.end())) + continue # Skip execution if args parsing failed + + # Execute the tool based on its name + result_message = None + try: + # Normalize tool name for case-insensitive matching + norm_tool_name = tool_name.lower() + + if norm_tool_name == 'glob': + pattern = params.get('pattern') + if pattern is not None: + result_message = self._execute_glob(pattern) + else: + result_message = "Error: Missing 'pattern' parameter for Glob" + elif norm_tool_name == 'grep': + pattern = params.get('pattern') + file_pattern = params.get('file_pattern') # Optional + if pattern is not None: + result_message = self._execute_grep(pattern, 
file_pattern) + else: + result_message = "Error: Missing 'pattern' parameter for Grep" + elif norm_tool_name == 'ls': + directory = params.get('directory') + if directory is not None: + result_message = self._execute_ls(directory) + else: + result_message = "Error: Missing 'directory' parameter for Ls" + elif norm_tool_name == 'add': + file_path = params.get('file_path') + if file_path is not None: + result_message = self._execute_add(file_path) + else: + result_message = "Error: Missing 'file_path' parameter for Add" + elif norm_tool_name == 'remove': + file_path = params.get('file_path') + if file_path is not None: + result_message = self._execute_remove(file_path) + else: + result_message = "Error: Missing 'file_path' parameter for Remove" + elif norm_tool_name == 'makeeditable': + file_path = params.get('file_path') + if file_path is not None: + result_message = self._execute_make_editable(file_path) + else: + result_message = "Error: Missing 'file_path' parameter for MakeEditable" + elif norm_tool_name == 'makereadonly': + file_path = params.get('file_path') + if file_path is not None: + result_message = self._execute_make_readonly(file_path) + else: + result_message = "Error: Missing 'file_path' parameter for MakeReadonly" + elif norm_tool_name == 'find': + symbol = params.get('symbol') + if symbol is not None: + result_message = self._execute_find(symbol) + else: + result_message = "Error: Missing 'symbol' parameter for Find" + elif norm_tool_name == 'command': + command_string = params.get('command_string') + if command_string is not None: + result_message = self._execute_command(command_string) + else: + result_message = "Error: Missing 'command_string' parameter for Command" + else: + result_message = f"Error: Unknown tool name '{tool_name}'" + + except Exception as e: + result_message = f"Error executing {tool_name}: {str(e)}" + self.io.tool_error(f"Error during {tool_name} execution: {e}") + + if result_message: + result_messages.append(f"[Result 
({tool_name}): {result_message}]") + + # Remove the processed tool call from the content + modified_content = modified_content.replace(full_match_str, "", 1) + processed_indices.add((match.start(), match.end())) + + # Update internal counter + self.tool_call_count += call_count + + return modified_content, result_messages, continue_requested + + def _apply_edits_from_response(self): + """ + Parses and applies SEARCH/REPLACE edits found in self.partial_response_content. + Returns a set of relative file paths that were successfully edited. + """ + edited_files = set() + try: + # 1. Get edits (logic from EditBlockCoder.get_edits) + # Use the current partial_response_content which contains the LLM response + # including the edit blocks but excluding the tool calls. + edits = list( + find_original_update_blocks( + self.partial_response_content, + self.fence, + self.get_inchat_relative_files(), + ) + ) + # Separate shell commands from file edits + self.shell_commands += [edit[1] for edit in edits if edit[0] is None] + edits = [edit for edit in edits if edit[0] is not None] + + # 2. Prepare edits (check permissions, commit dirty files) + prepared_edits = [] + seen_paths = dict() + self.need_commit_before_edits = set() # Reset before checking + + for edit in edits: + path = edit[0] + if path in seen_paths: + allowed = seen_paths[path] + else: + # Use the base Coder's permission check method + allowed = self.allowed_to_edit(path) + seen_paths[path] = allowed + if allowed: + prepared_edits.append(edit) + + # Commit any dirty files identified by allowed_to_edit + self.dirty_commit() + self.need_commit_before_edits = set() # Clear after commit + + # 3. 
Apply edits (logic adapted from EditBlockCoder.apply_edits) + failed = [] + passed = [] + for edit in prepared_edits: + path, original, updated = edit + full_path = self.abs_root_path(path) + new_content = None + + if Path(full_path).exists(): + content = self.io.read_text(full_path) + # Use the imported do_replace function + new_content = do_replace(full_path, content, original, updated, self.fence) + + # Simplified cross-file patching check from EditBlockCoder + if not new_content and original.strip(): + for other_full_path in self.abs_fnames: + if other_full_path == full_path: continue + other_content = self.io.read_text(other_full_path) + other_new_content = do_replace(other_full_path, other_content, original, updated, self.fence) + if other_new_content: + path = self.get_rel_fname(other_full_path) + full_path = other_full_path + new_content = other_new_content + self.io.tool_warning(f"Applied edit intended for {edit[0]} to {path}") + break + + if new_content: + if not self.dry_run: + self.io.write_text(full_path, new_content) + self.io.tool_output(f"Applied edit to {path}") + else: + self.io.tool_output(f"Did not apply edit to {path} (--dry-run)") + passed.append((path, original, updated)) # Store path relative to root + else: + failed.append(edit) + + if failed: + # Handle failed edits (adapted from EditBlockCoder) + blocks = "block" if len(failed) == 1 else "blocks" + error_message = f"# {len(failed)} SEARCH/REPLACE {blocks} failed to match!\n" + for edit in failed: + path, original, updated = edit + full_path = self.abs_root_path(path) + content = self.io.read_text(full_path) # Read content again for context + + error_message += f""" +## SearchReplaceNoExactMatch: This SEARCH block failed to exactly match lines in {path} +<<<<<<< SEARCH +{original}======= +{updated}>>>>>>> REPLACE + +""" + did_you_mean = find_similar_lines(original, content) + if did_you_mean: + error_message += f"""Did you mean to match some of these actual lines from {path}? 
+ +{self.fence[0]} +{did_you_mean} +{self.fence[1]} + +""" + if updated in content and updated: + error_message += f"""Are you sure you need this SEARCH/REPLACE block? +The REPLACE lines are already in {path}! + +""" + error_message += ( + "The SEARCH section must exactly match an existing block of lines including all white" + " space, comments, indentation, docstrings, etc\n" + ) + if passed: + pblocks = "block" if len(passed) == 1 else "blocks" + error_message += f""" +# The other {len(passed)} SEARCH/REPLACE {pblocks} were applied successfully. +Don't re-send them. +Just reply with fixed versions of the {blocks} above that failed to match. +""" + self.io.tool_error(error_message) + # Set reflected_message to prompt LLM to fix the failed blocks + self.reflected_message = error_message + + edited_files = set(edit[0] for edit in passed) # Use relative paths stored in passed + + # 4. Post-edit actions (commit, lint, test, shell commands) + if edited_files: + self.aider_edited_files.update(edited_files) # Track edited files + saved_message = self.auto_commit(edited_files) + # We don't use saved_message here as we are not moving history back + + if self.auto_lint: + lint_errors = self.lint_edited(edited_files) + self.auto_commit(edited_files, context="Ran the linter") + if lint_errors and not self.reflected_message: # Reflect only if no edit errors + ok = self.io.confirm_ask("Attempt to fix lint errors?") + if ok: + self.reflected_message = lint_errors + + shared_output = self.run_shell_commands() + if shared_output: + # Add shell output as a new user message? Or just display? 
+ # Let's just display for now to avoid complex history manipulation + self.io.tool_output("Shell command output:\n" + shared_output) + + if self.auto_test and not self.reflected_message: # Reflect only if no prior errors + test_errors = self.commands.cmd_test(self.test_cmd) + if test_errors: + ok = self.io.confirm_ask("Attempt to fix test errors?") + if ok: + self.reflected_message = test_errors + + self.show_undo_hint() + + except ValueError as err: + # Handle parsing errors from find_original_update_blocks + self.num_malformed_responses += 1 + error_message = err.args[0] + self.io.tool_error("The LLM did not conform to the edit format.") + self.io.tool_output(urls.edit_errors) + self.io.tool_output() + self.io.tool_output(str(error_message)) + self.reflected_message = str(error_message) # Reflect parsing errors + except ANY_GIT_ERROR as err: + self.io.tool_error(f"Git error during edit application: {str(err)}") + self.reflected_message = f"Git error during edit application: {str(err)}" + except Exception as err: + self.io.tool_error("Exception while applying edits:") + self.io.tool_error(str(err), strip=False) + traceback.print_exc() + self.reflected_message = f"Exception while applying edits: {str(err)}" + + return edited_files + + def _execute_glob(self, pattern): + """ + Execute a glob pattern and add matching files to context. + + This tool helps the LLM find files by pattern matching, similar to + how a developer would use glob patterns to find files. 
+ """ + try: + # Find files matching the pattern + matching_files = [] + + # Make the pattern relative to root if it's absolute + if pattern.startswith('/'): + pattern = os.path.relpath(pattern, self.root) + + # Get all files in the repo + all_files = self.get_all_relative_files() + + # Find matches with pattern matching + for file in all_files: + if fnmatch.fnmatch(file, pattern): + matching_files.append(file) + + # Limit the number of files added if there are too many matches + if len(matching_files) > self.max_files_per_glob: + self.io.tool_output( + f"⚠️ Found {len(matching_files)} files matching '{pattern}', " + f"limiting to {self.max_files_per_glob} most relevant files." + ) + # Sort by modification time (most recent first) + matching_files.sort(key=lambda f: os.path.getmtime(self.abs_root_path(f)), reverse=True) + matching_files = matching_files[:self.max_files_per_glob] + + # Add files to context + for file in matching_files: + self._add_file_to_context(file) + + # Return a user-friendly result + if matching_files: + if len(matching_files) > 10: + brief = ', '.join(matching_files[:5]) + f', and {len(matching_files)-5} more' + self.io.tool_output(f"📂 Added {len(matching_files)} files matching '{pattern}': {brief}") + else: + self.io.tool_output(f"📂 Added files matching '{pattern}': {', '.join(matching_files)}") + return f"Added {len(matching_files)} files: {', '.join(matching_files[:5])}{' and more' if len(matching_files) > 5 else ''}" + else: + self.io.tool_output(f"⚠️ No files found matching '{pattern}'") + return f"No files found matching '{pattern}'" + except Exception as e: + self.io.tool_error(f"Error in glob: {str(e)}") + return f"Error: {str(e)}" + + def _execute_grep(self, search_pattern, file_pattern=None): + """ + Search for pattern in files and add matching files to context. + + This tool lets the LLM search for content within files, mimicking + how a developer would use grep to find relevant code. 
+ """ + try: + # Get list of files to search + if file_pattern: + # Use glob pattern to filter files + all_files = self.get_all_relative_files() + files_to_search = [] + for file in all_files: + if fnmatch.fnmatch(file, file_pattern): + files_to_search.append(file) + + if not files_to_search: + return f"No files matching '{file_pattern}' to search for pattern '{search_pattern}'" + else: + # Search all files if no pattern provided + files_to_search = self.get_all_relative_files() + + # Search for pattern in files + matches = {} + for file in files_to_search: + abs_path = self.abs_root_path(file) + try: + with open(abs_path, 'r', encoding='utf-8') as f: + content = f.read() + if search_pattern in content: + matches[file] = content.count(search_pattern) + except Exception: + # Skip files that can't be read (binary, etc.) + pass + + # Limit the number of files added if there are too many matches + if len(matches) > self.max_files_per_glob: + self.io.tool_output( + f"⚠️ Found '{search_pattern}' in {len(matches)} files, " + f"limiting to {self.max_files_per_glob} files with most matches." 
+ ) + # Sort by number of matches (most matches first) + sorted_matches = sorted(matches.items(), key=lambda x: x[1], reverse=True) + matches = dict(sorted_matches[:self.max_files_per_glob]) + + # Add matching files to context + for file in matches: + self._add_file_to_context(file) + + # Return a user-friendly result + if matches: + # Sort by number of matches (most matches first) + sorted_matches = sorted(matches.items(), key=lambda x: x[1], reverse=True) + match_list = [f"{file} ({count} matches)" for file, count in sorted_matches[:5]] + + if len(sorted_matches) > 5: + self.io.tool_output(f"🔍 Found '{search_pattern}' in {len(matches)} files: {', '.join(match_list)} and {len(matches)-5} more") + return f"Found in {len(matches)} files: {', '.join(match_list)} and {len(matches)-5} more" + else: + self.io.tool_output(f"🔍 Found '{search_pattern}' in: {', '.join(match_list)}") + return f"Found in {len(matches)} files: {', '.join(match_list)}" + else: + self.io.tool_output(f"⚠️ Pattern '{search_pattern}' not found in any files") + return f"Pattern not found in any files" + except Exception as e: + self.io.tool_error(f"Error in grep: {str(e)}") + return f"Error: {str(e)}" + + def _execute_ls(self, dir_path): + """ + List files in directory and optionally add some to context. + + This provides information about the structure of the codebase, + similar to how a developer would explore directories. 
+ """ + try: + # Make the path relative to root if it's absolute + if dir_path.startswith('/'): + rel_dir = os.path.relpath(dir_path, self.root) + else: + rel_dir = dir_path + + # Get absolute path + abs_dir = self.abs_root_path(rel_dir) + + # Check if path exists + if not os.path.exists(abs_dir): + self.io.tool_output(f"⚠️ Directory '{dir_path}' not found") + return f"Directory not found" + + # Get directory contents + contents = [] + try: + with os.scandir(abs_dir) as entries: + for entry in entries: + if entry.is_file() and not entry.name.startswith('.'): + rel_path = os.path.join(rel_dir, entry.name) + contents.append(rel_path) + except NotADirectoryError: + # If it's a file, just return the file + contents = [rel_dir] + + if contents: + self.io.tool_output(f"📋 Listed {len(contents)} file(s) in '{dir_path}'") + if len(contents) > 10: + return f"Found {len(contents)} files: {', '.join(contents[:10])}..." + else: + return f"Found {len(contents)} files: {', '.join(contents)}" + else: + self.io.tool_output(f"📋 No files found in '{dir_path}'") + return f"No files found in directory" + except Exception as e: + self.io.tool_error(f"Error in ls: {str(e)}") + return f"Error: {str(e)}" + + def _execute_add(self, file_path): + """ + Explicitly add a file to context as read-only. + + This gives the LLM explicit control over what files to add, + rather than relying on indirect mentions. + """ + try: + return self._add_file_to_context(file_path, True) + except Exception as e: + self.io.tool_error(f"Error adding file: {str(e)}") + return f"Error: {str(e)}" + + def _add_file_to_context(self, file_path, explicit=False): + """ + Helper method to add a file to context as read-only. + + Parameters: + - file_path: Path to the file to add + - explicit: Whether this was an explicit add command (vs. 
implicit through glob/grep) + """ + # Check if file exists + abs_path = self.abs_root_path(file_path) + rel_path = self.get_rel_fname(abs_path) + + if not os.path.isfile(abs_path): + self.io.tool_output(f"⚠️ File '{file_path}' not found") + return f"File not found" + + # Check if the file is already in context (either editable or read-only) + if abs_path in self.abs_fnames: + if explicit: + self.io.tool_output(f"📎 File '{file_path}' already in context as editable") + return f"File already in context as editable" + return f"File already in context as editable" + + if abs_path in self.abs_read_only_fnames: + if explicit: + self.io.tool_output(f"📎 File '{file_path}' already in context as read-only") + return f"File already in context as read-only" + return f"File already in context as read-only" + + # Add file to context as read-only + try: + # Check for large file and apply context management if enabled + content = self.io.read_text(abs_path) + if content is None: + return f"Error reading file: {file_path}" + + # Check if file is very large and context management is enabled + if self.context_management_enabled: + file_tokens = self.main_model.token_count(content) + if file_tokens > self.large_file_token_threshold: + self.io.tool_output( + f"⚠️ '{file_path}' is very large ({file_tokens} tokens). " + "Use /context-management to toggle truncation off if needed." 
+ ) + + # Add to read-only files + self.abs_read_only_fnames.add(abs_path) + + # Track in exploration set + self.files_added_in_exploration.add(rel_path) + + # Inform user + if explicit: + self.io.tool_output(f"📎 Added '{file_path}' to context as read-only") + return f"Added file to context as read-only" + else: + # For implicit adds (from glob/grep), just return success + return f"Added file to context as read-only" + + except Exception as e: + self.io.tool_error(f"Error adding file '{file_path}': {str(e)}") + return f"Error adding file: {str(e)}" + + def _execute_make_editable(self, file_path): + """ + Convert a read-only file to an editable file. + + This allows the LLM to upgrade a file from read-only to editable + when it determines it needs to make changes to that file. + """ + try: + # Get absolute path + abs_path = self.abs_root_path(file_path) + rel_path = self.get_rel_fname(abs_path) + + # Check if file is in read-only context + if abs_path not in self.abs_read_only_fnames: + if abs_path in self.abs_fnames: + self.io.tool_output(f"📝 File '{file_path}' is already editable") + return f"File is already editable" + else: + self.io.tool_output(f"⚠️ File '{file_path}' not in context") + return f"File not in context" + + # Move from read-only to editable + self.abs_read_only_fnames.remove(abs_path) + self.abs_fnames.add(abs_path) + + self.io.tool_output(f"📝 Made '{file_path}' editable") + return f"File is now editable" + except Exception as e: + self.io.tool_error(f"Error making file editable: {str(e)}") + return f"Error: {str(e)}" + + def _execute_make_readonly(self, file_path): + """ + Convert an editable file to a read-only file. + + This allows the LLM to downgrade a file from editable to read-only + when it determines it no longer needs to make changes to that file. 
+ """ + try: + # Get absolute path + abs_path = self.abs_root_path(file_path) + rel_path = self.get_rel_fname(abs_path) + + # Check if file is in editable context + if abs_path not in self.abs_fnames: + if abs_path in self.abs_read_only_fnames: + self.io.tool_output(f"📚 File '{file_path}' is already read-only") + return f"File is already read-only" + else: + self.io.tool_output(f"⚠️ File '{file_path}' not in context") + return f"File not in context" + + # Move from editable to read-only + self.abs_fnames.remove(abs_path) + self.abs_read_only_fnames.add(abs_path) + + self.io.tool_output(f"📚 Made '{file_path}' read-only") + return f"File is now read-only" + except Exception as e: + self.io.tool_error(f"Error making file read-only: {str(e)}") + return f"Error: {str(e)}" + + def _execute_remove(self, file_path): + """ + Explicitly remove a file from context. + + This allows the LLM to clean up its context when files are no + longer needed, keeping the context focused and efficient. + """ + try: + # Get absolute path + abs_path = self.abs_root_path(file_path) + rel_path = self.get_rel_fname(abs_path) + + # Check if file is in context (either editable or read-only) + removed = False + if abs_path in self.abs_fnames: + # Don't remove if it's the last editable file and there are no read-only files + if len(self.abs_fnames) <= 1 and not self.abs_read_only_fnames: + self.io.tool_output(f"⚠️ Cannot remove '{file_path}' - it's the only file in context") + return f"Cannot remove - last file in context" + self.abs_fnames.remove(abs_path) + removed = True + elif abs_path in self.abs_read_only_fnames: + # Don't remove if it's the last read-only file and there are no editable files + if len(self.abs_read_only_fnames) <= 1 and not self.abs_fnames: + self.io.tool_output(f"⚠️ Cannot remove '{file_path}' - it's the only file in context") + return f"Cannot remove - last file in context" + self.abs_read_only_fnames.remove(abs_path) + removed = True + + if not removed: + 
self.io.tool_output(f"⚠️ File '{file_path}' not in context") + return f"File not in context" + + # Track in recently removed + self.recently_removed[rel_path] = { + 'removed_at': time.time() + } + + self.io.tool_output(f"🗑️ Explicitly removed '{file_path}' from context") + return f"Removed file from context" + except Exception as e: + self.io.tool_error(f"Error removing file: {str(e)}") + return f"Error: {str(e)}" + + def _execute_find(self, symbol): + """ + Find files containing a specific symbol and add them to context as read-only. + """ + try: + if not self.repo_map: + self.io.tool_output("⚠️ Repo map not available, cannot use Find tool.") + return "Repo map not available" + + if not symbol: + return "Error: Missing 'symbol' parameter for Find" + + self.io.tool_output(f"🔎 Searching for symbol '{symbol}'...") + found_files = set() + current_context_files = self.abs_fnames | self.abs_read_only_fnames + files_to_search = set(self.get_all_abs_files()) - current_context_files + + rel_fname_to_abs = {} + all_tags = [] + + for fname in files_to_search: + rel_fname = self.get_rel_fname(fname) + rel_fname_to_abs[rel_fname] = fname + try: + tags = self.repo_map.get_tags(fname, rel_fname) + all_tags.extend(tags) + except Exception as e: + self.io.tool_warning(f"Could not get tags for {rel_fname}: {e}") + + # Find matching symbols + for tag in all_tags: + if tag.name == symbol: + # Use absolute path directly if available, otherwise resolve from relative path + abs_fname = rel_fname_to_abs.get(tag.rel_fname) or self.abs_root_path(tag.fname) + if abs_fname in files_to_search: # Ensure we only add files we intended to search + found_files.add(abs_fname) + + # Limit the number of files added + if len(found_files) > self.max_files_per_glob: + self.io.tool_output( + f"⚠️ Found symbol '{symbol}' in {len(found_files)} files, " + f"limiting to {self.max_files_per_glob} most relevant files." 
+ ) + # Sort by modification time (most recent first) - approximate relevance + sorted_found_files = sorted(list(found_files), key=lambda f: os.path.getmtime(f), reverse=True) + found_files = set(sorted_found_files[:self.max_files_per_glob]) + + # Add files to context (as read-only) + added_count = 0 + added_files_rel = [] + for abs_file_path in found_files: + rel_path = self.get_rel_fname(abs_file_path) + # Double check it's not already added somehow + if abs_file_path not in self.abs_fnames and abs_file_path not in self.abs_read_only_fnames: + add_result = self._add_file_to_context(rel_path, explicit=True) # Use explicit=True for clear output + if "Added" in add_result: + added_count += 1 + added_files_rel.append(rel_path) + + if added_count > 0: + if added_count > 5: + brief = ', '.join(added_files_rel[:5]) + f', and {added_count-5} more' + self.io.tool_output(f"🔎 Found '{symbol}' and added {added_count} files: {brief}") + else: + self.io.tool_output(f"🔎 Found '{symbol}' and added files: {', '.join(added_files_rel)}") + return f"Found symbol '{symbol}' and added {added_count} files as read-only." + else: + self.io.tool_output(f"⚠️ Symbol '{symbol}' not found in searchable files.") + return f"Symbol '{symbol}' not found in searchable files." + + except Exception as e: + self.io.tool_error(f"Error in find: {str(e)}") + return f"Error: {str(e)}" + + def _execute_command(self, command_string): + """ + Execute an aider command after user confirmation. 
+ """ + try: + # Ask for confirmation before executing, allowing 'Always' + # Use the command string itself as the group key to remember preference per command + if not self.io.confirm_ask( + "Allow execution of this command?", + subject=command_string, + explicit_yes_required=True, # Require explicit 'yes' or 'always' + allow_never=True # Enable the 'Don't ask again' option + ): + # Check if the reason for returning False was *not* because it's remembered + # (confirm_ask returns False if 'n' or 'no' is chosen, even if remembered) + # We only want to skip if the user actively said no *this time* or if it's + # remembered as 'never' (which shouldn't happen with allow_never=True, + # but checking io.never_ask_group is robust). + # If the command is in never_ask_group with a True value (meaning Always), + # confirm_ask would have returned True directly. + # So, if confirm_ask returns False here, it means the user chose No this time. + self.io.tool_output(f"Skipped execution of shell command: {command_string}") + return "Shell command execution skipped by user." + + self.io.tool_output(f"⚙️ Executing non-interactive shell command: {command_string}") + + # Use run_cmd_subprocess for non-interactive execution + exit_status, combined_output = run_cmd_subprocess( + command_string, + verbose=self.verbose, + cwd=self.root # Execute in the project root + ) + + # Format the output for the result message, include more content + output_content = combined_output or "" + # Use the existing token threshold constant as the character limit for truncation + output_limit = self.large_file_token_threshold + if len(output_content) > output_limit: + # Truncate and add a clear message using the constant value + output_content = output_content[:output_limit] + f"\n... (output truncated at {output_limit} characters, based on large_file_token_threshold)" + + if exit_status == 0: + return f"Shell command executed successfully (exit code 0). 
Output:\n{output_content}" + else: + return f"Shell command failed with exit code {exit_status}. Output:\n{output_content}" + + except Exception as e: + self.io.tool_error(f"Error executing non-interactive shell command '{command_string}': {str(e)}") + # Optionally include traceback for debugging if verbose + # if self.verbose: + # self.io.tool_error(traceback.format_exc()) + return f"Error executing command: {str(e)}" + + + def _process_file_mentions(self, content): + """ + Process implicit file mentions in the content, adding files if they're not already in context. + + This handles the case where the LLM mentions file paths without using explicit tool commands. + """ + # Extract file mentions using the parent class's method + mentioned_files = set(self.get_file_mentions(content, ignore_current=False)) + current_files = set(self.get_inchat_relative_files()) + + # Get new files to add (not already in context) + new_files = mentioned_files - current_files + + # In navigator mode, we *only* add files via explicit tool commands. + # Do nothing here for implicit mentions. + pass + + + + + def check_for_file_mentions(self, content): + """ + Override parent's method to use our own file processing logic. + + Override parent's method to disable implicit file mention handling in navigator mode. + Files should only be added via explicit tool commands (`Add`, `Glob`, `Grep`). + """ + # Do nothing - disable implicit file adds in navigator mode. + pass + + def get_directory_structure(self): + """ + Generate a structured directory listing similar to Claude Code's directoryStructure. + Returns a formatted string representation of the directory tree. + """ + if not self.use_enhanced_context: + return None + + try: + # Start with the header + result = "Below is a snapshot of this project's file structure at the current time. 
It skips over .gitignore patterns.\n\n" + + # Get the root directory + root_path = Path(self.root) + root_str = str(root_path) + + # Get all files in the repo (both tracked and untracked) + if self.repo: + # Get tracked files + tracked_files = self.repo.get_tracked_files() + + # Get untracked files (files present in the working directory but not in git) + untracked_files = [] + try: + # Run git status to get untracked files + untracked_output = self.repo.repo.git.status('--porcelain') + for line in untracked_output.splitlines(): + if line.startswith('??'): + # Extract the filename (remove the '?? ' prefix) + untracked_file = line[3:] + if not self.repo.git_ignored_file(untracked_file): + untracked_files.append(untracked_file) + except Exception as e: + self.io.tool_warning(f"Error getting untracked files: {str(e)}") + + # Combine tracked and untracked files + all_files = tracked_files + untracked_files + else: + # If no repo, get all files relative to root + all_files = [] + for path in Path(self.root).rglob('*'): + if path.is_file(): + all_files.append(str(path.relative_to(self.root))) + + # Sort files to ensure deterministic output + all_files = sorted(all_files) + + # Filter out .aider files/dirs + all_files = [f for f in all_files if not any(part.startswith('.aider') for part in f.split('/'))] + + # Build tree structure + tree = {} + for file in all_files: + parts = file.split('/') + current = tree + for i, part in enumerate(parts): + if i == len(parts) - 1: # Last part (file) + if '.' 
not in current: + current['.'] = [] + current['.'].append(part) + else: # Directory + if part not in current: + current[part] = {} + current = current[part] + + # Function to recursively print the tree + def print_tree(node, prefix="- ", indent=" ", path=""): + lines = [] + # First print all directories + dirs = sorted([k for k in node.keys() if k != '.']) + for i, dir_name in enumerate(dirs): + full_path = f"{path}/{dir_name}" if path else dir_name + lines.append(f"{prefix}{full_path}/") + sub_lines = print_tree(node[dir_name], prefix=prefix, indent=indent, path=full_path) + for sub_line in sub_lines: + lines.append(f"{indent}{sub_line}") + + # Then print all files + if '.' in node: + for file_name in sorted(node['.']): + lines.append(f"{prefix}{path}/{file_name}" if path else f"{prefix}{file_name}") + + return lines + + # Generate the tree starting from root + tree_lines = print_tree(tree, prefix="- ") + result += "\n".join(tree_lines) + result += "\n" + + return result + except Exception as e: + self.io.tool_error(f"Error generating directory structure: {str(e)}") + return None + + def get_git_status(self): + """ + Generate a git status context block similar to Claude Code's gitStatus. + Returns a formatted string with git branch, status, and recent commits. 
+ """ + if not self.use_enhanced_context or not self.repo: + return None + + try: + result = "This is a snapshot of the git status at the current time.\n" + + # Get current branch + try: + current_branch = self.repo.repo.active_branch.name + result += f"Current branch: {current_branch}\n\n" + except Exception: + result += "Current branch: (detached HEAD state)\n\n" + + # Get main/master branch + main_branch = None + try: + for branch in self.repo.repo.branches: + if branch.name in ('main', 'master'): + main_branch = branch.name + break + if main_branch: + result += f"Main branch (you will usually use this for PRs): {main_branch}\n\n" + except Exception: + pass + + # Git status + result += "Status:\n" + try: + # Get modified files + status = self.repo.repo.git.status('--porcelain') + + # Process and categorize the status output + if status: + status_lines = status.strip().split('\n') + + # Group by status type for better organization + staged_added = [] + staged_modified = [] + staged_deleted = [] + unstaged_modified = [] + unstaged_deleted = [] + untracked = [] + + for line in status_lines: + if len(line) < 4: # Ensure the line has enough characters + continue + + status_code = line[:2] + file_path = line[3:] + + # Skip .aider files/dirs + if any(part.startswith('.aider') for part in file_path.split('/')): + continue + + # Staged changes + if status_code[0] == 'A': + staged_added.append(file_path) + elif status_code[0] == 'M': + staged_modified.append(file_path) + elif status_code[0] == 'D': + staged_deleted.append(file_path) + # Unstaged changes + if status_code[1] == 'M': + unstaged_modified.append(file_path) + elif status_code[1] == 'D': + unstaged_deleted.append(file_path) + # Untracked files + if status_code == '??': + untracked.append(file_path) + + # Output in a nicely formatted manner + if staged_added: + for file in staged_added: + result += f"A {file}\n" + if staged_modified: + for file in staged_modified: + result += f"M {file}\n" + if staged_deleted: + 
for file in staged_deleted: + result += f"D {file}\n" + if unstaged_modified: + for file in unstaged_modified: + result += f" M {file}\n" + if unstaged_deleted: + for file in unstaged_deleted: + result += f" D {file}\n" + if untracked: + for file in untracked: + result += f"?? {file}\n" + else: + result += "Working tree clean\n" + except Exception as e: + result += f"Unable to get modified files: {str(e)}\n" + + # Recent commits + result += "\nRecent commits:\n" + try: + commits = list(self.repo.repo.iter_commits(max_count=5)) + for commit in commits: + short_hash = commit.hexsha[:8] + message = commit.message.strip().split('\n')[0] # First line only + result += f"{short_hash} {message}\n" + except Exception: + result += "Unable to get recent commits\n" + + result += "" + return result + except Exception as e: + self.io.tool_error(f"Error generating git status: {str(e)}") + return None + + def cmd_context_blocks(self, args=""): + """ + Toggle enhanced context blocks feature. + """ + self.use_enhanced_context = not self.use_enhanced_context + + if self.use_enhanced_context: + self.io.tool_output("Enhanced context blocks are now ON - directory structure and git status will be included.") + else: + self.io.tool_output("Enhanced context blocks are now OFF - directory structure and git status will not be included.") + + return True diff --git a/aider/coders/navigator_prompts.py b/aider/coders/navigator_prompts.py new file mode 100644 index 000000000..1de3d9b44 --- /dev/null +++ b/aider/coders/navigator_prompts.py @@ -0,0 +1,145 @@ +# flake8: noqa: E501 + +from .base_prompts import CoderPrompts + + +class NavigatorPrompts(CoderPrompts): + main_system = """Act as an expert software engineer with the ability to autonomously navigate and modify a codebase. + +You have the unique ability to control which files are visible in the conversation using special tool commands, structured as `[tool_call(tool_name, param1=value1, param2="value2")]`. 
+Use these tools to effectively manage context and find relevant files: + +`[tool_call(Glob, pattern="**/*.py")]` - Find files matching a glob pattern and add them to context as read-only. + +`[tool_call(Grep, pattern="class User", file_pattern="*.py")]` - Search for text in files and add matching files to context as read-only. `file_pattern` is optional. + +`[tool_call(Ls, directory="src/components")]` - List files in a directory. + +`[tool_call(Add, file_path="src/main.py")]` - Explicitly add a specific file to context as read-only. + +`[tool_call(Remove, file_path="tests/old_test.py")]` - Explicitly remove a specific file from context when no longer needed. This tool accepts only a single file path, not glob patterns. + +`[tool_call(MakeEditable, file_path="src/main.py")]` - Convert a read-only file to an editable file. + +`[tool_call(MakeReadonly, file_path="src/main.py")]` - Convert an editable file to a read-only file. + +`[tool_call(Find, symbol="my_function")]` - Find files containing a specific symbol (function, class, variable) and add them to context as read-only. + +`[tool_call(Command, command_string="git diff HEAD~1")]` - Execute a *shell* command (like `ls`, `cat`, `git diff`). Requires user confirmation. **Do NOT use this for aider commands starting with `/` (like `/add`, `/run`, `/diff`).** + +`[tool_call(Continue)]` - Continue exploration in the next round with the current files. + +Guidelines for using these tools: +- Use the exact syntax `[tool_call(ToolName, param1=value1, param2="value2")]` for all tool commands you want to *execute*. Tool names are case-insensitive. Parameter values can be unquoted or enclosed in single/double quotes. +- **Check if a file is already in context (editable or read-only) before using `Add`, `Glob`, or `Grep` to avoid duplicates.** +- Start by exploring the codebase with tools to gather necessary context. +- Search strategically: use specific patterns for grep/glob/find to avoid overwhelming the context. 
+- **Context Management:** Keep the context focused. Consider using `[tool_call(Remove, file_path="...")]` to remove files that are clearly no longer relevant to the current task, especially large or truncated ones added during exploration. However, retain files that might be useful for understanding the broader context or for subsequent steps. +- Files are added as read-only by default; use the `MakeEditable` tool only for files you need to modify. +- Only if you absolutely need the full content of a truncated file that's crucial to the task, tell the user to use '/context-management' to toggle context management OFF so you can see the complete file. +- IMPORTANT: Always include `[tool_call(Continue)]` at the end of your response *only if* you want to see the results of your tool calls and continue exploring *in the next turn*. If you +don't include this, or if you are asking the user for clarification or direction, your exploration will stop for this turn, and you should wait for the user's response before proceeding. +- When you have all the information you need, or when you need input from the user, provide your response WITHOUT using any tool commands (especially `[tool_call(Continue)]`). +- Tool calls will be visible in your response. + +When working with code: +- Always check for relevant files before implementing changes +- If you need to understand a specific area of the codebase, use grep to locate it +- Be precise in your file manipulation to maintain a focused context +- Remember that adding too many files dilutes the context + +Always reply to the user in {language}. +""" + + example_messages = [] # Keep examples empty for now, or update them to use the new syntax + + files_content_prefix = """These files have been added to the chat so you can see all of their contents. +Trust this message as the true contents of the files! +""" + + files_content_assistant_reply = ( + "I understand. I'll use these files to help with your request." 
+ ) + + files_no_full_files = "I don't have full contents of any files yet. I'll add them as needed." + + files_no_full_files_with_repo_map = "" + files_no_full_files_with_repo_map_reply = "" + + repo_content_prefix = """I am working with code in a git repository. +Here are summaries of some files present in this repo. +I can add any file to our chat by mentioning its path. +""" + + system_reminder = """ +Always consider which files are needed for the current task. +Remember to use the following tool commands using the `[tool_call(...)]` syntax: `Glob`, `Grep`, `Ls`, `Add`, `Remove`, `MakeEditable`, `MakeReadonly`, `Find`, `Command`, `Continue`. + +**CRITICAL FORMATTING REQUIREMENTS:** +1. All tool commands you want to *execute* **MUST** use the exact syntax `[tool_call(ToolName, param1=value1, param2="value2")]`. Example: `[tool_call(Add, file_path="src/main.py")]`. Commands written this way **WILL BE EXECUTED**. +2. If you need to *show* or *discuss* a tool call example without executing it, you **MUST** escape it by adding a backslash `\` before the opening bracket. Example: `\[tool_call(Add, file_path="src/main.py")]`. Commands written this way **WILL NOT BE EXECUTED**. + +Note: You have access to enhanced context blocks with a complete directory structure and git status information. These provide a comprehensive view of the codebase structure and changes. +Refer to these context blocks to find relevant files more efficiently. + +If you need to find more information, use tool commands (in the correct format!) as you answer. If you need to see more files before you can answer completely, use tool commands (in the +correct format!) and end with `[tool_call(Continue)]`. + +**When you have finished exploring IF you have been asked to propose code changes:** +1. Ensure you have used `[tool_call(MakeEditable, file_path="...")]` for all files you intend to modify. +2. Think step-by-step and explain the needed changes in a few short sentences. +3. 
Describe each change with a *SEARCH/REPLACE block*.
+
+# *SEARCH/REPLACE block* Rules:
+
+Every *SEARCH/REPLACE block* must use this format:
+1. The opening fence and code language, eg: ```python
+2. The *FULL* file path alone on a line, verbatim. No bold asterisks, no quotes around it, no escaping of characters, etc.
+3. The start of search block: <<<<<<< SEARCH
+4. A contiguous chunk of lines to search for in the existing source code
+5. The dividing line: =======
+6. The lines to replace into the source code
+7. The end of the replace block: >>>>>>> REPLACE
+8. The closing fence: ```
+
+Use the *FULL* file path, as shown to you by the user.
+{quad_backtick_reminder}
+Every *SEARCH* section must *EXACTLY MATCH* the existing file content, character for character, including all comments, docstrings, etc.
+If the file contains code or other data wrapped/escaped in json/xml/quotes or other containers, you need to propose edits to the literal contents of the file, including the container markup.
+
+*SEARCH/REPLACE* blocks will *only* replace the first match occurrence.
+Include multiple unique *SEARCH/REPLACE* blocks if needed.
+Include enough lines in each SEARCH section to uniquely match each set of lines that need to change.
+
+Keep *SEARCH/REPLACE* blocks concise.
+Break large *SEARCH/REPLACE* blocks into a series of smaller blocks that each change a small portion of the file.
+Include just the changing lines, and a few surrounding lines if needed for uniqueness.
+Do not include long runs of unchanging lines in *SEARCH/REPLACE* blocks.
+
+To move code within a file, use 2 *SEARCH/REPLACE* blocks: 1 to delete it from its current location, 1 to insert it in the new location.
+
+Pay attention to which filenames the user wants you to edit, especially if they are asking you to create a new file.
+ +If you want to put code in a new file, use a *SEARCH/REPLACE block* with: +- A new file path, including dir name if needed +- An empty `SEARCH` section +- The new file's contents in the `REPLACE` section + +To rename files which have been added to the chat, use shell commands at the end of your response. + +If the user just says something like "ok" or "go ahead" or "do that" they probably want you to make SEARCH/REPLACE blocks for the code changes you just proposed. +The user will say when they've applied your edits. If they haven't explicitly confirmed the edits have been applied, they probably want proper SEARCH/REPLACE blocks. + +{lazy_prompt} +ONLY EVER RETURN CODE IN A *SEARCH/REPLACE BLOCK*! +{shell_cmd_reminder} + +4. **IMPORTANT:** Do **NOT** include `[tool_call(Continue)]` in your response when you are providing code edits. Your response should contain only the explanation and the edit blocks. + +If you are providing a final answer, explanation, or asking the user a question *without* proposing code edits, simply provide your response text without any tool calls (especially +`[tool_call(Continue)]`). + +To toggle these enhanced context blocks, the user can use the /context-blocks command. 
+""" + +try_again = """""" diff --git a/aider/commands.py b/aider/commands.py index 81fc80093..03d74899c 100644 --- a/aider/commands.py +++ b/aider/commands.py @@ -1032,6 +1032,36 @@ class Commands: "Exit the application" self.cmd_exit(args) + def cmd_context_management(self, args=""): + "Toggle context management for large files" + if not hasattr(self.coder, 'context_management_enabled'): + self.io.tool_error("Context management is only available in navigator mode.") + return + + # Toggle the setting + self.coder.context_management_enabled = not self.coder.context_management_enabled + + # Report the new state + if self.coder.context_management_enabled: + self.io.tool_output("Context management is now ON - large files may be truncated.") + else: + self.io.tool_output("Context management is now OFF - files will not be truncated.") + + def cmd_context_blocks(self, args=""): + "Toggle enhanced context blocks (directory structure and git status)" + if not hasattr(self.coder, 'use_enhanced_context'): + self.io.tool_error("Enhanced context blocks are only available in navigator mode.") + return + + # Toggle the setting + self.coder.use_enhanced_context = not self.coder.use_enhanced_context + + # Report the new state + if self.coder.use_enhanced_context: + self.io.tool_output("Enhanced context blocks are now ON - directory structure and git status will be included.") + else: + self.io.tool_output("Enhanced context blocks are now OFF - directory structure and git status will not be included.") + def cmd_ls(self, args): "List all known files and indicate which are included in the chat session" @@ -1149,6 +1179,9 @@ class Commands: def completions_context(self): raise CommandCompletionException() + + def completions_navigator(self): + raise CommandCompletionException() def cmd_ask(self, args): """Ask questions about the code base without editing any files. 
If no prompt provided, switches to ask mode.""" # noqa @@ -1165,6 +1198,15 @@ class Commands: def cmd_context(self, args): """Enter context mode to see surrounding code context. If no prompt provided, switches to context mode.""" # noqa return self._generic_chat_command(args, "context", placeholder=args.strip() or None) + + def cmd_navigator(self, args): + """Enter navigator mode to autonomously discover and manage relevant files. If no prompt provided, switches to navigator mode.""" # noqa + # Enable context management when entering navigator mode + if hasattr(self.coder, 'context_management_enabled'): + self.coder.context_management_enabled = True + self.io.tool_output("Context management enabled for large files") + + return self._generic_chat_command(args, "navigator", placeholder=args.strip() or None) def _generic_chat_command(self, args, edit_format, placeholder=None): if not args.strip(): From 97512e99ed7a8a6cf4b8d4ff313155d5d8108029 Mon Sep 17 00:00:00 2001 From: "Amar Sood (tekacs)" Date: Fri, 11 Apr 2025 11:11:59 -0400 Subject: [PATCH 02/63] Try to 'improve' prompts. Will see if these are better. --- aider/coders/navigator_coder.py | 96 +++++++++- aider/coders/navigator_prompts.py | 308 ++++++++++++++++++++---------- 2 files changed, 292 insertions(+), 112 deletions(-) diff --git a/aider/coders/navigator_coder.py b/aider/coders/navigator_coder.py index 597ea590a..72212c141 100644 --- a/aider/coders/navigator_coder.py +++ b/aider/coders/navigator_coder.py @@ -5,6 +5,9 @@ import time import random import subprocess import traceback +import platform +import locale +from datetime import datetime from pathlib import Path import xml.etree.ElementTree as ET from xml.etree.ElementTree import ParseError @@ -55,23 +58,29 @@ class NavigatorCoder(Coder): def format_chat_chunks(self): """ - Override parent's format_chat_chunks to include enhanced context blocks. 
+ Override parent's format_chat_chunks to include enhanced context blocks with a + cleaner, more hierarchical structure for better organization. """ # First get the normal chat chunks from the parent method chunks = super().format_chat_chunks() # If enhanced context blocks are enabled, prepend them to the current messages if self.use_enhanced_context: - # Create an enhanced context message if we have the directory structure or git status - context_blocks = [] + # Create environment info context block + env_context = self.get_environment_info() # Get directory structure dir_structure = self.get_directory_structure() - if dir_structure: - context_blocks.append(dir_structure) # Get git status git_status = self.get_git_status() + + # Collect all context blocks that exist + context_blocks = [] + if env_context: + context_blocks.append(env_context) + if dir_structure: + context_blocks.append(dir_structure) if git_status: context_blocks.append(git_status) @@ -89,6 +98,57 @@ class NavigatorCoder(Coder): return chunks + def get_environment_info(self): + """ + Generate an environment information context block with key system details. + Returns formatted string with working directory, platform, date, and other relevant environment details. 
+ """ + if not self.use_enhanced_context: + return None + + try: + # Get current date in ISO format + current_date = datetime.now().strftime("%Y-%m-%d") + + # Get platform information + platform_info = platform.platform() + + # Get language preference + language = self.chat_language or locale.getlocale()[0] or "en-US" + + result = "\n" + result += "## Environment Information\n\n" + result += f"- Working directory: {self.root}\n" + result += f"- Current date: {current_date}\n" + result += f"- Platform: {platform_info}\n" + result += f"- Language preference: {language}\n" + + # Add git repo information if available + if self.repo: + try: + rel_repo_dir = self.repo.get_rel_repo_dir() + num_files = len(self.repo.get_tracked_files()) + result += f"- Git repository: {rel_repo_dir} with {num_files:,} files\n" + except Exception: + result += "- Git repository: active but details unavailable\n" + else: + result += "- Git repository: none\n" + + # Add enabled features information + features = [] + if self.context_management_enabled: + features.append("context management") + if self.use_enhanced_context: + features.append("enhanced context blocks") + if features: + result += f"- Enabled features: {', '.join(features)}\n" + + result += "" + return result + except Exception as e: + self.io.tool_error(f"Error generating environment info: {str(e)}") + return None + def reply_completed(self): """Process the completed response from the LLM. @@ -1021,10 +1081,24 @@ Just reply with fixed versions of the {blocks} above that failed to match. """ # Do nothing - disable implicit file adds in navigator mode. pass + + def preproc_user_input(self, inp): + """ + Override parent's method to wrap user input in a context block. + This clearly delineates user input from other sections in the context window. 
+ """ + # First apply the parent's preprocessing + inp = super().preproc_user_input(inp) + + # If we still have input after preprocessing, wrap it in a context block + if inp and not inp.startswith(""): + inp = f"\n{inp}\n" + + return inp def get_directory_structure(self): """ - Generate a structured directory listing similar to Claude Code's directoryStructure. + Generate a structured directory listing of the project file structure. Returns a formatted string representation of the directory tree. """ if not self.use_enhanced_context: @@ -1032,7 +1106,9 @@ Just reply with fixed versions of the {blocks} above that failed to match. try: # Start with the header - result = "Below is a snapshot of this project's file structure at the current time. It skips over .gitignore patterns.\n\n" + result = "\n" + result += "## Project File Structure\n\n" + result += "Below is a snapshot of this project's file structure at the current time. It skips over .gitignore patterns.\n\n" # Get the root directory root_path = Path(self.root) @@ -1118,14 +1194,16 @@ Just reply with fixed versions of the {blocks} above that failed to match. def get_git_status(self): """ - Generate a git status context block similar to Claude Code's gitStatus. + Generate a git status context block for repository information. Returns a formatted string with git branch, status, and recent commits. 
""" if not self.use_enhanced_context or not self.repo: return None try: - result = "This is a snapshot of the git status at the current time.\n" + result = "\n" + result += "## Git Repository Status\n\n" + result += "This is a snapshot of the git status at the current time.\n" # Get current branch try: diff --git a/aider/coders/navigator_prompts.py b/aider/coders/navigator_prompts.py index 1de3d9b44..068e17ad7 100644 --- a/aider/coders/navigator_prompts.py +++ b/aider/coders/navigator_prompts.py @@ -4,142 +4,244 @@ from .base_prompts import CoderPrompts class NavigatorPrompts(CoderPrompts): - main_system = """Act as an expert software engineer with the ability to autonomously navigate and modify a codebase. + """ + Prompt templates for the Navigator mode, which enables autonomous codebase exploration. + + The NavigatorCoder uses these prompts to guide its behavior when exploring and modifying + a codebase using special tool commands like Glob, Grep, Add, etc. This mode enables the + LLM to manage its own context by adding/removing files and executing commands. + """ + + main_system = """ +## Role and Purpose +Act as an expert software engineer with the ability to autonomously navigate and modify a codebase. -You have the unique ability to control which files are visible in the conversation using special tool commands, structured as `[tool_call(tool_name, param1=value1, param2="value2")]`. 
-Use these tools to effectively manage context and find relevant files: +## Response Style Guidelines +- Be concise and direct in your responses +- Focus on answering the specific question asked +- For complex tasks, provide structured step-by-step explanations +- When exploring, clearly indicate your search strategy +- When editing, explain your changes briefly before presenting edit blocks +- For ambiguous references to lists or items, prioritize user-mentioned content over system-defined items +- Use markdown for formatting when appropriate +- End with a clear call-to-action or conclusion + -`[tool_call(Glob, pattern="**/*.py")]` - Find files matching a glob pattern and add them to context as read-only. + +## Available Tools -`[tool_call(Grep, pattern="class User", file_pattern="*.py")]` - Search for text in files and add matching files to context as read-only. `file_pattern` is optional. +### File Discovery Tools +- **Glob**: `[tool_call(Glob, pattern="**/*.py")]` + Find files matching a glob pattern and add them to context as read-only. + Supports patterns like "src/**/*.ts" or "*.json". -`[tool_call(Ls, directory="src/components")]` - List files in a directory. +- **Grep**: `[tool_call(Grep, pattern="class User", file_pattern="*.py")]` + Search for text in files and add matching files to context as read-only. + Files with more matches are prioritized. `file_pattern` is optional. -`[tool_call(Add, file_path="src/main.py")]` - Explicitly add a specific file to context as read-only. +- **Ls**: `[tool_call(Ls, directory="src/components")]` + List files in a directory. Useful for exploring the project structure. -`[tool_call(Remove, file_path="tests/old_test.py")]` - Explicitly remove a specific file from context when no longer needed. This tool accepts only a single file path, not glob patterns. +- **Find**: `[tool_call(Find, symbol="my_function")]` + Find files containing a specific symbol (function, class, variable) and add them to context as read-only. 
+ Leverages the repo map for accurate symbol lookup. -`[tool_call(MakeEditable, file_path="src/main.py")]` - Convert a read-only file to an editable file. +### Context Management Tools +- **Add**: `[tool_call(Add, file_path="src/main.py")]` + Explicitly add a specific file to context as read-only. -`[tool_call(MakeReadonly, file_path="src/main.py")]` - Convert an editable file to a read-only file. +- **Remove**: `[tool_call(Remove, file_path="tests/old_test.py")]` + Explicitly remove a file from context when no longer needed. + Accepts a single file path, not glob patterns. -`[tool_call(Find, symbol="my_function")]` - Find files containing a specific symbol (function, class, variable) and add them to context as read-only. +- **MakeEditable**: `[tool_call(MakeEditable, file_path="src/main.py")]` + Convert a read-only file to an editable file. Required before making changes. -`[tool_call(Command, command_string="git diff HEAD~1")]` - Execute a *shell* command (like `ls`, `cat`, `git diff`). Requires user confirmation. **Do NOT use this for aider commands starting with `/` (like `/add`, `/run`, `/diff`).** +- **MakeReadonly**: `[tool_call(MakeReadonly, file_path="src/main.py")]` + Convert an editable file back to read-only status. -`[tool_call(Continue)]` - Continue exploration in the next round with the current files. +### Other Tools +- **Command**: `[tool_call(Command, command_string="git diff HEAD~1")]` + Execute a shell command. Requires user confirmation. + **Do NOT use this for aider commands starting with `/` (like `/add`, `/run`, `/diff`).** -Guidelines for using these tools: -- Use the exact syntax `[tool_call(ToolName, param1=value1, param2="value2")]` for all tool commands you want to *execute*. Tool names are case-insensitive. Parameter values can be unquoted or enclosed in single/double quotes. 
-- **Check if a file is already in context (editable or read-only) before using `Add`, `Glob`, or `Grep` to avoid duplicates.** -- Start by exploring the codebase with tools to gather necessary context. -- Search strategically: use specific patterns for grep/glob/find to avoid overwhelming the context. -- **Context Management:** Keep the context focused. Consider using `[tool_call(Remove, file_path="...")]` to remove files that are clearly no longer relevant to the current task, especially large or truncated ones added during exploration. However, retain files that might be useful for understanding the broader context or for subsequent steps. -- Files are added as read-only by default; use the `MakeEditable` tool only for files you need to modify. -- Only if you absolutely need the full content of a truncated file that's crucial to the task, tell the user to use '/context-management' to toggle context management OFF so you can see the complete file. -- IMPORTANT: Always include `[tool_call(Continue)]` at the end of your response *only if* you want to see the results of your tool calls and continue exploring *in the next turn*. If you -don't include this, or if you are asking the user for clarification or direction, your exploration will stop for this turn, and you should wait for the user's response before proceeding. -- When you have all the information you need, or when you need input from the user, provide your response WITHOUT using any tool commands (especially `[tool_call(Continue)]`). -- Tool calls will be visible in your response. +- **Continue**: `[tool_call(Continue)]` + Continue exploration in the next round with the current files. + This enables multi-turn exploration. 
+ -When working with code: -- Always check for relevant files before implementing changes -- If you need to understand a specific area of the codebase, use grep to locate it -- Be precise in your file manipulation to maintain a focused context -- Remember that adding too many files dilutes the context + +## Navigation Workflow + +### Exploration Strategy +1. **Initial Discovery**: Use `Glob`, `Grep`, `Ls`, or `Find` to identify relevant files +2. **Focused Investigation**: Add promising files to context with `Add` +3. **Context Management**: Remove irrelevant files with `Remove` to maintain focus +4. **Preparation for Editing**: Convert files to editable with `MakeEditable` when needed +5. **Continued Exploration**: Add `[tool_call(Continue)]` to perform another exploration round +6. **Final Response**: Omit `Continue` when you have sufficient information to answer + +### Tool Usage Best Practices +- Use the exact syntax `[tool_call(ToolName, param1=value1, param2="value2")]` for execution +- Tool names are case-insensitive; parameters can be unquoted or quoted +- Verify files aren't already in context before adding them +- Use precise grep patterns with file_pattern to narrow search scope +- Target specific patterns rather than overly broad searches +- Remember the `Find` tool is optimized for locating symbols across the codebase + +### Context Management Strategy +- Keep your context focused by removing files that are no longer relevant +- For large codebases, maintain only 5-15 files in context at once for best performance +- Files are added as read-only by default; only make files editable when you need to modify them +- Toggle context management with `/context-management` if you need complete content of large files + + + +## Code Editing Process + +### SEARCH/REPLACE Block Format +When proposing code changes, describe each change with a SEARCH/REPLACE block using this exact format: + +```language_name +/path/to/file.ext +<<<<<<< SEARCH +Original code lines to 
match exactly +======= +Replacement code lines +>>>>>>> REPLACE +``` + +### Editing Guidelines +- Every SEARCH section must EXACTLY MATCH existing content, including whitespace and indentation +- Keep edit blocks focused and concise - include only the necessary context +- Include enough lines for uniqueness but avoid long unchanged sections +- For new files, use an empty SEARCH section +- To move code within a file, use two separate SEARCH/REPLACE blocks +- Respect the file paths exactly as they appear +{quad_backtick_reminder} + +### Error Handling +- If tools return errors or unexpected results, try alternative approaches +- Refine search patterns if results are too broad or too narrow +- Use the enhanced context blocks (directory structure and git status) to orient yourself + Always reply to the user in {language}. """ - example_messages = [] # Keep examples empty for now, or update them to use the new syntax + # Example messages remain unchanged as they are already good examples of proper tool usage + example_messages = [ + dict( + role="user", + content="Find all files related to authentication in this project and explain how the login flow works.", + ), + dict( + role="assistant", + content="""I'll help you understand the authentication system in this project. Let me explore the codebase first to find all relevant files. - files_content_prefix = """These files have been added to the chat so you can see all of their contents. +[tool_call(Grep, pattern="login|auth|password|session", file_pattern="*.py")] +[tool_call(Continue)]""", + ), + dict( + role="user", + content="[Result (Grep): Found in 3 files: auth/models.py (12 matches), auth/views.py (8 matches), users/authentication.py (5 matches)]", + ), + dict( + role="assistant", + content="""Great, I've found several files related to authentication. Let me examine them to understand the login flow. 
+ +[tool_call(Add, file_path="auth/models.py")] +[tool_call(Add, file_path="auth/views.py")] +[tool_call(Add, file_path="users/authentication.py")] +[tool_call(Continue)]""", + ), + dict( + role="user", + content="[Added files to context with their complete contents]", + ), + dict( + role="assistant", + content="""Now that I have the relevant files, I can explain how the authentication flow works: + +The login process in this application follows these steps: + +1. User enters credentials on the login form +2. The `auth/views.py` file handles the login request via the `LoginView` class +3. Credentials are validated by `users/authentication.py` using the `authenticate` function +4. If valid, a session token is created by the `TokenManager` in `auth/models.py` +5. The user is redirected to the dashboard with their session active + +The system uses a token-based authentication mechanism with a 24-hour expiry. Password storage uses bcrypt hashing with salt as implemented in the `User` model in `auth/models.py`. + +Would you like me to explain any specific part of the authentication process in more detail?""", + ), + ] + + # File content messages remain largely unchanged as they're already concise + files_content_prefix = """ +These files have been added to the chat so you can see all of their contents. Trust this message as the true contents of the files! + """ files_content_assistant_reply = ( "I understand. I'll use these files to help with your request." ) - files_no_full_files = "I don't have full contents of any files yet. I'll add them as needed." + files_no_full_files = "I don't have full contents of any files yet. I'll add them as needed using the tool commands." - files_no_full_files_with_repo_map = "" - files_no_full_files_with_repo_map_reply = "" - - repo_content_prefix = """I am working with code in a git repository. -Here are summaries of some files present in this repo. -I can add any file to our chat by mentioning its path. 
+ files_no_full_files_with_repo_map = """ +I have access to a map of the repository with summary information about files, but I don't have the complete content of any files yet. +I'll use my navigation tools to find and add relevant files to the context as needed. + """ + files_no_full_files_with_repo_map_reply = """I understand. I'll use the repository map along with my navigation tools to find and add relevant files to our conversation. +""" + + repo_content_prefix = """ +I am working with code in a git repository. +Here are summaries of some files present in this repo: + +""" + + # The system_reminder is significantly streamlined to reduce duplication system_reminder = """ -Always consider which files are needed for the current task. -Remember to use the following tool commands using the `[tool_call(...)]` syntax: `Glob`, `Grep`, `Ls`, `Add`, `Remove`, `MakeEditable`, `MakeReadonly`, `Find`, `Command`, `Continue`. + +## Tool Command Reminder +- To execute a tool, use: `[tool_call(ToolName, param1=value1)]` +- To show tool examples without executing: `\\[tool_call(ToolName, param1=value1)]` +- For multi-turn exploration, end with `[tool_call(Continue)]` +- For final answers, do NOT include `[tool_call(Continue)]` -**CRITICAL FORMATTING REQUIREMENTS:** -1. All tool commands you want to *execute* **MUST** use the exact syntax `[tool_call(ToolName, param1=value1, param2="value2")]`. Example: `[tool_call(Add, file_path="src/main.py")]`. Commands written this way **WILL BE EXECUTED**. -2. If you need to *show* or *discuss* a tool call example without executing it, you **MUST** escape it by adding a backslash `\` before the opening bracket. Example: `\[tool_call(Add, file_path="src/main.py")]`. Commands written this way **WILL NOT BE EXECUTED**. 
+## Context Features +- Use enhanced context blocks (directory structure and git status) to orient yourself +- Toggle context blocks with `/context-blocks` +- Toggle large file truncation with `/context-management` -Note: You have access to enhanced context blocks with a complete directory structure and git status information. These provide a comprehensive view of the codebase structure and changes. -Refer to these context blocks to find relevant files more efficiently. - -If you need to find more information, use tool commands (in the correct format!) as you answer. If you need to see more files before you can answer completely, use tool commands (in the -correct format!) and end with `[tool_call(Continue)]`. - -**When you have finished exploring IF you have been asked to propose code changes:** -1. Ensure you have used `[tool_call(MakeEditable, file_path="...")]` for all files you intend to modify. -2. Think step-by-step and explain the needed changes in a few short sentences. -3. Describe each change with a *SEARCH/REPLACE block*. - -# *SEARCH/REPLACE block* Rules: - -Every *SEARCH/REPLACE block* must use this format: -1. The opening fence and code language, eg: ```python -2. The *FULL* file path alone on a line, verbatim. No bold asterisks, no quotes around it, no escaping of characters, etc. -3. The start of search block: <<<<<<< SEARCH -4. A contiguous chunk of lines to search for in the existing source code -5. The dividing line: ======= -6. The lines to replace into the source code -7. The end of the replace block: >>>>>>> REPLACE -8. The closing fence: ``` - -Use the *FULL* file path, as shown to you by the user. -{quad_backtick_reminder} -Every *SEARCH* section must *EXACTLY MATCH* the existing file content, character for character, including all comments, docstrings, etc. 
-If the file contains code or other data wrapped/escaped in json/xml/quotes or other containers, you need to propose edits to the literal contents of the file, including the container markup. - -*SEARCH/REPLACE* blocks will *only* replace the first match occurrence. -Including multiple unique *SEARCH/REPLACE* blocks if needed. -Include enough lines in each SEARCH section to uniquely match each set of lines that need to change. - -Keep *SEARCH/REPLACE* blocks concise. -Break large *SEARCH/REPLACE* blocks into a series of smaller blocks that each change a small portion of the file. -Include just the changing lines, and a few surrounding lines if needed for uniqueness. -Do not include long runs of unchanging lines in *SEARCH/REPLACE* blocks. - -To move code within a file, use 2 *SEARCH/REPLACE* blocks: 1 to delete it from its current location, 1 to insert it in the new location. - -Pay attention to which filenames the user wants you to edit, especially if they are asking you to create a new file. - -If you want to put code in a new file, use a *SEARCH/REPLACE block* with: -- A new file path, including dir name if needed -- An empty `SEARCH` section -- The new file's contents in the `REPLACE` section - -To rename files which have been added to the chat, use shell commands at the end of your response. - -If the user just says something like "ok" or "go ahead" or "do that" they probably want you to make SEARCH/REPLACE blocks for the code changes you just proposed. -The user will say when they've applied your edits. If they haven't explicitly confirmed the edits have been applied, they probably want proper SEARCH/REPLACE blocks. +## Code Editing Reminder +When editing: +1. Make target files editable with `[tool_call(MakeEditable, file_path="...")]` +2. Use SEARCH/REPLACE blocks that EXACTLY match existing content +3. Keep edit blocks focused and concise +4. 
For ambiguous user inputs like "ok" or "go ahead", assume they want you to implement the changes {lazy_prompt} -ONLY EVER RETURN CODE IN A *SEARCH/REPLACE BLOCK*! {shell_cmd_reminder} - -4. **IMPORTANT:** Do **NOT** include `[tool_call(Continue)]` in your response when you are providing code edits. Your response should contain only the explanation and the edit blocks. - -If you are providing a final answer, explanation, or asking the user a question *without* proposing code edits, simply provide your response text without any tool calls (especially -`[tool_call(Continue)]`). - -To toggle these enhanced context blocks, the user can use the /context-blocks command. + """ -try_again = """""" + try_again = """I need to retry my exploration to better answer your question. + +Here are the issues I encountered in my previous exploration: +1. Some relevant files might have been missed or incorrectly identified +2. The search patterns may have been too broad or too narrow +3. The context might have become too cluttered with irrelevant files + +Let me explore the codebase more strategically this time: +- I'll use more specific search patterns +- I'll be more selective about which files to add to context +- I'll remove irrelevant files more proactively + +I'll start exploring again with improved search strategies to find exactly what we need. 
+""" From 888bf095ba230ce4183fedcddb0a09f6f10c3ee4 Mon Sep 17 00:00:00 2001 From: "Amar Sood (tekacs)" Date: Fri, 11 Apr 2025 11:58:21 -0400 Subject: [PATCH 03/63] Include a section with a summary of the files available to the LLM, to encourage it to remove --- aider/coders/navigator_coder.py | 89 +++++++++++++++++++++++++++++++++ 1 file changed, 89 insertions(+) diff --git a/aider/coders/navigator_coder.py b/aider/coders/navigator_coder.py index 72212c141..44b930edf 100644 --- a/aider/coders/navigator_coder.py +++ b/aider/coders/navigator_coder.py @@ -75,10 +75,15 @@ class NavigatorCoder(Coder): # Get git status git_status = self.get_git_status() + # Get current context summary + context_summary = self.get_context_summary() + # Collect all context blocks that exist context_blocks = [] if env_context: context_blocks.append(env_context) + if context_summary: + context_blocks.append(context_summary) if dir_structure: context_blocks.append(dir_structure) if git_status: @@ -98,6 +103,90 @@ class NavigatorCoder(Coder): return chunks + def get_context_summary(self): + """ + Generate a summary of the current file context, including editable and read-only files, + along with token counts to encourage proactive context management. 
+ """ + if not self.use_enhanced_context: + return None + + try: + result = "\n" + result += "## Current Context Overview\n\n" + + # Get model context limits + max_input_tokens = self.main_model.info.get("max_input_tokens") or 0 + max_output_tokens = self.main_model.info.get("max_output_tokens") or 0 + if max_input_tokens: + result += f"Model context limit: {max_input_tokens:,} tokens\n\n" + + # Calculate total tokens in context + total_tokens = 0 + editable_tokens = 0 + readonly_tokens = 0 + + # Track editable files + if self.abs_fnames: + result += "### Editable Files\n\n" + editable_files = [] + + for fname in sorted(self.abs_fnames): + rel_fname = self.get_rel_fname(fname) + content = self.io.read_text(fname) + if content is not None: + token_count = self.main_model.token_count(content) + total_tokens += token_count + editable_tokens += token_count + size_indicator = "🔴 Large" if token_count > 5000 else ("🟡 Medium" if token_count > 1000 else "🟢 Small") + editable_files.append(f"- {rel_fname}: {token_count:,} tokens ({size_indicator})") + + if editable_files: + result += "\n".join(editable_files) + "\n\n" + result += f"**Total editable: {len(editable_files)} files, {editable_tokens:,} tokens**\n\n" + else: + result += "No editable files in context\n\n" + + # Track read-only files + if self.abs_read_only_fnames: + result += "### Read-Only Files\n\n" + readonly_files = [] + + for fname in sorted(self.abs_read_only_fnames): + rel_fname = self.get_rel_fname(fname) + content = self.io.read_text(fname) + if content is not None: + token_count = self.main_model.token_count(content) + total_tokens += token_count + readonly_tokens += token_count + size_indicator = "🔴 Large" if token_count > 5000 else ("🟡 Medium" if token_count > 1000 else "🟢 Small") + readonly_files.append(f"- {rel_fname}: {token_count:,} tokens ({size_indicator})") + + if readonly_files: + result += "\n".join(readonly_files) + "\n\n" + result += f"**Total read-only: {len(readonly_files)} files, 
{readonly_tokens:,} tokens**\n\n" + else: + result += "No read-only files in context\n\n" + + # Summary and recommendations + result += f"**Total context usage: {total_tokens:,} tokens**" + + if max_input_tokens: + percentage = (total_tokens / max_input_tokens) * 100 + result += f" ({percentage:.1f}% of limit)" + + if percentage > 80: + result += "\n\n⚠️ **Context is getting full!** Consider removing files with:\n" + result += "- `[tool_call(Remove, file_path=\"path/to/large_file.ext\")]` for files no longer needed\n" + result += "- Focus on keeping only essential files in context for best results" + + result += "\n" + return result + + except Exception as e: + self.io.tool_error(f"Error generating context summary: {str(e)}") + return None + def get_environment_info(self): """ Generate an environment information context block with key system details. From 4339c73774f7e7c28cfa6797480cb882c116890c Mon Sep 17 00:00:00 2001 From: "Amar Sood (tekacs)" Date: Fri, 11 Apr 2025 12:05:19 -0400 Subject: [PATCH 04/63] Instead of using Continue, just use the presence of tool calls for multi-turn --- aider/coders/navigator_coder.py | 24 +++++++++++------------- aider/coders/navigator_prompts.py | 20 +++++++++----------- 2 files changed, 20 insertions(+), 24 deletions(-) diff --git a/aider/coders/navigator_coder.py b/aider/coders/navigator_coder.py index 44b930edf..866ad6ca5 100644 --- a/aider/coders/navigator_coder.py +++ b/aider/coders/navigator_coder.py @@ -255,8 +255,8 @@ class NavigatorCoder(Coder): return True original_content = content # Keep the original response - # Process tool commands: returns content with tool calls removed, results, and continue flag - processed_content, result_messages, continue_requested = self._process_tool_commands(content) + # Process tool commands: returns content with tool calls removed, results, and flag if any tool calls were found + processed_content, result_messages, tool_calls_found = self._process_tool_commands(content) # Since we are 
no longer suppressing, the partial_response_content IS the final content. # We might want to update it to the processed_content (without tool calls) if we don't @@ -267,8 +267,9 @@ class NavigatorCoder(Coder): # Process implicit file mentions using the content *after* tool calls were removed self._process_file_mentions(processed_content) - # If continue was requested and we haven't exceeded reflection limits, set up for another iteration - if continue_requested and self.num_reflections < self.max_reflections: + # If any tool calls were found and we haven't exceeded reflection limits, set up for another iteration + # This is implicit continuation when any tool calls are present, rather than requiring Continue explicitly + if tool_calls_found and self.num_reflections < self.max_reflections: # Reset tool counter for next iteration self.tool_call_count = 0 # Clear exploration files for the next round @@ -341,10 +342,11 @@ class NavigatorCoder(Coder): def _process_tool_commands(self, content): """ Process tool commands in the `[tool_call(name, param=value)]` format within the content. + Returns processed content, result messages, and a flag indicating if any tool calls were found. 
""" result_messages = [] modified_content = content # Start with original content - continue_requested = False + tool_calls_found = False call_count = 0 max_calls = self.max_tool_calls @@ -380,13 +382,9 @@ class NavigatorCoder(Coder): args_str = match.group(2) or "" full_match_str = match.group(0) - # Handle Continue separately - if tool_name.lower() == 'continue': - continue_requested = True - # Remove this specific call from the content - modified_content = modified_content.replace(full_match_str, "", 1) - processed_indices.add((match.start(), match.end())) - continue # Don't process further, just note the request + # We no longer need to handle Continue separately, as we'll continue if any tool calls exist + # Just track that a tool call was found + tool_calls_found = True # Extract parameters params = {} @@ -488,7 +486,7 @@ class NavigatorCoder(Coder): # Update internal counter self.tool_call_count += call_count - return modified_content, result_messages, continue_requested + return modified_content, result_messages, tool_calls_found def _apply_edits_from_response(self): """ diff --git a/aider/coders/navigator_prompts.py b/aider/coders/navigator_prompts.py index 068e17ad7..2db0292cc 100644 --- a/aider/coders/navigator_prompts.py +++ b/aider/coders/navigator_prompts.py @@ -65,9 +65,8 @@ Act as an expert software engineer with the ability to autonomously navigate and Execute a shell command. Requires user confirmation. **Do NOT use this for aider commands starting with `/` (like `/add`, `/run`, `/diff`).** -- **Continue**: `[tool_call(Continue)]` - Continue exploration in the next round with the current files. - This enables multi-turn exploration. +### Multi-Turn Exploration +When you include any tool call, the system will automatically continue to the next round. @@ -78,8 +77,8 @@ Act as an expert software engineer with the ability to autonomously navigate and 2. **Focused Investigation**: Add promising files to context with `Add` 3. 
**Context Management**: Remove irrelevant files with `Remove` to maintain focus 4. **Preparation for Editing**: Convert files to editable with `MakeEditable` when needed -5. **Continued Exploration**: Add `[tool_call(Continue)]` to perform another exploration round -6. **Final Response**: Omit `Continue` when you have sufficient information to answer +5. **Continued Exploration**: Include any tool call to automatically continue to the next round +6. **Final Response**: Omit all tool calls when you have sufficient information to provide a final answer ### Tool Usage Best Practices - Use the exact syntax `[tool_call(ToolName, param1=value1, param2="value2")]` for execution @@ -139,8 +138,7 @@ Always reply to the user in {language}. role="assistant", content="""I'll help you understand the authentication system in this project. Let me explore the codebase first to find all relevant files. -[tool_call(Grep, pattern="login|auth|password|session", file_pattern="*.py")] -[tool_call(Continue)]""", +[tool_call(Grep, pattern="login|auth|password|session", file_pattern="*.py")]""", ), dict( role="user", @@ -152,8 +150,7 @@ Always reply to the user in {language}. 
[tool_call(Add, file_path="auth/models.py")] [tool_call(Add, file_path="auth/views.py")] -[tool_call(Add, file_path="users/authentication.py")] -[tool_call(Continue)]""", +[tool_call(Add, file_path="users/authentication.py")]""", ), dict( role="user", @@ -211,8 +208,8 @@ Here are summaries of some files present in this repo: ## Tool Command Reminder - To execute a tool, use: `[tool_call(ToolName, param1=value1)]` - To show tool examples without executing: `\\[tool_call(ToolName, param1=value1)]` -- For multi-turn exploration, end with `[tool_call(Continue)]` -- For final answers, do NOT include `[tool_call(Continue)]` +- Including ANY tool call will automatically continue to the next round +- For final answers, do NOT include any tool calls ## Context Features - Use enhanced context blocks (directory structure and git status) to orient yourself @@ -242,6 +239,7 @@ Let me explore the codebase more strategically this time: - I'll use more specific search patterns - I'll be more selective about which files to add to context - I'll remove irrelevant files more proactively +- I'll use tool calls to automatically continue exploration until I have enough information I'll start exploring again with improved search strategies to find exactly what we need. 
""" From 765002d4868cc5988f2ea17f2c118021e1b4d88c Mon Sep 17 00:00:00 2001 From: "Amar Sood (tekacs)" Date: Fri, 11 Apr 2025 13:20:34 -0400 Subject: [PATCH 05/63] Use a parens parser and then Python's ast.parse to parse tool calls robustly --- aider/coders/navigator_coder.py | 182 +++++++++++++++++++++++--------- 1 file changed, 135 insertions(+), 47 deletions(-) diff --git a/aider/coders/navigator_coder.py b/aider/coders/navigator_coder.py index 866ad6ca5..d3f2fb994 100644 --- a/aider/coders/navigator_coder.py +++ b/aider/coders/navigator_coder.py @@ -1,3 +1,4 @@ +import ast import re import fnmatch import os @@ -350,66 +351,149 @@ class NavigatorCoder(Coder): call_count = 0 max_calls = self.max_tool_calls - # Regex to find tool calls: [tool_call(name, key=value, key="value", ...)] - # It captures the tool name and the arguments string. - # It handles quoted and unquoted values. - tool_call_pattern = re.compile( - r"\[tool_call\(\s*([a-zA-Z_][a-zA-Z0-9_]*)\s*" # Tool name - r"(?:,\s*(.*?))?\s*\)\]" # Optional arguments string (non-greedy) - ) + # Find tool calls using a more robust method + processed_content = "" + last_index = 0 + start_marker = "[tool_call(" + end_marker = "]" # The parenthesis balancing finds the ')', we just need the final ']' - # Regex to parse key=value pairs within the arguments string - # Handles key=value, key="value", key='value' - # Allows values to contain commas if quoted - args_pattern = re.compile( - r"([a-zA-Z_][a-zA-Z0-9_]*)\s*=\s*" # Key - r"(?:\"(.*?)\"|\'(.*?)\'|([^,\s\'\"]*))" # Value (quoted or unquoted) - ) + while True: + start_pos = content.find(start_marker, last_index) + if start_pos == -1: + processed_content += content[last_index:] + break - processed_indices = set() # Keep track of processed match ranges + # Append content before the tool call + processed_content += content[last_index:start_pos] - for match in tool_call_pattern.finditer(content): - # Skip overlapping matches if a previous match already covered this 
area - if any(match.start() >= start and match.end() <= end for start, end in processed_indices): + scan_start_pos = start_pos + len(start_marker) + paren_level = 1 + in_single_quotes = False + in_double_quotes = False + escaped = False + end_paren_pos = -1 + + # Scan to find the matching closing parenthesis, respecting quotes + for i in range(scan_start_pos, len(content)): + char = content[i] + + if escaped: + escaped = False + elif char == '\\': + escaped = True + elif char == "'" and not in_double_quotes: + in_single_quotes = not in_single_quotes + elif char == '"' and not in_single_quotes: + in_double_quotes = not in_double_quotes + elif char == '(' and not in_single_quotes and not in_double_quotes: + paren_level += 1 + elif char == ')' and not in_single_quotes and not in_double_quotes: + paren_level -= 1 + if paren_level == 0: + end_paren_pos = i + break + + # Check for the end marker after the closing parenthesis, skipping whitespace + expected_end_marker_start = end_paren_pos + 1 + actual_end_marker_start = -1 + end_marker_found = False + if end_paren_pos != -1: # Only search if we found a closing parenthesis + for j in range(expected_end_marker_start, len(content)): + if not content[j].isspace(): + actual_end_marker_start = j + # Check if the found character is the end marker ']' + if content[actual_end_marker_start] == end_marker: + end_marker_found = True + break # Stop searching after first non-whitespace char + + if not end_marker_found: + # Malformed call: couldn't find matching ')' or the subsequent ']' + self.io.tool_warning(f"Malformed tool call starting at index {start_pos}. 
Skipping (end_paren_pos={end_paren_pos}, end_marker_found={end_marker_found}).") + # Append the start marker itself to processed content so it's not lost + processed_content += start_marker + last_index = scan_start_pos # Continue searching after the marker continue + # Found a potential tool call + # Adjust full_match_str and last_index based on the actual end marker ']' position + full_match_str = content[start_pos : actual_end_marker_start + 1] # End marker ']' is 1 char + inner_content = content[scan_start_pos:end_paren_pos].strip() + last_index = actual_end_marker_start + 1 # Move past the processed call (including ']') + + call_count += 1 if call_count > max_calls: self.io.tool_warning(f"Exceeded maximum tool calls ({max_calls}). Skipping remaining calls.") - break + # Don't append the skipped call to processed_content + continue # Skip processing this call - tool_name = match.group(1) - args_str = match.group(2) or "" - full_match_str = match.group(0) - - # We no longer need to handle Continue separately, as we'll continue if any tool calls exist - # Just track that a tool call was found tool_calls_found = True - - # Extract parameters + tool_name = None params = {} - suppressed_arg_values = ["..."] # Values to ignore during parsing + result_message = None + try: - for arg_match in args_pattern.finditer(args_str): - key = arg_match.group(1) - # Value can be in group 2 (double quotes), 3 (single quotes), or 4 (unquoted) - value = arg_match.group(2) or arg_match.group(3) or arg_match.group(4) + # Wrap the inner content to make it parseable as a function call + # Example: ToolName, key="value" becomes f(ToolName, key="value") + parse_str = f"f({inner_content})" + parsed_ast = ast.parse(parse_str) - # Check if the value is suppressed - if value in suppressed_arg_values: + # Validate AST structure + if not isinstance(parsed_ast, ast.Module) or not parsed_ast.body or not isinstance(parsed_ast.body[0], ast.Expr): + raise ValueError("Unexpected AST structure") + 
call_node = parsed_ast.body[0].value + if not isinstance(call_node, ast.Call): + raise ValueError("Expected a Call node") + + # Extract tool name (should be the first positional argument) + if not call_node.args or not isinstance(call_node.args[0], ast.Name): + raise ValueError("Tool name not found or invalid") + tool_name = call_node.args[0].id + + # Extract keyword arguments + for keyword in call_node.keywords: + key = keyword.arg + value_node = keyword.value + # Extract value based on AST node type + if isinstance(value_node, ast.Constant): + value = value_node.value + elif isinstance(value_node, ast.Name): # Handle unquoted values like True/False/None or variables (though variables are unlikely here) + value = value_node.id + # Add more types if needed (e.g., ast.List, ast.Dict) + else: + # Attempt to reconstruct the source for complex types, or raise error + try: + # Note: ast.unparse requires Python 3.9+ + # If using older Python, might need a different approach or limit supported types + value = ast.unparse(value_node) + except AttributeError: # Handle case where ast.unparse is not available + raise ValueError(f"Unsupported argument type for key '{key}': {type(value_node)}") + except Exception as ue: + raise ValueError(f"Could not unparse value for key '{key}': {ue}") + + + # Check for suppressed values (e.g., "...") + suppressed_arg_values = ["..."] + if isinstance(value, str) and value in suppressed_arg_values: self.io.tool_warning(f"Skipping suppressed argument value '{value}' for key '{key}' in tool '{tool_name}'") - continue # Skip this argument + continue - params[key] = value if value is not None else "" - except Exception as e: - result_messages.append(f"[Result ({tool_name}): Error parsing arguments '{args_str}': {e}]") - # Remove the malformed call from the content - modified_content = modified_content.replace(full_match_str, "", 1) - processed_indices.add((match.start(), match.end())) - continue # Skip execution if args parsing failed + 
params[key] = value + + + except (SyntaxError, ValueError) as e: + result_message = f"Error parsing tool call '{inner_content}': {e}" + self.io.tool_error(f"Failed to parse tool call: {full_match_str}\nError: {e}") + # Don't append the malformed call to processed_content + result_messages.append(f"[Result (Parse Error): {result_message}]") + continue # Skip execution + except Exception as e: # Catch any other unexpected parsing errors + result_message = f"Unexpected error parsing tool call '{inner_content}': {e}" + self.io.tool_error(f"Unexpected error during parsing: {full_match_str}\nError: {e}\n{traceback.format_exc()}") + result_messages.append(f"[Result (Parse Error): {result_message}]") + continue # Execute the tool based on its name - result_message = None try: # Normalize tool name for case-insensitive matching norm_tool_name = tool_name.lower() @@ -474,14 +558,18 @@ class NavigatorCoder(Coder): except Exception as e: result_message = f"Error executing {tool_name}: {str(e)}" - self.io.tool_error(f"Error during {tool_name} execution: {e}") + self.io.tool_error(f"Error during {tool_name} execution: {e}\n{traceback.format_exc()}") if result_message: result_messages.append(f"[Result ({tool_name}): {result_message}]") - # Remove the processed tool call from the content - modified_content = modified_content.replace(full_match_str, "", 1) - processed_indices.add((match.start(), match.end())) + # Note: We don't add the tool call string back to processed_content + + # Update internal counter + self.tool_call_count += call_count + + # Return the content with tool calls removed + modified_content = processed_content # Update internal counter self.tool_call_count += call_count From a310df3df316ba827c84f8ef1c98e607a0a69a61 Mon Sep 17 00:00:00 2001 From: "Amar Sood (tekacs)" Date: Fri, 11 Apr 2025 14:16:44 -0400 Subject: [PATCH 06/63] Allow escaping of tool calls by the model --- aider/coders/navigator_coder.py | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 
deletion(-) diff --git a/aider/coders/navigator_coder.py b/aider/coders/navigator_coder.py index d3f2fb994..042622865 100644 --- a/aider/coders/navigator_coder.py +++ b/aider/coders/navigator_coder.py @@ -363,7 +363,16 @@ class NavigatorCoder(Coder): processed_content += content[last_index:] break - # Append content before the tool call + # Check for escaped tool call: \[tool_call( + if start_pos > 0 and content[start_pos - 1] == '\\': + # Append the content including the escaped marker + # We append up to start_pos + len(start_marker) to include the marker itself. + processed_content += content[last_index : start_pos + len(start_marker)] + # Update last_index to search after this escaped marker + last_index = start_pos + len(start_marker) + continue # Continue searching for the next potential marker + + # Append content before the (non-escaped) tool call processed_content += content[last_index:start_pos] scan_start_pos = start_pos + len(start_marker) From 7f0ef1a04acfd330a7d507019525e0ef6d115009 Mon Sep 17 00:00:00 2001 From: "Amar Sood (tekacs)" Date: Fri, 11 Apr 2025 14:37:30 -0400 Subject: [PATCH 07/63] Granular tool-call based editing --- aider/change_tracker.py | 118 +++ aider/coders/base_coder.py | 4 - aider/coders/navigator_coder.py | 1144 +++++++++++++++++++++++++++++ aider/coders/navigator_prompts.py | 130 +++- 4 files changed, 1384 insertions(+), 12 deletions(-) create mode 100644 aider/change_tracker.py diff --git a/aider/change_tracker.py b/aider/change_tracker.py new file mode 100644 index 000000000..d06460e99 --- /dev/null +++ b/aider/change_tracker.py @@ -0,0 +1,118 @@ +import time +import uuid +from collections import defaultdict +from datetime import datetime + +class ChangeTracker: + """ + Tracks changes made to files for the undo functionality. + This enables granular editing operations with the ability to undo specific changes. 
+ """ + + def __init__(self): + self.changes = {} # change_id -> change_info + self.files_changed = defaultdict(list) # file_path -> [change_ids] + + def track_change(self, file_path, change_type, original_content, new_content, + metadata=None, change_id=None): + """ + Record a change to enable future undo operations. + + Parameters: + - file_path: Path to the file that was changed + - change_type: Type of change (e.g., 'replacetext', 'insertlines') + - original_content: Original content before the change + - new_content: New content after the change + - metadata: Additional information about the change (line numbers, positions, etc.) + - change_id: Optional custom ID for the change (if None, one will be generated) + + Returns: + - change_id: Unique identifier for the change + """ + if change_id is None: + change_id = self._generate_change_id() + + change = { + 'id': change_id, + 'file_path': file_path, + 'type': change_type, + 'original': original_content, + 'new': new_content, + 'metadata': metadata or {}, + 'timestamp': time.time() + } + + self.changes[change_id] = change + self.files_changed[file_path].append(change_id) + return change_id + + def undo_change(self, change_id): + """ + Get information needed to reverse a specific change by ID. 
+ + Parameters: + - change_id: ID of the change to undo + + Returns: + - (success, message, change_info): Tuple with success flag, message, and change information + """ + if change_id not in self.changes: + return False, f"Change ID {change_id} not found", None + + change = self.changes[change_id] + + # Mark this change as undone by removing it from the tracking dictionaries + self.files_changed[change['file_path']].remove(change_id) + if not self.files_changed[change['file_path']]: + del self.files_changed[change['file_path']] + + # Keep the change in the changes dict but mark it as undone + change['undone'] = True + change['undone_at'] = time.time() + + return True, f"Undid change {change_id} in {change['file_path']}", change + + def get_last_change(self, file_path): + """ + Get the most recent change for a specific file. + + Parameters: + - file_path: Path to the file + + Returns: + - change_id or None if no changes found + """ + changes = self.files_changed.get(file_path, []) + if not changes: + return None + return changes[-1] + + def list_changes(self, file_path=None, limit=10): + """ + List recent changes, optionally filtered by file. 
+ + Parameters: + - file_path: Optional path to filter changes by file + - limit: Maximum number of changes to list + + Returns: + - List of change dictionaries + """ + if file_path: + # Get changes only for the specified file + change_ids = self.files_changed.get(file_path, []) + changes = [self.changes[cid] for cid in change_ids if cid in self.changes] + else: + # Get all changes + changes = list(self.changes.values()) + + # Filter out undone changes and sort by timestamp (most recent first) + changes = [c for c in changes if not c.get('undone', False)] + changes = sorted(changes, key=lambda c: c['timestamp'], reverse=True) + + # Apply limit + return changes[:limit] + + def _generate_change_id(self): + """Generate a unique ID for a change.""" + return str(uuid.uuid4())[:8] # Short, readable ID \ No newline at end of file diff --git a/aider/coders/base_coder.py b/aider/coders/base_coder.py index 551039f8f..355634366 100755 --- a/aider/coders/base_coder.py +++ b/aider/coders/base_coder.py @@ -43,10 +43,6 @@ from ..dump import dump # noqa: F401 from .chat_chunks import ChatChunks -# Pattern to detect fenced search/replace blocks -SEARCH_REPLACE_FENCE = re.compile(r"```search_replace\n", re.MULTILINE) - - class UnknownEditFormat(ValueError): def __init__(self, edit_format, valid_formats): self.edit_format = edit_format diff --git a/aider/coders/navigator_coder.py b/aider/coders/navigator_coder.py index 042622865..40f669b64 100644 --- a/aider/coders/navigator_coder.py +++ b/aider/coders/navigator_coder.py @@ -20,6 +20,8 @@ from aider.repo import ANY_GIT_ERROR from aider import urls # Import run_cmd_subprocess directly for non-interactive execution from aider.run_cmd import run_cmd_subprocess +# Import the change tracker +from aider.change_tracker import ChangeTracker class NavigatorCoder(Coder): """Mode where the LLM autonomously manages which files are in context.""" @@ -43,6 +45,8 @@ class NavigatorCoder(Coder): # Enable context management by default only in 
navigator mode self.context_management_enabled = True # Enabled by default for navigator mode + # Initialize change tracker for granular editing + self.change_tracker = ChangeTracker() # Track files added during current exploration self.files_added_in_exploration = set() @@ -466,6 +470,17 @@ class NavigatorCoder(Coder): # Extract value based on AST node type if isinstance(value_node, ast.Constant): value = value_node.value + # Check if this is a multiline string and trim whitespace + if isinstance(value, str) and '\n' in value: + # Get the source line(s) for this node to check if it's a triple-quoted string + lineno = value_node.lineno if hasattr(value_node, 'lineno') else 0 + end_lineno = value_node.end_lineno if hasattr(value_node, 'end_lineno') else lineno + if end_lineno > lineno: # It's a multiline string + # Trim exactly one leading and one trailing newline if present + if value.startswith('\n'): + value = value[1:] + if value.endswith('\n'): + value = value[:-1] elif isinstance(value_node, ast.Name): # Handle unquoted values like True/False/None or variables (though variables are unlikely here) value = value_node.id # Add more types if needed (e.g., ast.List, ast.Dict) @@ -562,6 +577,118 @@ class NavigatorCoder(Coder): result_message = self._execute_command(command_string) else: result_message = "Error: Missing 'command_string' parameter for Command" + + # Granular editing tools + elif norm_tool_name == 'replacetext': + file_path = params.get('file_path') + find_text = params.get('find_text') + replace_text = params.get('replace_text') + near_context = params.get('near_context') + occurrence = params.get('occurrence', 1) + change_id = params.get('change_id') + + if file_path is not None and find_text is not None and replace_text is not None: + result_message = self._execute_replace_text( + file_path, find_text, replace_text, near_context, occurrence, change_id + ) + else: + result_message = "Error: Missing required parameters for ReplaceText" + + elif 
norm_tool_name == 'replaceall': + file_path = params.get('file_path') + find_text = params.get('find_text') + replace_text = params.get('replace_text') + change_id = params.get('change_id') + + if file_path is not None and find_text is not None and replace_text is not None: + result_message = self._execute_replace_all( + file_path, find_text, replace_text, change_id + ) + else: + result_message = "Error: Missing required parameters for ReplaceAll" + + elif norm_tool_name == 'insertblock': + file_path = params.get('file_path') + content = params.get('content') + after_pattern = params.get('after_pattern') + before_pattern = params.get('before_pattern') + change_id = params.get('change_id') + + if file_path is not None and content is not None and (after_pattern is not None or before_pattern is not None): + result_message = self._execute_insert_block( + file_path, content, after_pattern, before_pattern, change_id + ) + else: + result_message = "Error: Missing required parameters for InsertBlock" + + elif norm_tool_name == 'deleteblock': + file_path = params.get('file_path') + start_pattern = params.get('start_pattern') + end_pattern = params.get('end_pattern') + line_count = params.get('line_count') + change_id = params.get('change_id') + + if file_path is not None and start_pattern is not None: + result_message = self._execute_delete_block( + file_path, start_pattern, end_pattern, line_count, change_id + ) + else: + result_message = "Error: Missing required parameters for DeleteBlock" + + elif norm_tool_name == 'replaceline': + file_path = params.get('file_path') + line_number = params.get('line_number') + new_content = params.get('new_content') + change_id = params.get('change_id') + + if file_path is not None and line_number is not None and new_content is not None: + result_message = self._execute_replace_line( + file_path, line_number, new_content, change_id + ) + else: + result_message = "Error: Missing required parameters for ReplaceLine" + + elif norm_tool_name 
== 'replacelines': + file_path = params.get('file_path') + start_line = params.get('start_line') + end_line = params.get('end_line') + new_content = params.get('new_content') + change_id = params.get('change_id') + + if file_path is not None and start_line is not None and end_line is not None and new_content is not None: + result_message = self._execute_replace_lines( + file_path, start_line, end_line, new_content, change_id + ) + else: + result_message = "Error: Missing required parameters for ReplaceLines" + + elif norm_tool_name == 'indentlines': + file_path = params.get('file_path') + start_pattern = params.get('start_pattern') + end_pattern = params.get('end_pattern') + line_count = params.get('line_count') + indent_levels = params.get('indent_levels', 1) + change_id = params.get('change_id') + + if file_path is not None and start_pattern is not None: + result_message = self._execute_indent_lines( + file_path, start_pattern, end_pattern, line_count, indent_levels, change_id + ) + else: + result_message = "Error: Missing required parameters for IndentLines" + + elif norm_tool_name == 'undochange': + change_id = params.get('change_id') + last_file = params.get('last_file') + + result_message = self._execute_undo_change(change_id, last_file) + + elif norm_tool_name == 'listchanges': + file_path = params.get('file_path') + limit = params.get('limit', 10) + + result_message = self._execute_list_changes(file_path, limit) + else: result_message = f"Error: Unknown tool name '{tool_name}'" @@ -1506,3 +1633,1020 @@ Just reply with fixed versions of the {blocks} above that failed to match. 
self.io.tool_output("Enhanced context blocks are now OFF - directory structure and git status will not be included.") return True + + # ------------------- Granular Editing Tools ------------------- + + def _execute_replace_text(self, file_path, find_text, replace_text, near_context=None, occurrence=1, change_id=None): + """ + Replace specific text with new text, optionally using nearby context for disambiguation. + + Parameters: + - file_path: Path to the file to modify + - find_text: Text to find and replace + - replace_text: Text to replace it with + - near_context: Optional text nearby to help locate the correct instance + - occurrence: Which occurrence to replace (1-based index, or -1 for last) + - change_id: Optional ID for tracking the change + + Returns a result message. + """ + try: + # Get absolute file path + abs_path = self.abs_root_path(file_path) + rel_path = self.get_rel_fname(abs_path) + + # Check if file exists + if not os.path.isfile(abs_path): + self.io.tool_error(f"File '{file_path}' not found") + return f"Error: File not found" + + # Check if file is in editable context + if abs_path not in self.abs_fnames: + if abs_path in self.abs_read_only_fnames: + self.io.tool_error(f"File '{file_path}' is read-only. Use MakeEditable first.") + return f"Error: File is read-only. Use MakeEditable first." 
+ else: + self.io.tool_error(f"File '{file_path}' not in context") + return f"Error: File not in context" + + # Read file content + content = self.io.read_text(abs_path) + if content is None: + return f"Error reading file: {file_path}" + + # If near_context is provided, narrow down the search + if near_context: + # Find the section containing both find_text and near_context + sections = [] + for i in range(len(content)): + if i + len(find_text) <= len(content) and content[i:i+len(find_text)] == find_text: + # Look for near_context within a reasonable window (e.g., 200 chars) + window_start = max(0, i - 200) + window_end = min(len(content), i + len(find_text) + 200) + window = content[window_start:window_end] + if near_context in window: + sections.append(i) + + if not sections: + self.io.tool_error(f"Could not find '{find_text}' near '{near_context}'") + return f"Error: Text not found near specified context" + + # Select the occurrence (1-based index) + if occurrence == -1: # Last occurrence + start_index = sections[-1] + else: + occurrence_idx = min(occurrence - 1, len(sections) - 1) + start_index = sections[occurrence_idx] + else: + # Find all occurrences of find_text + sections = [] + start = 0 + while True: + start = content.find(find_text, start) + if start == -1: + break + sections.append(start) + start += 1 # Move past this occurrence + + if not sections: + self.io.tool_error(f"Text '{find_text}' not found in file") + return f"Error: Text not found in file" + + # Select the occurrence (1-based index) + if occurrence == -1: # Last occurrence + start_index = sections[-1] + else: + occurrence_idx = min(occurrence - 1, len(sections) - 1) + start_index = sections[occurrence_idx] + + # Perform the replacement + original_content = content + new_content = content[:start_index] + replace_text + content[start_index + len(find_text):] + + if original_content == new_content: + self.io.tool_warning(f"No changes made: replacement text is identical to original") + return 
f"Warning: No changes made (replacement identical to original)" + + # Write the modified content back to the file + if not self.dry_run: + self.io.write_text(abs_path, new_content) + + # Track the change + metadata = { + 'start_index': start_index, + 'find_text': find_text, + 'replace_text': replace_text, + 'near_context': near_context, + 'occurrence': occurrence + } + change_id = self.change_tracker.track_change( + file_path=rel_path, + change_type='replacetext', + original_content=original_content, + new_content=new_content, + metadata=metadata, + change_id=change_id + ) + + self.aider_edited_files.add(rel_path) + + # Get more context around the replace (get up to 3 lines before and after) + lines = content.splitlines() + found_idx = -1 + for i, line in enumerate(lines): + if start_index < len(''.join(lines[:i+1])) + i: # Account for newlines + found_idx = i + break + + if found_idx != -1: + # Get lines with context + start_line = max(0, found_idx - 3) + end_line = min(len(lines) - 1, found_idx + 3) + + # Format the diff in git style + diff_lines = [] + for i in range(start_line, end_line + 1): + if i == found_idx: + # This is the line containing the change + line = lines[i] + # Find position of match within the line + line_start = start_index - len(''.join(lines[:i])) - i + if line_start >= 0 and line_start + len(find_text) <= len(line): + # If we can isolate the exact position in the line + old_line = line + new_line = line[:line_start] + replace_text + line[line_start + len(find_text):] + diff_lines.append(f"- {old_line}") + diff_lines.append(f"+ {new_line}") + else: + # If we can't isolate exact position (e.g., multi-line match) + diff_lines.append(f"- {line}") + # Try our best approximation for the new line + if find_text in line: + diff_lines.append(f"+ {line.replace(find_text, replace_text)}") + else: + diff_lines.append(f"+ [modified line]") + else: + # Context line, prefix with space + diff_lines.append(f" {lines[i]}") + + diff_example = f"@@ line 
{start_line+1},{end_line+1} @@\n" + "\n".join(diff_lines) + else: + # Fallback if we can't locate the exact line + diff_example = f"- {find_text}\n+ {replace_text}" + + self.io.tool_output(f"✅ Replaced text in {file_path} (change_id: {change_id})") + return f"Successfully replaced text (change_id: {change_id}):\n{diff_example}" + else: + self.io.tool_output(f"Did not replace text in {file_path} (--dry-run)") + return f"Did not replace text (--dry-run)" + + except Exception as e: + self.io.tool_error(f"Error in ReplaceText: {str(e)}") + return f"Error: {str(e)}" + + def _execute_replace_all(self, file_path, find_text, replace_text, change_id=None): + """ + Replace all occurrences of text in a file. + + Parameters: + - file_path: Path to the file to modify + - find_text: Text to find and replace + - replace_text: Text to replace it with + - change_id: Optional ID for tracking the change + + Returns a result message. + """ + try: + # Get absolute file path + abs_path = self.abs_root_path(file_path) + rel_path = self.get_rel_fname(abs_path) + + # Check if file exists + if not os.path.isfile(abs_path): + self.io.tool_error(f"File '{file_path}' not found") + return f"Error: File not found" + + # Check if file is in editable context + if abs_path not in self.abs_fnames: + if abs_path in self.abs_read_only_fnames: + self.io.tool_error(f"File '{file_path}' is read-only. Use MakeEditable first.") + return f"Error: File is read-only. Use MakeEditable first." 
+ else: + self.io.tool_error(f"File '{file_path}' not in context") + return f"Error: File not in context" + + # Read file content + content = self.io.read_text(abs_path) + if content is None: + return f"Error reading file: {file_path}" + + # Count occurrences + count = content.count(find_text) + if count == 0: + self.io.tool_warning(f"Text '{find_text}' not found in file") + return f"Warning: Text not found in file" + + # Perform the replacement + original_content = content + new_content = content.replace(find_text, replace_text) + + if original_content == new_content: + self.io.tool_warning(f"No changes made: replacement text is identical to original") + return f"Warning: No changes made (replacement identical to original)" + + # Write the modified content back to the file + if not self.dry_run: + self.io.write_text(abs_path, new_content) + + # Track the change + metadata = { + 'find_text': find_text, + 'replace_text': replace_text, + 'occurrences': count + } + change_id = self.change_tracker.track_change( + file_path=rel_path, + change_type='replaceall', + original_content=original_content, + new_content=new_content, + metadata=metadata, + change_id=change_id + ) + + self.aider_edited_files.add(rel_path) + + # Build a mapping of line number to replacements on that line + line_changes = {} + + # Split content into lines + lines = content.splitlines() + + # Keep track of character position across all lines + char_pos = 0 + for line_idx, line in enumerate(lines): + line_len = len(line) + + # Look for occurrences within this line + line_pos = 0 + while line_pos <= line_len - len(find_text): + match_pos = line[line_pos:].find(find_text) + if match_pos == -1: + break + + # Found a match in this line + true_pos = line_pos + match_pos + if line_idx not in line_changes: + line_changes[line_idx] = [] + + line_changes[line_idx].append((true_pos, find_text, replace_text)) + + # Move past this match + line_pos = true_pos + len(find_text) + + # Move to next line (add 1 for the 
newline) + char_pos += line_len + 1 + + # Generate git-style diffs for each affected line with context + diff_chunks = [] + sorted_changed_lines = sorted(line_changes.keys()) + + # Group adjacent changed lines into chunks + chunks = [] + current_chunk = [] + + for line_idx in sorted_changed_lines: + if not current_chunk or line_idx <= current_chunk[-1] + 6: # Keep chunks within 6 lines + current_chunk.append(line_idx) + else: + chunks.append(current_chunk) + current_chunk = [line_idx] + + if current_chunk: + chunks.append(current_chunk) + + # Generate diff for each chunk + for chunk in chunks: + min_line = max(0, min(chunk) - 3) # 3 lines of context before + max_line = min(len(lines) - 1, max(chunk) + 3) # 3 lines of context after + + diff_lines = [] + diff_lines.append(f"@@ line {min_line+1},{max_line+1} @@") + + for i in range(min_line, max_line + 1): + if i in line_changes: + # This is a line with changes + original_line = lines[i] + modified_line = original_line + + # Apply all replacements to this line + # We need to apply them from right to left to maintain correct positions + changes = sorted(line_changes[i], key=lambda x: x[0], reverse=True) + for pos, old_text, new_text in changes: + modified_line = modified_line[:pos] + new_text + modified_line[pos + len(old_text):] + + diff_lines.append(f"- {original_line}") + diff_lines.append(f"+ {modified_line}") + else: + # Context line + diff_lines.append(f" {lines[i]}") + + diff_chunks.append("\n".join(diff_lines)) + + # Join all chunks into a single diff + diff_examples = "\n\n".join(diff_chunks) if diff_chunks else "No changes shown (content parsing error)" + + self.io.tool_output(f"✅ Replaced {count} occurrences in {file_path} (change_id: {change_id})") + return f"Successfully replaced {count} occurrences (change_id: {change_id}) with examples:\n{diff_examples}" + else: + self.io.tool_output(f"Did not replace text in {file_path} (--dry-run)") + return f"Did not replace text (--dry-run)" + + except Exception as 
e: + self.io.tool_error(f"Error in ReplaceAll: {str(e)}") + return f"Error: {str(e)}" + + def _execute_insert_block(self, file_path, content, after_pattern=None, before_pattern=None, change_id=None): + """ + Insert a block of text after or before a specified pattern. + + Parameters: + - file_path: Path to the file to modify + - content: Text block to insert + - after_pattern: Pattern after which to insert the block (line containing this pattern) + - before_pattern: Pattern before which to insert the block (line containing this pattern) + - change_id: Optional ID for tracking the change + + Returns a result message. + """ + try: + # Get absolute file path + abs_path = self.abs_root_path(file_path) + rel_path = self.get_rel_fname(abs_path) + + # Check if file exists + if not os.path.isfile(abs_path): + self.io.tool_error(f"File '{file_path}' not found") + return f"Error: File not found" + + # Check if file is in editable context + if abs_path not in self.abs_fnames: + if abs_path in self.abs_read_only_fnames: + self.io.tool_error(f"File '{file_path}' is read-only. Use MakeEditable first.") + return f"Error: File is read-only. Use MakeEditable first." 
+ else: + self.io.tool_error(f"File '{file_path}' not in context") + return f"Error: File not in context" + + # Read file content + file_content = self.io.read_text(abs_path) + if file_content is None: + return f"Error reading file: {file_path}" + + # Validate we have either after_pattern or before_pattern, but not both + if after_pattern and before_pattern: + self.io.tool_error("Cannot specify both after_pattern and before_pattern") + return "Error: Cannot specify both after_pattern and before_pattern" + if not after_pattern and not before_pattern: + self.io.tool_error("Must specify either after_pattern or before_pattern") + return "Error: Must specify either after_pattern or before_pattern" + + # Split into lines for easier handling + lines = file_content.splitlines() + original_content = file_content + + # Find the insertion point + insertion_point = -1 + pattern = after_pattern if after_pattern else before_pattern + for i, line in enumerate(lines): + if pattern in line: + insertion_point = i + # For after_pattern, insert after this line + if after_pattern: + insertion_point += 1 + break + + if insertion_point == -1: + self.io.tool_error(f"Pattern '{pattern}' not found in file") + return f"Error: Pattern not found in file" + + # Insert the content + content_lines = content.splitlines() + new_lines = lines[:insertion_point] + content_lines + lines[insertion_point:] + new_content = '\n'.join(new_lines) + + if original_content == new_content: + self.io.tool_warning(f"No changes made: insertion would not change file") + return f"Warning: No changes made (insertion would not change file)" + + # Write the modified content back to the file + if not self.dry_run: + self.io.write_text(abs_path, new_content) + + # Track the change + metadata = { + 'insertion_point': insertion_point, + 'after_pattern': after_pattern, + 'before_pattern': before_pattern, + 'content': content + } + change_id = self.change_tracker.track_change( + file_path=rel_path, + 
change_type='insertblock', + original_content=original_content, + new_content=new_content, + metadata=metadata, + change_id=change_id + ) + + self.aider_edited_files.add(rel_path) + pattern_type = "after" if after_pattern else "before" + self.io.tool_output(f"✅ Inserted block {pattern_type} pattern in {file_path} (change_id: {change_id})") + return f"Successfully inserted block (change_id: {change_id})" + else: + self.io.tool_output(f"Did not insert block in {file_path} (--dry-run)") + return f"Did not insert block (--dry-run)" + + except Exception as e: + self.io.tool_error(f"Error in InsertBlock: {str(e)}") + return f"Error: {str(e)}" + + def _execute_delete_block(self, file_path, start_pattern, end_pattern=None, line_count=None, change_id=None): + """ + Delete a block of text between start_pattern and end_pattern (inclusive). + + Parameters: + - file_path: Path to the file to modify + - start_pattern: Pattern marking the start of the block to delete + - end_pattern: Pattern marking the end of the block to delete + - line_count: Number of lines to delete (alternative to end_pattern) + - change_id: Optional ID for tracking the change + + Returns a result message. + """ + try: + # Get absolute file path + abs_path = self.abs_root_path(file_path) + rel_path = self.get_rel_fname(abs_path) + + # Check if file exists + if not os.path.isfile(abs_path): + self.io.tool_error(f"File '{file_path}' not found") + return f"Error: File not found" + + # Check if file is in editable context + if abs_path not in self.abs_fnames: + if abs_path in self.abs_read_only_fnames: + self.io.tool_error(f"File '{file_path}' is read-only. Use MakeEditable first.") + return f"Error: File is read-only. Use MakeEditable first." 
+ else: + self.io.tool_error(f"File '{file_path}' not in context") + return f"Error: File not in context" + + # Read file content + file_content = self.io.read_text(abs_path) + if file_content is None: + return f"Error reading file: {file_path}" + + # Validate we have either end_pattern or line_count, but not both + if end_pattern and line_count: + self.io.tool_error("Cannot specify both end_pattern and line_count") + return "Error: Cannot specify both end_pattern and line_count" + + # Split into lines for easier handling + lines = file_content.splitlines() + original_content = file_content + + # Find the start line + start_line = -1 + for i, line in enumerate(lines): + if start_pattern in line: + start_line = i + break + + if start_line == -1: + self.io.tool_error(f"Start pattern '{start_pattern}' not found in file") + return f"Error: Start pattern not found in file" + + # Find the end line + end_line = -1 + if end_pattern: + for i in range(start_line + 1, len(lines)): + if end_pattern in lines[i]: + end_line = i + break + + if end_line == -1: + self.io.tool_error(f"End pattern '{end_pattern}' not found after start pattern") + return f"Error: End pattern not found after start pattern" + elif line_count: + # Calculate end line based on start line and line count + end_line = min(start_line + line_count - 1, len(lines) - 1) + else: + # If neither is specified, delete just the start line + end_line = start_line + + # Delete the block + deleted_lines = lines[start_line:end_line+1] + new_lines = lines[:start_line] + lines[end_line+1:] + new_content = '\n'.join(new_lines) + + if original_content == new_content: + self.io.tool_warning(f"No changes made: deletion would not change file") + return f"Warning: No changes made (deletion would not change file)" + + # Write the modified content back to the file + if not self.dry_run: + self.io.write_text(abs_path, new_content) + + # Track the change + metadata = { + 'start_line': start_line, + 'end_line': end_line, + 
'start_pattern': start_pattern, + 'end_pattern': end_pattern, + 'line_count': line_count, + 'deleted_content': '\n'.join(deleted_lines) + } + change_id = self.change_tracker.track_change( + file_path=rel_path, + change_type='deleteblock', + original_content=original_content, + new_content=new_content, + metadata=metadata, + change_id=change_id + ) + + self.aider_edited_files.add(rel_path) + self.io.tool_output(f"✅ Deleted {end_line - start_line + 1} lines from {file_path} (change_id: {change_id})") + return f"Successfully deleted {end_line - start_line + 1} lines (change_id: {change_id})" + else: + self.io.tool_output(f"Did not delete block in {file_path} (--dry-run)") + return f"Did not delete block (--dry-run)" + + except Exception as e: + self.io.tool_error(f"Error in DeleteBlock: {str(e)}") + return f"Error: {str(e)}" + + def _execute_undo_change(self, change_id=None, last_file=None): + """ + Undo a specific change by ID, or the last change to a file. + + Parameters: + - change_id: ID of the change to undo + - last_file: Path to file where the last change should be undone + + Returns a result message. 
+ """ + try: + # Validate parameters + if change_id is None and last_file is None: + self.io.tool_error("Must specify either change_id or last_file") + return "Error: Must specify either change_id or last_file" + + # If last_file is specified, get the most recent change for that file + if last_file: + abs_path = self.abs_root_path(last_file) + rel_path = self.get_rel_fname(abs_path) + + change_id = self.change_tracker.get_last_change(rel_path) + if not change_id: + self.io.tool_error(f"No changes found for file '{last_file}'") + return f"Error: No changes found for file" + + # Attempt to undo the change + success, message, change_info = self.change_tracker.undo_change(change_id) + + if not success: + self.io.tool_error(message) + return f"Error: {message}" + + # Apply the undo by restoring the original content + if change_info: + file_path = change_info['file_path'] + abs_path = self.abs_root_path(file_path) + + # Write the original content back to the file + if not self.dry_run: + self.io.write_text(abs_path, change_info['original']) + self.aider_edited_files.add(file_path) + + change_type = change_info['type'] + self.io.tool_output(f"✅ Undid {change_type} in {file_path} (change_id: {change_id})") + return f"Successfully undid {change_type} (change_id: {change_id})" + else: + self.io.tool_output(f"Did not undo change in {file_path} (--dry-run)") + return f"Did not undo change (--dry-run)" + + return "Error: Failed to undo change (unknown reason)" + + except Exception as e: + self.io.tool_error(f"Error in UndoChange: {str(e)}") + return f"Error: {str(e)}" + + def _execute_replace_line(self, file_path, line_number, new_content, change_id=None): + """ + Replace a specific line identified by line number. + Useful for fixing errors identified by error messages or linters. 
+ + Parameters: + - file_path: Path to the file to modify + - line_number: The line number to replace (1-based) + - new_content: New content for the line + - change_id: Optional ID for tracking the change + + Returns a result message. + """ + try: + # Get absolute file path + abs_path = self.abs_root_path(file_path) + rel_path = self.get_rel_fname(abs_path) + + # Check if file exists + if not os.path.isfile(abs_path): + self.io.tool_error(f"File '{file_path}' not found") + return f"Error: File not found" + + # Check if file is in editable context + if abs_path not in self.abs_fnames: + if abs_path in self.abs_read_only_fnames: + self.io.tool_error(f"File '{file_path}' is read-only. Use MakeEditable first.") + return f"Error: File is read-only. Use MakeEditable first." + else: + self.io.tool_error(f"File '{file_path}' not in context") + return f"Error: File not in context" + + # Read file content + file_content = self.io.read_text(abs_path) + if file_content is None: + return f"Error reading file: {file_path}" + + # Split into lines + lines = file_content.splitlines() + + # Validate line number + if not isinstance(line_number, int): + try: + line_number = int(line_number) + except ValueError: + self.io.tool_error(f"Line number must be an integer, got '{line_number}'") + return f"Error: Line number must be an integer" + + # Convert 1-based line number (what most editors and error messages use) to 0-based index + idx = line_number - 1 + + if idx < 0 or idx >= len(lines): + self.io.tool_error(f"Line number {line_number} is out of range (file has {len(lines)} lines)") + return f"Error: Line number out of range" + + # Store original content for change tracking + original_content = file_content + original_line = lines[idx] + + # Replace the line + lines[idx] = new_content + + # Join lines back into a string + new_content_full = '\n'.join(lines) + + if original_content == new_content_full: + self.io.tool_warning("No changes made: new line content is identical to original") 
+ return f"Warning: No changes made (new content identical to original)" + + # Write the modified content back to the file + if not self.dry_run: + self.io.write_text(abs_path, new_content_full) + + # Track the change + metadata = { + 'line_number': line_number, + 'original_line': original_line, + 'new_line': new_content + } + change_id = self.change_tracker.track_change( + file_path=rel_path, + change_type='replaceline', + original_content=original_content, + new_content=new_content_full, + metadata=metadata, + change_id=change_id + ) + + self.aider_edited_files.add(rel_path) + + # Create a readable diff for the line replacement + diff = f"Line {line_number}:\n- {original_line}\n+ {new_content}" + + self.io.tool_output(f"✅ Replaced line {line_number} in {file_path} (change_id: {change_id})") + return f"Successfully replaced line {line_number} (change_id: {change_id}):\n{diff}" + else: + self.io.tool_output(f"Did not replace line in {file_path} (--dry-run)") + return f"Did not replace line (--dry-run)" + + except Exception as e: + self.io.tool_error(f"Error in ReplaceLine: {str(e)}") + return f"Error: {str(e)}" + + def _execute_replace_lines(self, file_path, start_line, end_line, new_content, change_id=None): + """ + Replace a range of lines identified by line numbers. + Useful for fixing errors identified by error messages or linters. + + Parameters: + - file_path: Path to the file to modify + - start_line: The first line number to replace (1-based) + - end_line: The last line number to replace (1-based) + - new_content: New content for the lines (can be multi-line) + - change_id: Optional ID for tracking the change + + Returns a result message. 
+ """ + try: + # Get absolute file path + abs_path = self.abs_root_path(file_path) + rel_path = self.get_rel_fname(abs_path) + + # Check if file exists + if not os.path.isfile(abs_path): + self.io.tool_error(f"File '{file_path}' not found") + return f"Error: File not found" + + # Check if file is in editable context + if abs_path not in self.abs_fnames: + if abs_path in self.abs_read_only_fnames: + self.io.tool_error(f"File '{file_path}' is read-only. Use MakeEditable first.") + return f"Error: File is read-only. Use MakeEditable first." + else: + self.io.tool_error(f"File '{file_path}' not in context") + return f"Error: File not in context" + + # Read file content + file_content = self.io.read_text(abs_path) + if file_content is None: + return f"Error reading file: {file_path}" + + # Convert line numbers to integers if needed + if not isinstance(start_line, int): + try: + start_line = int(start_line) + except ValueError: + self.io.tool_error(f"Start line must be an integer, got '{start_line}'") + return f"Error: Start line must be an integer" + + if not isinstance(end_line, int): + try: + end_line = int(end_line) + except ValueError: + self.io.tool_error(f"End line must be an integer, got '{end_line}'") + return f"Error: End line must be an integer" + + # Split into lines + lines = file_content.splitlines() + + # Convert 1-based line numbers to 0-based indices + start_idx = start_line - 1 + end_idx = end_line - 1 + + # Validate line numbers + if start_idx < 0 or start_idx >= len(lines): + self.io.tool_error(f"Start line {start_line} is out of range (file has {len(lines)} lines)") + return f"Error: Start line out of range" + + if end_idx < start_idx or end_idx >= len(lines): + self.io.tool_error(f"End line {end_line} is out of range (must be >= start line and < {len(lines)})") + return f"Error: End line out of range" + + # Store original content for change tracking + original_content = file_content + replaced_lines = lines[start_idx:end_idx+1] + + # Split the new 
content into lines + new_lines = new_content.splitlines() + + # Perform the replacement + new_full_lines = lines[:start_idx] + new_lines + lines[end_idx+1:] + new_content_full = '\n'.join(new_full_lines) + + if original_content == new_content_full: + self.io.tool_warning("No changes made: new content is identical to original") + return f"Warning: No changes made (new content identical to original)" + + # Write the modified content back to the file + if not self.dry_run: + self.io.write_text(abs_path, new_content_full) + + # Track the change + metadata = { + 'start_line': start_line, + 'end_line': end_line, + 'replaced_lines': replaced_lines, + 'new_lines': new_lines + } + change_id = self.change_tracker.track_change( + file_path=rel_path, + change_type='replacelines', + original_content=original_content, + new_content=new_content_full, + metadata=metadata, + change_id=change_id + ) + + self.aider_edited_files.add(rel_path) + replaced_count = end_line - start_line + 1 + new_count = len(new_lines) + + # Create a readable diff for the lines replacement + diff = f"Lines {start_line}-{end_line}:\n" + # Add removed lines with - prefix + for line in replaced_lines: + diff += f"- {line}\n" + # Add separator + diff += "---\n" + # Add new lines with + prefix + for line in new_lines: + diff += f"+ {line}\n" + + self.io.tool_output(f"✅ Replaced lines {start_line}-{end_line} ({replaced_count} lines) with {new_count} new lines in {file_path} (change_id: {change_id})") + return f"Successfully replaced lines {start_line}-{end_line} with {new_count} new lines (change_id: {change_id}):\n{diff}" + else: + self.io.tool_output(f"Did not replace lines in {file_path} (--dry-run)") + return f"Did not replace lines (--dry-run)" + + except Exception as e: + self.io.tool_error(f"Error in ReplaceLines: {str(e)}") + return f"Error: {str(e)}" + + def _execute_indent_lines(self, file_path, start_pattern, end_pattern=None, line_count=None, indent_levels=1, change_id=None): + """ + Indent or 
unindent a block of lines in a file. + + Parameters: + - file_path: Path to the file to modify + - start_pattern: Pattern marking the start of the block to indent + - end_pattern: Pattern marking the end of the block to indent + - line_count: Number of lines to indent (alternative to end_pattern) + - indent_levels: Number of levels to indent (positive) or unindent (negative) + - change_id: Optional ID for tracking the change + + Returns a result message. + """ + try: + # Get absolute file path + abs_path = self.abs_root_path(file_path) + rel_path = self.get_rel_fname(abs_path) + + # Check if file exists + if not os.path.isfile(abs_path): + self.io.tool_error(f"File '{file_path}' not found") + return f"Error: File not found" + + # Check if file is in editable context + if abs_path not in self.abs_fnames: + if abs_path in self.abs_read_only_fnames: + self.io.tool_error(f"File '{file_path}' is read-only. Use MakeEditable first.") + return f"Error: File is read-only. Use MakeEditable first." 
+ else: + self.io.tool_error(f"File '{file_path}' not in context") + return f"Error: File not in context" + + # Read file content + file_content = self.io.read_text(abs_path) + if file_content is None: + return f"Error reading file: {file_path}" + + # Validate we have either end_pattern or line_count, but not both + if end_pattern and line_count: + self.io.tool_error("Cannot specify both end_pattern and line_count") + return "Error: Cannot specify both end_pattern and line_count" + + # Split into lines for easier handling + lines = file_content.splitlines() + original_content = file_content + + # Find the start line + start_line = -1 + for i, line in enumerate(lines): + if start_pattern in line: + start_line = i + break + + if start_line == -1: + self.io.tool_error(f"Start pattern '{start_pattern}' not found in file") + return f"Error: Start pattern not found in file" + + # Find the end line + end_line = -1 + if end_pattern: + for i in range(start_line + 1, len(lines)): + if end_pattern in lines[i]: + end_line = i + break + + if end_line == -1: + self.io.tool_error(f"End pattern '{end_pattern}' not found after start pattern") + return f"Error: End pattern not found after start pattern" + elif line_count: + # Calculate end line based on start line and line count + end_line = min(start_line + line_count - 1, len(lines) - 1) + else: + # If neither is specified, indent just the start line + end_line = start_line + + # Determine indentation amount (4 spaces per level) + indent_spaces = 4 * indent_levels + + # Apply indentation + for i in range(start_line, end_line + 1): + if indent_levels > 0: + # Add indentation + lines[i] = ' ' * indent_spaces + lines[i] + else: + # Remove indentation, but do not remove more than exists + spaces_to_remove = min(abs(indent_spaces), len(lines[i]) - len(lines[i].lstrip())) + if spaces_to_remove > 0: + lines[i] = lines[i][spaces_to_remove:] + + # Join lines back into a string + new_content = '\n'.join(lines) + + if original_content == 
new_content: + self.io.tool_warning(f"No changes made: indentation would not change file") + return f"Warning: No changes made (indentation would not change file)" + + # Write the modified content back to the file + if not self.dry_run: + self.io.write_text(abs_path, new_content) + + # Track the change + metadata = { + 'start_line': start_line, + 'end_line': end_line, + 'start_pattern': start_pattern, + 'end_pattern': end_pattern, + 'line_count': line_count, + 'indent_levels': indent_levels + } + change_id = self.change_tracker.track_change( + file_path=rel_path, + change_type='indentlines', + original_content=original_content, + new_content=new_content, + metadata=metadata, + change_id=change_id + ) + + self.aider_edited_files.add(rel_path) + action = "Indented" if indent_levels > 0 else "Unindented" + levels = abs(indent_levels) + level_text = "level" if levels == 1 else "levels" + self.io.tool_output(f"✅ {action} {end_line - start_line + 1} lines by {levels} {level_text} in {file_path} (change_id: {change_id})") + return f"Successfully {action.lower()} {end_line - start_line + 1} lines by {levels} {level_text} (change_id: {change_id})" + else: + self.io.tool_output(f"Did not indent lines in {file_path} (--dry-run)") + return f"Did not indent lines (--dry-run)" + + except Exception as e: + self.io.tool_error(f"Error in IndentLines: {str(e)}") + return f"Error: {str(e)}" + + def _execute_list_changes(self, file_path=None, limit=10): + """ + List recent changes made to files. + + Parameters: + - file_path: Optional path to filter changes by file + - limit: Maximum number of changes to list + + Returns a formatted list of changes. 
+ """ + try: + # If file_path is specified, get the absolute path + rel_file_path = None + if file_path: + abs_path = self.abs_root_path(file_path) + rel_file_path = self.get_rel_fname(abs_path) + + # Get the list of changes + changes = self.change_tracker.list_changes(rel_file_path, limit) + + if not changes: + if file_path: + return f"No changes found for file '{file_path}'" + else: + return "No changes have been made yet" + + # Format the changes into a readable list + result = "Recent changes:\n" + for i, change in enumerate(changes): + change_time = datetime.fromtimestamp(change['timestamp']).strftime('%H:%M:%S') + change_type = change['type'] + file_path = change['file_path'] + change_id = change['id'] + + result += f"{i+1}. [{change_id}] {change_time} - {change_type.upper()} on {file_path}\n" + + self.io.tool_output(result) + return result + + except Exception as e: + self.io.tool_error(f"Error in ListChanges: {str(e)}") + return f"Error: {str(e)}" diff --git a/aider/coders/navigator_prompts.py b/aider/coders/navigator_prompts.py index 2db0292cc..e4592dfaa 100644 --- a/aider/coders/navigator_prompts.py +++ b/aider/coders/navigator_prompts.py @@ -12,7 +12,7 @@ class NavigatorPrompts(CoderPrompts): LLM to manage its own context by adding/removing files and executing commands. """ - main_system = """ + main_system = r''' ## Role and Purpose Act as an expert software engineer with the ability to autonomously navigate and modify a codebase. @@ -60,10 +60,50 @@ Act as an expert software engineer with the ability to autonomously navigate and - **MakeReadonly**: `[tool_call(MakeReadonly, file_path="src/main.py")]` Convert an editable file back to read-only status. +### Granular Editing Tools +- **ReplaceText**: `[tool_call(ReplaceText, file_path="path/to/file.py", find_text="old text", replace_text="new text", near_context="unique nearby text", occurrence=1)]` + Replace specific text with new text. Use near_context to disambiguate between multiple occurrences. 
+ Set occurrence to -1 for the last occurrence, or a number for a specific occurrence. + +- **ReplaceAll**: `[tool_call(ReplaceAll, file_path="path/to/file.py", find_text="oldVar", replace_text="newVar")]` + Replace all occurrences of text in a file. Useful for renaming variables, function names, etc. + +- **InsertBlock**: `[tool_call(InsertBlock, file_path="path/to/file.py", content=""" +def new_function(): + return True +""", after_pattern="# Insert after this line")]` + Insert a block of text after or before a pattern. Use single quotes with escaped newlines for multi-line content. + Specify either after_pattern or before_pattern to place the block. + +- **DeleteBlock**: `[tool_call(DeleteBlock, file_path="path/to/file.py", start_pattern="def old_function", end_pattern="# End function")]` + Delete a block of text from start_pattern to end_pattern (inclusive). + Alternatively, use line_count instead of end_pattern to delete a specific number of lines. + +- **ReplaceLine**: `[tool_call(ReplaceLine, file_path="path/to/file.py", line_number=42, new_content="def fixed_function(param):")]` + Replace a specific line by its line number. Especially useful for fixing errors or lint warnings that include line numbers. + Line numbers are 1-based (as in most editors and error messages). + +- **ReplaceLines**: `[tool_call(ReplaceLines, file_path="path/to/file.py", start_line=42, end_line=45, new_content=""" +def better_function(param): + # Fixed implementation + return process(param) +""")]` + Replace a range of lines by line numbers. Useful for fixing multiple lines referenced in error messages. + The new_content can contain any number of lines, not just the same count as the original range. + +- **IndentLines**: `[tool_call(IndentLines, file_path="path/to/file.py", start_pattern="def my_function", end_pattern="return result", indent_levels=1)]` + Indent or unindent a block of lines. Use positive indent_levels to increase indentation or negative to decrease. 
+ Specify either end_pattern or line_count to determine the range of lines to indent. + +- **UndoChange**: `[tool_call(UndoChange, change_id="a1b2c3d4")]` + Undo a specific change by its ID. Alternatively, use last_file="path/to/file.py" to undo the most recent change to that file. + +- **ListChanges**: `[tool_call(ListChanges, file_path="path/to/file.py", limit=5)]` + List recent changes made to files. Optionally filter by file_path and limit the number of results. + ### Other Tools - **Command**: `[tool_call(Command, command_string="git diff HEAD~1")]` Execute a shell command. Requires user confirmation. - **Do NOT use this for aider commands starting with `/` (like `/add`, `/run`, `/diff`).** ### Multi-Turn Exploration When you include any tool call, the system will automatically continue to the next round. @@ -88,6 +128,13 @@ When you include any tool call, the system will automatically continue to the ne - Target specific patterns rather than overly broad searches - Remember the `Find` tool is optimized for locating symbols across the codebase +### Granular Editing Workflow +1. **Discover and Add Files**: Use Glob, Grep, Find to locate relevant files +2. **Make Files Editable**: Convert read-only files to editable with MakeEditable +3. **Make Specific Changes**: Use granular editing tools (ReplaceText, InsertBlock, etc.) for precise edits +4. **Review Changes**: List applied changes with ListChanges +5. 
**Fix Mistakes**: If needed, undo changes with UndoChange by specific ID or last change to a file + ### Context Management Strategy - Keep your context focused by removing files that are no longer relevant - For large codebases, maintain only 5-15 files in context at once for best performance @@ -98,17 +145,83 @@ When you include any tool call, the system will automatically continue to the ne ## Code Editing Process -### SEARCH/REPLACE Block Format -When proposing code changes, describe each change with a SEARCH/REPLACE block using this exact format: +### Granular Editing with Tool Calls +For precise, targeted edits to code, use the granular editing tools: -```language_name -/path/to/file.ext +- **ReplaceText**: Replace specific instances of text in a file +- **ReplaceAll**: Replace all occurrences of text in a file (e.g., rename variables) +- **InsertBlock**: Insert multi-line blocks of code at specific locations +- **DeleteBlock**: Remove specific sections of code +- **ReplaceLine/ReplaceLines**: Fix specific line numbers from error messages or linters +- **IndentLines**: Adjust indentation of code blocks +- **UndoChange**: Reverse specific changes by ID if you make a mistake + +#### When to Use Line Number Based Tools + +When dealing with errors or warnings that include line numbers, prefer the line-based editing tools: + +``` +Error in /path/to/file.py line 42: Syntax error: unexpected token +Warning in /path/to/file.py lines 105-107: This block should be indented +``` + +For these cases, use: +- `ReplaceLine` for single line fixes (e.g., syntax errors) +- `ReplaceLines` for multi-line issues +- `IndentLines` for indentation problems + +#### Multiline Tool Call Content Format + +When providing multiline content in tool calls (like ReplaceLines, InsertBlock), one leading and one trailing +newline will be automatically trimmed if present. 
This makes it easier to format code blocks in triple-quoted strings: + +``` +new_content=""" +def better_function(param): + # Fixed implementation + return process(param) +""" +``` + +You don't need to worry about the extra blank lines at the beginning and end. If you actually need to +preserve blank lines in your output, simply add an extra newline: + +``` +new_content=""" + +def better_function(param): # Note the extra newline above to preserve a blank line + # Fixed implementation + return process(param) +""" +``` + +Example of inserting a new multi-line function: +``` +[tool_call(InsertBlock, + file_path="src/utils.py", + after_pattern="def existing_function():", + content=""" +def new_function(param1, param2): + # This is a new utility function + result = process_data(param1) + if result and param2: + return result + return None +""")] +``` + +### SEARCH/REPLACE Block Format (Alternative Method) +For larger changes that involve multiple edits or significant restructuring, you can still use SEARCH/REPLACE blocks with this exact format: + +````python +path/to/file.ext <<<<<<< SEARCH Original code lines to match exactly ======= Replacement code lines >>>>>>> REPLACE -``` +```` +NOTE that this uses four backticks as the fence and not three! ### Editing Guidelines - Every SEARCH section must EXACTLY MATCH existing content, including whitespace and indentation @@ -123,6 +236,7 @@ Replacement code lines - If tools return errors or unexpected results, try alternative approaches - Refine search patterns if results are too broad or too narrow - Use the enhanced context blocks (directory structure and git status) to orient yourself +- Use ListChanges to see what edits have been made and UndoChange to revert mistakes Always reply to the user in {language}. @@ -179,7 +293,7 @@ Would you like me to explain any specific part of the authentication process in These files have been added to the chat so you can see all of their contents. 
Trust this message as the true contents of the files! -""" +''' files_content_assistant_reply = ( "I understand. I'll use these files to help with your request." From a24ef3694a6f288ab561e2e836e0417d35a3389c Mon Sep 17 00:00:00 2001 From: "Amar Sood (tekacs)" Date: Sat, 12 Apr 2025 03:12:45 -0400 Subject: [PATCH 08/63] Granular edits with dry_run --- aider/coders/navigator_coder.py | 1299 ++++++++++++++++++----------- aider/coders/navigator_prompts.py | 60 +- 2 files changed, 843 insertions(+), 516 deletions(-) diff --git a/aider/coders/navigator_coder.py b/aider/coders/navigator_coder.py index 40f669b64..5155318dc 100644 --- a/aider/coders/navigator_coder.py +++ b/aider/coders/navigator_coder.py @@ -584,69 +584,78 @@ class NavigatorCoder(Coder): find_text = params.get('find_text') replace_text = params.get('replace_text') near_context = params.get('near_context') - occurrence = params.get('occurrence', 1) + occurrence = params.get('occurrence', 1) # Default to first occurrence change_id = params.get('change_id') - + dry_run = params.get('dry_run', False) # Default to False + if file_path is not None and find_text is not None and replace_text is not None: result_message = self._execute_replace_text( - file_path, find_text, replace_text, near_context, occurrence, change_id + file_path, find_text, replace_text, near_context, occurrence, change_id, dry_run ) else: - result_message = "Error: Missing required parameters for ReplaceText" + result_message = "Error: Missing required parameters for ReplaceText (file_path, find_text, replace_text)" elif norm_tool_name == 'replaceall': file_path = params.get('file_path') find_text = params.get('find_text') replace_text = params.get('replace_text') change_id = params.get('change_id') - + dry_run = params.get('dry_run', False) # Default to False + if file_path is not None and find_text is not None and replace_text is not None: result_message = self._execute_replace_all( - file_path, find_text, replace_text, change_id + 
file_path, find_text, replace_text, change_id, dry_run ) else: - result_message = "Error: Missing required parameters for ReplaceAll" + result_message = "Error: Missing required parameters for ReplaceAll (file_path, find_text, replace_text)" elif norm_tool_name == 'insertblock': file_path = params.get('file_path') content = params.get('content') after_pattern = params.get('after_pattern') before_pattern = params.get('before_pattern') + near_context = params.get('near_context') # New + occurrence = params.get('occurrence', 1) # New, default 1 change_id = params.get('change_id') - + dry_run = params.get('dry_run', False) # New, default False + if file_path is not None and content is not None and (after_pattern is not None or before_pattern is not None): result_message = self._execute_insert_block( - file_path, content, after_pattern, before_pattern, change_id + file_path, content, after_pattern, before_pattern, near_context, occurrence, change_id, dry_run ) else: - result_message = "Error: Missing required parameters for InsertBlock" + result_message = "Error: Missing required parameters for InsertBlock (file_path, content, and either after_pattern or before_pattern)" elif norm_tool_name == 'deleteblock': file_path = params.get('file_path') start_pattern = params.get('start_pattern') end_pattern = params.get('end_pattern') line_count = params.get('line_count') + near_context = params.get('near_context') # New + occurrence = params.get('occurrence', 1) # New, default 1 change_id = params.get('change_id') - + dry_run = params.get('dry_run', False) # New, default False + if file_path is not None and start_pattern is not None: result_message = self._execute_delete_block( - file_path, start_pattern, end_pattern, line_count, change_id + file_path, start_pattern, end_pattern, line_count, near_context, occurrence, change_id, dry_run ) else: - result_message = "Error: Missing required parameters for DeleteBlock" + result_message = "Error: Missing required parameters for 
DeleteBlock (file_path, start_pattern)" elif norm_tool_name == 'replaceline': file_path = params.get('file_path') line_number = params.get('line_number') new_content = params.get('new_content') change_id = params.get('change_id') - + dry_run = params.get('dry_run', False) # New, default False + if file_path is not None and line_number is not None and new_content is not None: result_message = self._execute_replace_line( - file_path, line_number, new_content, change_id + file_path, line_number, new_content, change_id, dry_run ) else: - result_message = "Error: Missing required parameters for ReplaceLine" + result_message = "Error: Missing required parameters for ReplaceLine (file_path, line_number, new_content)" elif norm_tool_name == 'replacelines': file_path = params.get('file_path') @@ -654,28 +663,32 @@ class NavigatorCoder(Coder): end_line = params.get('end_line') new_content = params.get('new_content') change_id = params.get('change_id') - + dry_run = params.get('dry_run', False) # New, default False + if file_path is not None and start_line is not None and end_line is not None and new_content is not None: result_message = self._execute_replace_lines( - file_path, start_line, end_line, new_content, change_id + file_path, start_line, end_line, new_content, change_id, dry_run ) else: - result_message = "Error: Missing required parameters for ReplaceLines" + result_message = "Error: Missing required parameters for ReplaceLines (file_path, start_line, end_line, new_content)" elif norm_tool_name == 'indentlines': file_path = params.get('file_path') start_pattern = params.get('start_pattern') end_pattern = params.get('end_pattern') line_count = params.get('line_count') - indent_levels = params.get('indent_levels', 1) + indent_levels = params.get('indent_levels', 1) # Default to indent 1 level + near_context = params.get('near_context') # New + occurrence = params.get('occurrence', 1) # New, default 1 change_id = params.get('change_id') - + dry_run = 
params.get('dry_run', False) # New, default False + if file_path is not None and start_pattern is not None: result_message = self._execute_indent_lines( - file_path, start_pattern, end_pattern, line_count, indent_levels, change_id + file_path, start_pattern, end_pattern, line_count, indent_levels, near_context, occurrence, change_id, dry_run ) else: - result_message = "Error: Missing required parameters for IndentLines" + result_message = "Error: Missing required parameters for IndentLines (file_path, start_pattern)" elif norm_tool_name == 'undochange': change_id = params.get('change_id') @@ -1634,9 +1647,57 @@ Just reply with fixed versions of the {blocks} above that failed to match. return True + # ------------------- Helper for finding occurrences ------------------- + + def _find_occurrences(self, content, pattern, near_context=None): + """Find all occurrences of pattern, optionally filtered by near_context.""" + occurrences = [] + start = 0 + while True: + index = content.find(pattern, start) + if index == -1: + break + + if near_context: + # Check if near_context is within a window around the match + window_start = max(0, index - 200) + window_end = min(len(content), index + len(pattern) + 200) + window = content[window_start:window_end] + if near_context in window: + occurrences.append(index) + else: + occurrences.append(index) + + start = index + 1 # Move past this occurrence's start + return occurrences + + # ------------------- Helper for finding occurrences ------------------- + + def _find_occurrences(self, content, pattern, near_context=None): + """Find all occurrences of pattern, optionally filtered by near_context.""" + occurrences = [] + start = 0 + while True: + index = content.find(pattern, start) + if index == -1: + break + + if near_context: + # Check if near_context is within a window around the match + window_start = max(0, index - 200) + window_end = min(len(content), index + len(pattern) + 200) + window = content[window_start:window_end] + 
if near_context in window: + occurrences.append(index) + else: + occurrences.append(index) + + start = index + 1 # Move past this occurrence's start + return occurrences + # ------------------- Granular Editing Tools ------------------- - def _execute_replace_text(self, file_path, find_text, replace_text, near_context=None, occurrence=1, change_id=None): + def _execute_replace_text(self, file_path, find_text, replace_text, near_context=None, occurrence=1, change_id=None, dry_run=False): """ Replace specific text with new text, optionally using nearby context for disambiguation. @@ -1648,6 +1709,12 @@ Just reply with fixed versions of the {blocks} above that failed to match. - occurrence: Which occurrence to replace (1-based index, or -1 for last) - change_id: Optional ID for tracking the change + - change_id: Optional ID for tracking the change + - dry_run: If True, simulate the change without modifying the file + + - change_id: Optional ID for tracking the change + - dry_run: If True, simulate the change without modifying the file + Returns a result message. """ try: @@ -1668,56 +1735,43 @@ Just reply with fixed versions of the {blocks} above that failed to match. 
else: self.io.tool_error(f"File '{file_path}' not in context") return f"Error: File not in context" - - # Read file content + + # Reread file content immediately before modification (Fixes Point 3: Stale Reads) content = self.io.read_text(abs_path) if content is None: - return f"Error reading file: {file_path}" - - # If near_context is provided, narrow down the search - if near_context: - # Find the section containing both find_text and near_context - sections = [] - for i in range(len(content)): - if i + len(find_text) <= len(content) and content[i:i+len(find_text)] == find_text: - # Look for near_context within a reasonable window (e.g., 200 chars) - window_start = max(0, i - 200) - window_end = min(len(content), i + len(find_text) + 200) - window = content[window_start:window_end] - if near_context in window: - sections.append(i) - - if not sections: - self.io.tool_error(f"Could not find '{find_text}' near '{near_context}'") - return f"Error: Text not found near specified context" - - # Select the occurrence (1-based index) + # Provide more specific error (Improves Point 4) + self.io.tool_error(f"Could not read file '{file_path}' before ReplaceText operation.") + return f"Error: Could not read file '{file_path}'" + # Find occurrences using helper function + occurrences = self._find_occurrences(content, find_text, near_context) + + if not occurrences: + err_msg = f"Text '{find_text}' not found" + if near_context: + err_msg += f" near context '{near_context}'" + err_msg += f" in file '{file_path}'." 
+ self.io.tool_error(err_msg) + return f"Error: {err_msg}" # Improve Point 4 + + # Select the occurrence (handle 1-based index and -1 for last) + num_occurrences = len(occurrences) + try: + occurrence = int(occurrence) # Ensure occurrence is an integer if occurrence == -1: # Last occurrence - start_index = sections[-1] + target_idx = num_occurrences - 1 + elif occurrence > 0 and occurrence <= num_occurrences: + target_idx = occurrence - 1 # Convert 1-based to 0-based else: - occurrence_idx = min(occurrence - 1, len(sections) - 1) - start_index = sections[occurrence_idx] - else: - # Find all occurrences of find_text - sections = [] - start = 0 - while True: - start = content.find(find_text, start) - if start == -1: - break - sections.append(start) - start += 1 # Move past this occurrence - - if not sections: - self.io.tool_error(f"Text '{find_text}' not found in file") - return f"Error: Text not found in file" - - # Select the occurrence (1-based index) - if occurrence == -1: # Last occurrence - start_index = sections[-1] - else: - occurrence_idx = min(occurrence - 1, len(sections) - 1) - start_index = sections[occurrence_idx] + err_msg = f"Occurrence number {occurrence} is out of range. Found {num_occurrences} occurrences of '{find_text}'" + if near_context: err_msg += f" near '{near_context}'" + err_msg += f" in '{file_path}'." + self.io.tool_error(err_msg) + return f"Error: {err_msg}" # Improve Point 4 + except ValueError: + self.io.tool_error(f"Invalid occurrence value: '{occurrence}'. Must be an integer.") + return f"Error: Invalid occurrence value '{occurrence}'" + + start_index = occurrences[target_idx] # Perform the replacement original_content = content @@ -1726,12 +1780,20 @@ Just reply with fixed versions of the {blocks} above that failed to match. 
if original_content == new_content: self.io.tool_warning(f"No changes made: replacement text is identical to original") return f"Warning: No changes made (replacement identical to original)" - - # Write the modified content back to the file - if not self.dry_run: - self.io.write_text(abs_path, new_content) - - # Track the change + + # Generate diff for feedback + diff_example = self._generate_diff_snippet(original_content, start_index, len(find_text), replace_text) + + # Handle dry run (Implements Point 6) + if dry_run: + self.io.tool_output(f"Dry run: Would replace occurrence {occurrence} of '{find_text}' in {file_path}") + return f"Dry run: Would replace text (occurrence {occurrence}). Diff snippet:\n{diff_example}" + + # --- Apply Change (Not dry run) --- + self.io.write_text(abs_path, new_content) + + # Track the change + try: metadata = { 'start_index': start_index, 'find_text': find_text, @@ -1747,64 +1809,23 @@ Just reply with fixed versions of the {blocks} above that failed to match. 
metadata=metadata, change_id=change_id ) - - self.aider_edited_files.add(rel_path) - - # Get more context around the replace (get up to 3 lines before and after) - lines = content.splitlines() - found_idx = -1 - for i, line in enumerate(lines): - if start_index < len(''.join(lines[:i+1])) + i: # Account for newlines - found_idx = i - break - - if found_idx != -1: - # Get lines with context - start_line = max(0, found_idx - 3) - end_line = min(len(lines) - 1, found_idx + 3) - - # Format the diff in git style - diff_lines = [] - for i in range(start_line, end_line + 1): - if i == found_idx: - # This is the line containing the change - line = lines[i] - # Find position of match within the line - line_start = start_index - len(''.join(lines[:i])) - i - if line_start >= 0 and line_start + len(find_text) <= len(line): - # If we can isolate the exact position in the line - old_line = line - new_line = line[:line_start] + replace_text + line[line_start + len(find_text):] - diff_lines.append(f"- {old_line}") - diff_lines.append(f"+ {new_line}") - else: - # If we can't isolate exact position (e.g., multi-line match) - diff_lines.append(f"- {line}") - # Try our best approximation for the new line - if find_text in line: - diff_lines.append(f"+ {line.replace(find_text, replace_text)}") - else: - diff_lines.append(f"+ [modified line]") - else: - # Context line, prefix with space - diff_lines.append(f" {lines[i]}") - - diff_example = f"@@ line {start_line+1},{end_line+1} @@\n" + "\n".join(diff_lines) - else: - # Fallback if we can't locate the exact line - diff_example = f"- {find_text}\n+ {replace_text}" - - self.io.tool_output(f"✅ Replaced text in {file_path} (change_id: {change_id})") - return f"Successfully replaced text (change_id: {change_id}):\n{diff_example}" - else: - self.io.tool_output(f"Did not replace text in {file_path} (--dry-run)") - return f"Did not replace text (--dry-run)" - + except Exception as track_e: + self.io.tool_error(f"Error tracking change for 
ReplaceText: {track_e}") + # Continue even if tracking fails, but warn + change_id = "TRACKING_FAILED" + + self.aider_edited_files.add(rel_path) + + # Improve feedback (Point 5 & 6) + occurrence_str = f"occurrence {occurrence}" if num_occurrences > 1 else "text" + self.io.tool_output(f"✅ Replaced {occurrence_str} in {file_path} (change_id: {change_id})") + return f"Successfully replaced {occurrence_str} (change_id: {change_id}). Diff snippet:\n{diff_example}" + except Exception as e: - self.io.tool_error(f"Error in ReplaceText: {str(e)}") + self.io.tool_error(f"Error in ReplaceText: {str(e)}\n{traceback.format_exc()}") # Add traceback return f"Error: {str(e)}" - def _execute_replace_all(self, file_path, find_text, replace_text, change_id=None): + def _execute_replace_all(self, file_path, find_text, replace_text, change_id=None, dry_run=False): """ Replace all occurrences of text in a file. @@ -1834,11 +1855,13 @@ Just reply with fixed versions of the {blocks} above that failed to match. else: self.io.tool_error(f"File '{file_path}' not in context") return f"Error: File not in context" - - # Read file content + + # Reread file content immediately before modification (Fixes Point 3: Stale Reads) content = self.io.read_text(abs_path) if content is None: - return f"Error reading file: {file_path}" + # Provide more specific error (Improves Point 4) + self.io.tool_error(f"Could not read file '{file_path}' before ReplaceAll operation.") + return f"Error: Could not read file '{file_path}'" # Count occurrences count = content.count(find_text) @@ -1853,12 +1876,20 @@ Just reply with fixed versions of the {blocks} above that failed to match. 
if original_content == new_content: self.io.tool_warning(f"No changes made: replacement text is identical to original") return f"Warning: No changes made (replacement identical to original)" - - # Write the modified content back to the file - if not self.dry_run: - self.io.write_text(abs_path, new_content) - - # Track the change + + # Generate diff for feedback (more comprehensive for ReplaceAll) + diff_examples = self._generate_diff_chunks(original_content, find_text, replace_text) + + # Handle dry run (Implements Point 6) + if dry_run: + self.io.tool_output(f"Dry run: Would replace {count} occurrences of '{find_text}' in {file_path}") + return f"Dry run: Would replace {count} occurrences. Diff examples:\n{diff_examples}" + + # --- Apply Change (Not dry run) --- + self.io.write_text(abs_path, new_content) + + # Track the change + try: metadata = { 'find_text': find_text, 'replace_text': replace_text, @@ -1872,110 +1903,35 @@ Just reply with fixed versions of the {blocks} above that failed to match. 
metadata=metadata, change_id=change_id ) - - self.aider_edited_files.add(rel_path) - - # Build a mapping of line number to replacements on that line - line_changes = {} - - # Split content into lines - lines = content.splitlines() - - # Keep track of character position across all lines - char_pos = 0 - for line_idx, line in enumerate(lines): - line_len = len(line) - - # Look for occurrences within this line - line_pos = 0 - while line_pos <= line_len - len(find_text): - match_pos = line[line_pos:].find(find_text) - if match_pos == -1: - break - - # Found a match in this line - true_pos = line_pos + match_pos - if line_idx not in line_changes: - line_changes[line_idx] = [] - - line_changes[line_idx].append((true_pos, find_text, replace_text)) - - # Move past this match - line_pos = true_pos + len(find_text) - - # Move to next line (add 1 for the newline) - char_pos += line_len + 1 - - # Generate git-style diffs for each affected line with context - diff_chunks = [] - sorted_changed_lines = sorted(line_changes.keys()) - - # Group adjacent changed lines into chunks - chunks = [] - current_chunk = [] - - for line_idx in sorted_changed_lines: - if not current_chunk or line_idx <= current_chunk[-1] + 6: # Keep chunks within 6 lines - current_chunk.append(line_idx) - else: - chunks.append(current_chunk) - current_chunk = [line_idx] - - if current_chunk: - chunks.append(current_chunk) - - # Generate diff for each chunk - for chunk in chunks: - min_line = max(0, min(chunk) - 3) # 3 lines of context before - max_line = min(len(lines) - 1, max(chunk) + 3) # 3 lines of context after - - diff_lines = [] - diff_lines.append(f"@@ line {min_line+1},{max_line+1} @@") - - for i in range(min_line, max_line + 1): - if i in line_changes: - # This is a line with changes - original_line = lines[i] - modified_line = original_line - - # Apply all replacements to this line - # We need to apply them from right to left to maintain correct positions - changes = sorted(line_changes[i], 
key=lambda x: x[0], reverse=True) - for pos, old_text, new_text in changes: - modified_line = modified_line[:pos] + new_text + modified_line[pos + len(old_text):] - - diff_lines.append(f"- {original_line}") - diff_lines.append(f"+ {modified_line}") - else: - # Context line - diff_lines.append(f" {lines[i]}") - - diff_chunks.append("\n".join(diff_lines)) - - # Join all chunks into a single diff - diff_examples = "\n\n".join(diff_chunks) if diff_chunks else "No changes shown (content parsing error)" - - self.io.tool_output(f"✅ Replaced {count} occurrences in {file_path} (change_id: {change_id})") - return f"Successfully replaced {count} occurrences (change_id: {change_id}) with examples:\n{diff_examples}" - else: - self.io.tool_output(f"Did not replace text in {file_path} (--dry-run)") - return f"Did not replace text (--dry-run)" - + except Exception as track_e: + self.io.tool_error(f"Error tracking change for ReplaceAll: {track_e}") + # Continue even if tracking fails, but warn + change_id = "TRACKING_FAILED" + + self.aider_edited_files.add(rel_path) + + # Improve feedback (Point 6) + self.io.tool_output(f"✅ Replaced {count} occurrences in {file_path} (change_id: {change_id})") + return f"Successfully replaced {count} occurrences (change_id: {change_id}). Diff examples:\n{diff_examples}" + except Exception as e: - self.io.tool_error(f"Error in ReplaceAll: {str(e)}") + self.io.tool_error(f"Error in ReplaceAll: {str(e)}\n{traceback.format_exc()}") # Add traceback return f"Error: {str(e)}" - - def _execute_insert_block(self, file_path, content, after_pattern=None, before_pattern=None, change_id=None): + + def _execute_insert_block(self, file_path, content, after_pattern=None, before_pattern=None, near_context=None, occurrence=1, change_id=None, dry_run=False): """ Insert a block of text after or before a specified pattern. 
Parameters: - file_path: Path to the file to modify - content: Text block to insert - - after_pattern: Pattern after which to insert the block (line containing this pattern) - - before_pattern: Pattern before which to insert the block (line containing this pattern) + - after_pattern: Pattern after which to insert the block (line containing this pattern) - specify one of after/before + - before_pattern: Pattern before which to insert the block (line containing this pattern) - specify one of after/before + - near_context: Optional text nearby to help locate the correct instance of the pattern + - occurrence: Which occurrence of the pattern to use (1-based index, or -1 for last) - change_id: Optional ID for tracking the change - + - dry_run: If True, simulate the change without modifying the file + Returns a result message. """ try: @@ -1996,11 +1952,13 @@ Just reply with fixed versions of the {blocks} above that failed to match. else: self.io.tool_error(f"File '{file_path}' not in context") return f"Error: File not in context" - - # Read file content + + # Reread file content immediately before modification (Fixes Point 3: Stale Reads) file_content = self.io.read_text(abs_path) if file_content is None: - return f"Error reading file: {file_path}" + # Provide more specific error (Improves Point 4) + self.io.tool_error(f"Could not read file '{file_path}' before InsertBlock operation.") + return f"Error: Could not read file '{file_path}'" # Validate we have either after_pattern or before_pattern, but not both if after_pattern and before_pattern: @@ -2013,40 +1971,85 @@ Just reply with fixed versions of the {blocks} above that failed to match. 
# Split into lines for easier handling lines = file_content.splitlines() original_content = file_content - - # Find the insertion point - insertion_point = -1 + + # Find occurrences of the pattern (either after_pattern or before_pattern) pattern = after_pattern if after_pattern else before_pattern + pattern_type = "after" if after_pattern else "before" + + # Find line indices containing the pattern + pattern_line_indices = [] for i, line in enumerate(lines): if pattern in line: - insertion_point = i - # For after_pattern, insert after this line - if after_pattern: - insertion_point += 1 - break - - if insertion_point == -1: - self.io.tool_error(f"Pattern '{pattern}' not found in file") - return f"Error: Pattern not found in file" - - # Insert the content + # If near_context is provided, check if it's nearby + if near_context: + context_window_start = max(0, i - 5) # Check 5 lines before/after + context_window_end = min(len(lines), i + 6) + context_block = "\n".join(lines[context_window_start:context_window_end]) + if near_context in context_block: + pattern_line_indices.append(i) + else: + pattern_line_indices.append(i) + + if not pattern_line_indices: + err_msg = f"Pattern '{pattern}' not found" + if near_context: err_msg += f" near context '{near_context}'" + err_msg += f" in file '{file_path}'." + self.io.tool_error(err_msg) + return f"Error: {err_msg}" # Improve Point 4 + + # Select the occurrence (Implements Point 5) + num_occurrences = len(pattern_line_indices) + try: + occurrence = int(occurrence) # Ensure occurrence is an integer + if occurrence == -1: # Last occurrence + target_idx = num_occurrences - 1 + elif occurrence > 0 and occurrence <= num_occurrences: + target_idx = occurrence - 1 # Convert 1-based to 0-based + else: + err_msg = f"Occurrence number {occurrence} is out of range for pattern '{pattern}'. Found {num_occurrences} occurrences" + if near_context: err_msg += f" near '{near_context}'" + err_msg += f" in '{file_path}'." 
+ self.io.tool_error(err_msg) + return f"Error: {err_msg}" # Improve Point 4 + except ValueError: + self.io.tool_error(f"Invalid occurrence value: '{occurrence}'. Must be an integer.") + return f"Error: Invalid occurrence value '{occurrence}'" + + # Determine the final insertion line index + insertion_line_idx = pattern_line_indices[target_idx] + if pattern_type == "after": + insertion_line_idx += 1 # Insert on the line *after* the matched line + # Prepare the content to insert content_lines = content.splitlines() - new_lines = lines[:insertion_point] + content_lines + lines[insertion_point:] - new_content = '\n'.join(new_lines) - + + # Create the new lines array + new_lines = lines[:insertion_line_idx] + content_lines + lines[insertion_line_idx:] + new_content = '\n'.join(new_lines) # Use '\n' to match io.write_text behavior + if original_content == new_content: self.io.tool_warning(f"No changes made: insertion would not change file") return f"Warning: No changes made (insertion would not change file)" - - # Write the modified content back to the file - if not self.dry_run: - self.io.write_text(abs_path, new_content) - - # Track the change + + # Generate diff for feedback + diff_snippet = self._generate_diff_snippet_insert(original_content, insertion_line_idx, content_lines) + + # Handle dry run (Implements Point 6) + if dry_run: + occurrence_str = f"occurrence {occurrence} of " if num_occurrences > 1 else "" + self.io.tool_output(f"Dry run: Would insert block {pattern_type} {occurrence_str}pattern '{pattern}' in {file_path}") + return f"Dry run: Would insert block. 
Diff snippet:\n{diff_snippet}" + + # --- Apply Change (Not dry run) --- + self.io.write_text(abs_path, new_content) + + # Track the change + try: metadata = { - 'insertion_point': insertion_point, + 'insertion_line_idx': insertion_line_idx, 'after_pattern': after_pattern, 'before_pattern': before_pattern, + 'near_context': near_context, + 'occurrence': occurrence, 'content': content } change_id = self.change_tracker.track_change( @@ -2057,30 +2060,35 @@ Just reply with fixed versions of the {blocks} above that failed to match. metadata=metadata, change_id=change_id ) - - self.aider_edited_files.add(rel_path) - pattern_type = "after" if after_pattern else "before" - self.io.tool_output(f"✅ Inserted block {pattern_type} pattern in {file_path} (change_id: {change_id})") - return f"Successfully inserted block (change_id: {change_id})" - else: - self.io.tool_output(f"Did not insert block in {file_path} (--dry-run)") - return f"Did not insert block (--dry-run)" - + except Exception as track_e: + self.io.tool_error(f"Error tracking change for InsertBlock: {track_e}") + change_id = "TRACKING_FAILED" + + self.aider_edited_files.add(rel_path) + + # Improve feedback (Point 5 & 6) + occurrence_str = f"occurrence {occurrence} of " if num_occurrences > 1 else "" + self.io.tool_output(f"✅ Inserted block {pattern_type} {occurrence_str}pattern in {file_path} (change_id: {change_id})") + return f"Successfully inserted block (change_id: {change_id}). 
Diff snippet:\n{diff_snippet}" + except Exception as e: - self.io.tool_error(f"Error in InsertBlock: {str(e)}") + self.io.tool_error(f"Error in InsertBlock: {str(e)}\n{traceback.format_exc()}") # Add traceback return f"Error: {str(e)}" - def _execute_delete_block(self, file_path, start_pattern, end_pattern=None, line_count=None, change_id=None): + def _execute_delete_block(self, file_path, start_pattern, end_pattern=None, line_count=None, near_context=None, occurrence=1, change_id=None, dry_run=False): """ Delete a block of text between start_pattern and end_pattern (inclusive). Parameters: - file_path: Path to the file to modify - - start_pattern: Pattern marking the start of the block to delete - - end_pattern: Pattern marking the end of the block to delete - - line_count: Number of lines to delete (alternative to end_pattern) + - start_pattern: Pattern marking the start of the block to delete (line containing this pattern) + - end_pattern: Optional pattern marking the end of the block (line containing this pattern) + - line_count: Optional number of lines to delete (alternative to end_pattern) + - near_context: Optional text nearby to help locate the correct instance of the start_pattern + - occurrence: Which occurrence of the start_pattern to use (1-based index, or -1 for last) - change_id: Optional ID for tracking the change - + - dry_run: If True, simulate the change without modifying the file + Returns a result message. """ try: @@ -2101,11 +2109,13 @@ Just reply with fixed versions of the {blocks} above that failed to match. 
else: self.io.tool_error(f"File '{file_path}' not in context") return f"Error: File not in context" - - # Read file content + + # Reread file content immediately before modification (Fixes Point 3: Stale Reads) file_content = self.io.read_text(abs_path) if file_content is None: - return f"Error reading file: {file_path}" + # Provide more specific error (Improves Point 4) + self.io.tool_error(f"Could not read file '{file_path}' before DeleteBlock operation.") + return f"Error: Could not read file '{file_path}'" # Validate we have either end_pattern or line_count, but not both if end_pattern and line_count: @@ -2115,56 +2125,105 @@ Just reply with fixed versions of the {blocks} above that failed to match. # Split into lines for easier handling lines = file_content.splitlines() original_content = file_content - - # Find the start line - start_line = -1 + + # Find occurrences of the start_pattern (Implements Point 5) + start_pattern_line_indices = [] for i, line in enumerate(lines): if start_pattern in line: - start_line = i - break - - if start_line == -1: - self.io.tool_error(f"Start pattern '{start_pattern}' not found in file") - return f"Error: Start pattern not found in file" - - # Find the end line + # If near_context is provided, check if it's nearby + if near_context: + context_window_start = max(0, i - 5) # Check 5 lines before/after + context_window_end = min(len(lines), i + 6) + context_block = "\n".join(lines[context_window_start:context_window_end]) + if near_context in context_block: + start_pattern_line_indices.append(i) + else: + start_pattern_line_indices.append(i) + + if not start_pattern_line_indices: + err_msg = f"Start pattern '{start_pattern}' not found" + if near_context: err_msg += f" near context '{near_context}'" + err_msg += f" in file '{file_path}'." 
+ self.io.tool_error(err_msg) + return f"Error: {err_msg}" # Improve Point 4 + + # Select the occurrence for the start pattern + num_occurrences = len(start_pattern_line_indices) + try: + occurrence = int(occurrence) # Ensure occurrence is an integer + if occurrence == -1: # Last occurrence + target_idx = num_occurrences - 1 + elif occurrence > 0 and occurrence <= num_occurrences: + target_idx = occurrence - 1 # Convert 1-based to 0-based + else: + err_msg = f"Occurrence number {occurrence} is out of range for start pattern '{start_pattern}'. Found {num_occurrences} occurrences" + if near_context: err_msg += f" near '{near_context}'" + err_msg += f" in '{file_path}'." + self.io.tool_error(err_msg) + return f"Error: {err_msg}" # Improve Point 4 + except ValueError: + self.io.tool_error(f"Invalid occurrence value: '{occurrence}'. Must be an integer.") + return f"Error: Invalid occurrence value '{occurrence}'" + + start_line = start_pattern_line_indices[target_idx] + occurrence_str = f"occurrence {occurrence} of " if num_occurrences > 1 else "" # For messages + # Find the end line based on end_pattern or line_count end_line = -1 if end_pattern: - for i in range(start_line + 1, len(lines)): + # Search for end_pattern *after* the selected start_line + for i in range(start_line, len(lines)): # Include start_line itself if start/end are same line if end_pattern in lines[i]: end_line = i break - + if end_line == -1: - self.io.tool_error(f"End pattern '{end_pattern}' not found after start pattern") - return f"Error: End pattern not found after start pattern" + # Improve error message (Point 4) + err_msg = f"End pattern '{end_pattern}' not found after {occurrence_str}start pattern '{start_pattern}' (line {start_line + 1}) in '{file_path}'." 
+ self.io.tool_error(err_msg) + return f"Error: {err_msg}" elif line_count: - # Calculate end line based on start line and line count - end_line = min(start_line + line_count - 1, len(lines) - 1) + try: + line_count = int(line_count) + if line_count <= 0: + raise ValueError("Line count must be positive") + # Calculate end line based on start line and line count + end_line = min(start_line + line_count - 1, len(lines) - 1) + except ValueError: + self.io.tool_error(f"Invalid line_count value: '{line_count}'. Must be a positive integer.") + return f"Error: Invalid line_count value '{line_count}'" else: - # If neither is specified, delete just the start line + # If neither end_pattern nor line_count is specified, delete just the start line end_line = start_line - - # Delete the block + # Prepare the deletion deleted_lines = lines[start_line:end_line+1] new_lines = lines[:start_line] + lines[end_line+1:] - new_content = '\n'.join(new_lines) - + new_content = '\n'.join(new_lines) # Use '\n' to match io.write_text behavior + if original_content == new_content: self.io.tool_warning(f"No changes made: deletion would not change file") return f"Warning: No changes made (deletion would not change file)" - - # Write the modified content back to the file - if not self.dry_run: - self.io.write_text(abs_path, new_content) - - # Track the change + + # Generate diff for feedback + diff_snippet = self._generate_diff_snippet_delete(original_content, start_line, end_line) + + # Handle dry run (Implements Point 6) + if dry_run: + self.io.tool_output(f"Dry run: Would delete lines {start_line+1}-{end_line+1} (based on {occurrence_str}start pattern '{start_pattern}') in {file_path}") + return f"Dry run: Would delete block. 
Diff snippet:\n{diff_snippet}" + + # --- Apply Change (Not dry run) --- + self.io.write_text(abs_path, new_content) + + # Track the change + try: metadata = { - 'start_line': start_line, - 'end_line': end_line, + 'start_line': start_line + 1, # Store 1-based for consistency + 'end_line': end_line + 1, # Store 1-based 'start_pattern': start_pattern, 'end_pattern': end_pattern, 'line_count': line_count, + 'near_context': near_context, + 'occurrence': occurrence, 'deleted_content': '\n'.join(deleted_lines) } change_id = self.change_tracker.track_change( @@ -2175,16 +2234,19 @@ Just reply with fixed versions of the {blocks} above that failed to match. metadata=metadata, change_id=change_id ) - - self.aider_edited_files.add(rel_path) - self.io.tool_output(f"✅ Deleted {end_line - start_line + 1} lines from {file_path} (change_id: {change_id})") - return f"Successfully deleted {end_line - start_line + 1} lines (change_id: {change_id})" - else: - self.io.tool_output(f"Did not delete block in {file_path} (--dry-run)") - return f"Did not delete block (--dry-run)" - + except Exception as track_e: + self.io.tool_error(f"Error tracking change for DeleteBlock: {track_e}") + change_id = "TRACKING_FAILED" + + self.aider_edited_files.add(rel_path) + + # Improve feedback (Point 5 & 6) + num_deleted = end_line - start_line + 1 + self.io.tool_output(f"✅ Deleted {num_deleted} lines (from {occurrence_str}start pattern) in {file_path} (change_id: {change_id})") + return f"Successfully deleted {num_deleted} lines (change_id: {change_id}). Diff snippet:\n{diff_snippet}" + except Exception as e: - self.io.tool_error(f"Error in DeleteBlock: {str(e)}") + self.io.tool_error(f"Error in DeleteBlock: {str(e)}\n{traceback.format_exc()}") # Add traceback return f"Error: {str(e)}" def _execute_undo_change(self, change_id=None, last_file=None): @@ -2195,55 +2257,57 @@ Just reply with fixed versions of the {blocks} above that failed to match. 
- change_id: ID of the change to undo - last_file: Path to file where the last change should be undone + Returns a result message. """ + # Note: Undo does not have a dry_run parameter as it's inherently about reverting a previous action. try: # Validate parameters if change_id is None and last_file is None: - self.io.tool_error("Must specify either change_id or last_file") - return "Error: Must specify either change_id or last_file" + self.io.tool_error("Must specify either change_id or last_file for UndoChange") + return "Error: Must specify either change_id or last_file" # Improve Point 4 # If last_file is specified, get the most recent change for that file if last_file: abs_path = self.abs_root_path(last_file) rel_path = self.get_rel_fname(abs_path) - + change_id = self.change_tracker.get_last_change(rel_path) if not change_id: - self.io.tool_error(f"No changes found for file '{last_file}'") - return f"Error: No changes found for file" - - # Attempt to undo the change + # Improve error message (Point 4) + self.io.tool_error(f"No tracked changes found for file '{last_file}' to undo.") + return f"Error: No changes found for file '{last_file}'" + # Attempt to get undo information from the tracker success, message, change_info = self.change_tracker.undo_change(change_id) - + if not success: - self.io.tool_error(message) + # Improve error message (Point 4) - message from tracker should be specific + self.io.tool_error(f"Failed to undo change '{change_id}': {message}") return f"Error: {message}" # Apply the undo by restoring the original content if change_info: file_path = change_info['file_path'] abs_path = self.abs_root_path(file_path) - # Write the original content back to the file - if not self.dry_run: - self.io.write_text(abs_path, change_info['original']) - self.aider_edited_files.add(file_path) - - change_type = change_info['type'] - self.io.tool_output(f"✅ Undid {change_type} in {file_path} (change_id: {change_id})") - return f"Successfully undid 
{change_type} (change_id: {change_id})" - else: - self.io.tool_output(f"Did not undo change in {file_path} (--dry-run)") - return f"Did not undo change (--dry-run)" - - return "Error: Failed to undo change (unknown reason)" - + # No dry_run check here, as undo implies a real action + self.io.write_text(abs_path, change_info['original']) + self.aider_edited_files.add(file_path) # Track that the file was modified by the undo + + change_type = change_info['type'] + # Improve feedback (Point 6) + self.io.tool_output(f"✅ Undid {change_type} change '{change_id}' in {file_path}") + return f"Successfully undid {change_type} change '{change_id}'." + else: + # This case should ideally not be reached if tracker returns success + self.io.tool_error(f"Failed to undo change '{change_id}': Change info missing after successful tracker update.") + return f"Error: Failed to undo change '{change_id}' (missing change info)" + except Exception as e: - self.io.tool_error(f"Error in UndoChange: {str(e)}") + self.io.tool_error(f"Error in UndoChange: {str(e)}\n{traceback.format_exc()}") # Add traceback return f"Error: {str(e)}" - def _execute_replace_line(self, file_path, line_number, new_content, change_id=None): + def _execute_replace_line(self, file_path, line_number, new_content, change_id=None, dry_run=False): """ Replace a specific line identified by line number. Useful for fixing errors identified by error messages or linters. @@ -2253,7 +2317,8 @@ Just reply with fixed versions of the {blocks} above that failed to match. - line_number: The line number to replace (1-based) - new_content: New content for the line - change_id: Optional ID for tracking the change - + - dry_run: If True, simulate the change without modifying the file + Returns a result message. """ try: @@ -2274,11 +2339,13 @@ Just reply with fixed versions of the {blocks} above that failed to match. 
else: self.io.tool_error(f"File '{file_path}' not in context") return f"Error: File not in context" - - # Read file content + + # Reread file content immediately before modification (Fixes Point 3: Stale Reads) file_content = self.io.read_text(abs_path) if file_content is None: - return f"Error reading file: {file_path}" + # Provide more specific error (Improves Point 4) + self.io.tool_error(f"Could not read file '{file_path}' before ReplaceLine operation.") + return f"Error: Could not read file '{file_path}'" # Split into lines lines = file_content.splitlines() @@ -2289,14 +2356,17 @@ Just reply with fixed versions of the {blocks} above that failed to match. line_number = int(line_number) except ValueError: self.io.tool_error(f"Line number must be an integer, got '{line_number}'") - return f"Error: Line number must be an integer" - + # Improve error message (Point 4) + self.io.tool_error(f"Invalid line_number value: '{line_number}'. Must be an integer.") + return f"Error: Invalid line_number value '{line_number}'" + # Convert 1-based line number (what most editors and error messages use) to 0-based index idx = line_number - 1 - + if idx < 0 or idx >= len(lines): - self.io.tool_error(f"Line number {line_number} is out of range (file has {len(lines)} lines)") - return f"Error: Line number out of range" + # Improve error message (Point 4) + self.io.tool_error(f"Line number {line_number} is out of range for file '{file_path}' (has {len(lines)} lines).") + return f"Error: Line number {line_number} out of range" # Store original content for change tracking original_content = file_content @@ -2311,12 +2381,20 @@ Just reply with fixed versions of the {blocks} above that failed to match. 
if original_content == new_content_full: self.io.tool_warning("No changes made: new line content is identical to original") return f"Warning: No changes made (new content identical to original)" - - # Write the modified content back to the file - if not self.dry_run: - self.io.write_text(abs_path, new_content_full) - - # Track the change + + # Create a readable diff for the line replacement + diff = f"Line {line_number}:\n- {original_line}\n+ {new_content}" + + # Handle dry run (Implements Point 6) + if dry_run: + self.io.tool_output(f"Dry run: Would replace line {line_number} in {file_path}") + return f"Dry run: Would replace line {line_number}. Diff:\n{diff}" + + # --- Apply Change (Not dry run) --- + self.io.write_text(abs_path, new_content_full) + + # Track the change + try: metadata = { 'line_number': line_number, 'original_line': original_line, @@ -2330,23 +2408,21 @@ Just reply with fixed versions of the {blocks} above that failed to match. metadata=metadata, change_id=change_id ) - - self.aider_edited_files.add(rel_path) - - # Create a readable diff for the line replacement - diff = f"Line {line_number}:\n- {original_line}\n+ {new_content}" - - self.io.tool_output(f"✅ Replaced line {line_number} in {file_path} (change_id: {change_id})") - return f"Successfully replaced line {line_number} (change_id: {change_id}):\n{diff}" - else: - self.io.tool_output(f"Did not replace line in {file_path} (--dry-run)") - return f"Did not replace line (--dry-run)" - + except Exception as track_e: + self.io.tool_error(f"Error tracking change for ReplaceLine: {track_e}") + change_id = "TRACKING_FAILED" + + self.aider_edited_files.add(rel_path) + + # Improve feedback (Point 6) + self.io.tool_output(f"✅ Replaced line {line_number} in {file_path} (change_id: {change_id})") + return f"Successfully replaced line {line_number} (change_id: {change_id}). 
Diff:\n{diff}" + except Exception as e: - self.io.tool_error(f"Error in ReplaceLine: {str(e)}") + self.io.tool_error(f"Error in ReplaceLine: {str(e)}\n{traceback.format_exc()}") # Add traceback return f"Error: {str(e)}" - def _execute_replace_lines(self, file_path, start_line, end_line, new_content, change_id=None): + def _execute_replace_lines(self, file_path, start_line, end_line, new_content, change_id=None, dry_run=False): """ Replace a range of lines identified by line numbers. Useful for fixing errors identified by error messages or linters. @@ -2357,7 +2433,8 @@ Just reply with fixed versions of the {blocks} above that failed to match. - end_line: The last line number to replace (1-based) - new_content: New content for the lines (can be multi-line) - change_id: Optional ID for tracking the change - + - dry_run: If True, simulate the change without modifying the file + Returns a result message. """ try: @@ -2378,26 +2455,30 @@ Just reply with fixed versions of the {blocks} above that failed to match. else: self.io.tool_error(f"File '{file_path}' not in context") return f"Error: File not in context" - - # Read file content + + # Reread file content immediately before modification (Fixes Point 3: Stale Reads) file_content = self.io.read_text(abs_path) if file_content is None: - return f"Error reading file: {file_path}" + # Provide more specific error (Improves Point 4) + self.io.tool_error(f"Could not read file '{file_path}' before ReplaceLines operation.") + return f"Error: Could not read file '{file_path}'" # Convert line numbers to integers if needed if not isinstance(start_line, int): try: start_line = int(start_line) except ValueError: - self.io.tool_error(f"Start line must be an integer, got '{start_line}'") - return f"Error: Start line must be an integer" + # Improve error message (Point 4) + self.io.tool_error(f"Invalid start_line value: '{start_line}'. 
Must be an integer.") + return f"Error: Invalid start_line value '{start_line}'" if not isinstance(end_line, int): try: end_line = int(end_line) except ValueError: - self.io.tool_error(f"End line must be an integer, got '{end_line}'") - return f"Error: End line must be an integer" + # Improve error message (Point 4) + self.io.tool_error(f"Invalid end_line value: '{end_line}'. Must be an integer.") + return f"Error: Invalid end_line value '{end_line}'" # Split into lines lines = file_content.splitlines() @@ -2405,15 +2486,16 @@ Just reply with fixed versions of the {blocks} above that failed to match. # Convert 1-based line numbers to 0-based indices start_idx = start_line - 1 end_idx = end_line - 1 - # Validate line numbers if start_idx < 0 or start_idx >= len(lines): - self.io.tool_error(f"Start line {start_line} is out of range (file has {len(lines)} lines)") - return f"Error: Start line out of range" - + # Improve error message (Point 4) + self.io.tool_error(f"Start line {start_line} is out of range for file '{file_path}' (has {len(lines)} lines).") + return f"Error: Start line {start_line} out of range" + if end_idx < start_idx or end_idx >= len(lines): - self.io.tool_error(f"End line {end_line} is out of range (must be >= start line and < {len(lines)})") - return f"Error: End line out of range" + # Improve error message (Point 4) + self.io.tool_error(f"End line {end_line} is out of range for file '{file_path}' (must be >= start line {start_line} and <= {len(lines)}).") + return f"Error: End line {end_line} out of range" # Store original content for change tracking original_content = file_content @@ -2429,12 +2511,28 @@ Just reply with fixed versions of the {blocks} above that failed to match. 
if original_content == new_content_full: self.io.tool_warning("No changes made: new content is identical to original") return f"Warning: No changes made (new content identical to original)" - - # Write the modified content back to the file - if not self.dry_run: - self.io.write_text(abs_path, new_content_full) - - # Track the change + + # Create a readable diff for the lines replacement + diff = f"Lines {start_line}-{end_line}:\n" + # Add removed lines with - prefix + for line in replaced_lines: + diff += f"- {line}\n" + # Add separator + diff += "---\n" + # Add new lines with + prefix + for line in new_lines: + diff += f"+ {line}\n" + + # Handle dry run (Implements Point 6) + if dry_run: + self.io.tool_output(f"Dry run: Would replace lines {start_line}-{end_line} in {file_path}") + return f"Dry run: Would replace lines {start_line}-{end_line}. Diff:\n{diff}" + + # --- Apply Change (Not dry run) --- + self.io.write_text(abs_path, new_content_full) + + # Track the change + try: metadata = { 'start_line': start_line, 'end_line': end_line, @@ -2449,44 +2547,37 @@ Just reply with fixed versions of the {blocks} above that failed to match. 
metadata=metadata, change_id=change_id ) - - self.aider_edited_files.add(rel_path) - replaced_count = end_line - start_line + 1 - new_count = len(new_lines) - - # Create a readable diff for the lines replacement - diff = f"Lines {start_line}-{end_line}:\n" - # Add removed lines with - prefix - for line in replaced_lines: - diff += f"- {line}\n" - # Add separator - diff += "---\n" - # Add new lines with + prefix - for line in new_lines: - diff += f"+ {line}\n" - - self.io.tool_output(f"✅ Replaced lines {start_line}-{end_line} ({replaced_count} lines) with {new_count} new lines in {file_path} (change_id: {change_id})") - return f"Successfully replaced lines {start_line}-{end_line} with {new_count} new lines (change_id: {change_id}):\n{diff}" - else: - self.io.tool_output(f"Did not replace lines in {file_path} (--dry-run)") - return f"Did not replace lines (--dry-run)" - + except Exception as track_e: + self.io.tool_error(f"Error tracking change for ReplaceLines: {track_e}") + change_id = "TRACKING_FAILED" + + self.aider_edited_files.add(rel_path) + replaced_count = end_line - start_line + 1 + new_count = len(new_lines) + + # Improve feedback (Point 6) + self.io.tool_output(f"✅ Replaced lines {start_line}-{end_line} ({replaced_count} lines) with {new_count} new lines in {file_path} (change_id: {change_id})") + return f"Successfully replaced lines {start_line}-{end_line} with {new_count} new lines (change_id: {change_id}). 
Diff:\n{diff}" + except Exception as e: - self.io.tool_error(f"Error in ReplaceLines: {str(e)}") + self.io.tool_error(f"Error in ReplaceLines: {str(e)}\n{traceback.format_exc()}") # Add traceback return f"Error: {str(e)}" - def _execute_indent_lines(self, file_path, start_pattern, end_pattern=None, line_count=None, indent_levels=1, change_id=None): + def _execute_indent_lines(self, file_path, start_pattern, end_pattern=None, line_count=None, indent_levels=1, near_context=None, occurrence=1, change_id=None, dry_run=False): """ Indent or unindent a block of lines in a file. Parameters: - file_path: Path to the file to modify - - start_pattern: Pattern marking the start of the block to indent - - end_pattern: Pattern marking the end of the block to indent - - line_count: Number of lines to indent (alternative to end_pattern) + - start_pattern: Pattern marking the start of the block to indent (line containing this pattern) + - end_pattern: Optional pattern marking the end of the block (line containing this pattern) + - line_count: Optional number of lines to indent (alternative to end_pattern) - indent_levels: Number of levels to indent (positive) or unindent (negative) + - near_context: Optional text nearby to help locate the correct instance of the start_pattern + - occurrence: Which occurrence of the start_pattern to use (1-based index, or -1 for last) - change_id: Optional ID for tracking the change - + - dry_run: If True, simulate the change without modifying the file + Returns a result message. """ try: @@ -2507,11 +2598,13 @@ Just reply with fixed versions of the {blocks} above that failed to match. 
else: self.io.tool_error(f"File '{file_path}' not in context") return f"Error: File not in context" - - # Read file content + + # Reread file content immediately before modification (Fixes Point 3: Stale Reads) file_content = self.io.read_text(abs_path) if file_content is None: - return f"Error reading file: {file_path}" + # Provide more specific error (Improves Point 4) + self.io.tool_error(f"Could not read file '{file_path}' before IndentLines operation.") + return f"Error: Could not read file '{file_path}'" # Validate we have either end_pattern or line_count, but not both if end_pattern and line_count: @@ -2521,69 +2614,131 @@ Just reply with fixed versions of the {blocks} above that failed to match. # Split into lines for easier handling lines = file_content.splitlines() original_content = file_content - - # Find the start line - start_line = -1 + + # Find occurrences of the start_pattern (Implements Point 5) + start_pattern_line_indices = [] for i, line in enumerate(lines): if start_pattern in line: - start_line = i - break - - if start_line == -1: - self.io.tool_error(f"Start pattern '{start_pattern}' not found in file") - return f"Error: Start pattern not found in file" - - # Find the end line + # If near_context is provided, check if it's nearby + if near_context: + context_window_start = max(0, i - 5) # Check 5 lines before/after + context_window_end = min(len(lines), i + 6) + context_block = "\n".join(lines[context_window_start:context_window_end]) + if near_context in context_block: + start_pattern_line_indices.append(i) + else: + start_pattern_line_indices.append(i) + + if not start_pattern_line_indices: + err_msg = f"Start pattern '{start_pattern}' not found" + if near_context: err_msg += f" near context '{near_context}'" + err_msg += f" in file '{file_path}'." 
+ self.io.tool_error(err_msg) + return f"Error: {err_msg}" # Improve Point 4 + + # Select the occurrence for the start pattern + num_occurrences = len(start_pattern_line_indices) + try: + occurrence = int(occurrence) # Ensure occurrence is an integer + if occurrence == -1: # Last occurrence + target_idx = num_occurrences - 1 + elif occurrence > 0 and occurrence <= num_occurrences: + target_idx = occurrence - 1 # Convert 1-based to 0-based + else: + err_msg = f"Occurrence number {occurrence} is out of range for start pattern '{start_pattern}'. Found {num_occurrences} occurrences" + if near_context: err_msg += f" near '{near_context}'" + err_msg += f" in '{file_path}'." + self.io.tool_error(err_msg) + return f"Error: {err_msg}" # Improve Point 4 + except ValueError: + self.io.tool_error(f"Invalid occurrence value: '{occurrence}'. Must be an integer.") + return f"Error: Invalid occurrence value '{occurrence}'" + + start_line = start_pattern_line_indices[target_idx] + occurrence_str = f"occurrence {occurrence} of " if num_occurrences > 1 else "" # For messages + # Find the end line based on end_pattern or line_count end_line = -1 if end_pattern: - for i in range(start_line + 1, len(lines)): + # Search for end_pattern *after* the selected start_line + for i in range(start_line, len(lines)): # Include start_line itself if start/end are same line if end_pattern in lines[i]: end_line = i break - + if end_line == -1: - self.io.tool_error(f"End pattern '{end_pattern}' not found after start pattern") - return f"Error: End pattern not found after start pattern" + # Improve error message (Point 4) + err_msg = f"End pattern '{end_pattern}' not found after {occurrence_str}start pattern '{start_pattern}' (line {start_line + 1}) in '{file_path}'." 
+ self.io.tool_error(err_msg) + return f"Error: {err_msg}" elif line_count: - # Calculate end line based on start line and line count - end_line = min(start_line + line_count - 1, len(lines) - 1) + try: + line_count = int(line_count) + if line_count <= 0: + raise ValueError("Line count must be positive") + # Calculate end line based on start line and line count + end_line = min(start_line + line_count - 1, len(lines) - 1) + except ValueError: + self.io.tool_error(f"Invalid line_count value: '{line_count}'. Must be a positive integer.") + return f"Error: Invalid line_count value '{line_count}'" else: - # If neither is specified, indent just the start line + # If neither end_pattern nor line_count is specified, indent just the start line end_line = start_line - - # Determine indentation amount (4 spaces per level) - indent_spaces = 4 * indent_levels - - # Apply indentation + # Determine indentation amount (using spaces for simplicity, could adapt based on file type later) + try: + indent_levels = int(indent_levels) + except ValueError: + self.io.tool_error(f"Invalid indent_levels value: '{indent_levels}'. 
Must be an integer.") + return f"Error: Invalid indent_levels value '{indent_levels}'" + + indent_str = ' ' * 4 # Assume 4 spaces per level + + # Create a temporary copy to calculate the change + modified_lines = list(lines) # Copy the list + + # Apply indentation to the temporary copy for i in range(start_line, end_line + 1): if indent_levels > 0: # Add indentation - lines[i] = ' ' * indent_spaces + lines[i] - else: + modified_lines[i] = (indent_str * indent_levels) + modified_lines[i] + elif indent_levels < 0: # Remove indentation, but do not remove more than exists - spaces_to_remove = min(abs(indent_spaces), len(lines[i]) - len(lines[i].lstrip())) - if spaces_to_remove > 0: - lines[i] = lines[i][spaces_to_remove:] - + spaces_to_remove = abs(indent_levels) * len(indent_str) + current_leading_spaces = len(modified_lines[i]) - len(modified_lines[i].lstrip(' ')) + actual_remove = min(spaces_to_remove, current_leading_spaces) + if actual_remove > 0: + modified_lines[i] = modified_lines[i][actual_remove:] + # If indent_levels is 0, do nothing + # Join lines back into a string - new_content = '\n'.join(lines) - + new_content = '\n'.join(modified_lines) # Use '\n' to match io.write_text behavior + if original_content == new_content: self.io.tool_warning(f"No changes made: indentation would not change file") return f"Warning: No changes made (indentation would not change file)" - - # Write the modified content back to the file - if not self.dry_run: - self.io.write_text(abs_path, new_content) - - # Track the change + + # Generate diff for feedback + diff_snippet = self._generate_diff_snippet_indent(original_content, new_content, start_line, end_line) + + # Handle dry run (Implements Point 6) + if dry_run: + action = "indent" if indent_levels > 0 else "unindent" + self.io.tool_output(f"Dry run: Would {action} lines {start_line+1}-{end_line+1} (based on {occurrence_str}start pattern '{start_pattern}') in {file_path}") + return f"Dry run: Would {action} block. 
Diff snippet:\n{diff_snippet}" + + # --- Apply Change (Not dry run) --- + self.io.write_text(abs_path, new_content) + + # Track the change + try: metadata = { - 'start_line': start_line, - 'end_line': end_line, + 'start_line': start_line + 1, # Store 1-based + 'end_line': end_line + 1, # Store 1-based 'start_pattern': start_pattern, 'end_pattern': end_pattern, 'line_count': line_count, - 'indent_levels': indent_levels + 'indent_levels': indent_levels, + 'near_context': near_context, + 'occurrence': occurrence, } change_id = self.change_tracker.track_change( file_path=rel_path, @@ -2593,19 +2748,22 @@ Just reply with fixed versions of the {blocks} above that failed to match. metadata=metadata, change_id=change_id ) - - self.aider_edited_files.add(rel_path) - action = "Indented" if indent_levels > 0 else "Unindented" - levels = abs(indent_levels) - level_text = "level" if levels == 1 else "levels" - self.io.tool_output(f"✅ {action} {end_line - start_line + 1} lines by {levels} {level_text} in {file_path} (change_id: {change_id})") - return f"Successfully {action.lower()} {end_line - start_line + 1} lines by {levels} {level_text} (change_id: {change_id})" - else: - self.io.tool_output(f"Did not indent lines in {file_path} (--dry-run)") - return f"Did not indent lines (--dry-run)" - + except Exception as track_e: + self.io.tool_error(f"Error tracking change for IndentLines: {track_e}") + change_id = "TRACKING_FAILED" + + self.aider_edited_files.add(rel_path) + + # Improve feedback (Point 5 & 6) + action = "Indented" if indent_levels > 0 else "Unindented" + levels = abs(indent_levels) + level_text = "level" if levels == 1 else "levels" + num_lines = end_line - start_line + 1 + self.io.tool_output(f"✅ {action} {num_lines} lines (from {occurrence_str}start pattern) by {levels} {level_text} in {file_path} (change_id: {change_id})") + return f"Successfully {action.lower()} {num_lines} lines by {levels} {level_text} (change_id: {change_id}). 
Diff snippet:\n{diff_snippet}" + except Exception as e: - self.io.tool_error(f"Error in IndentLines: {str(e)}") + self.io.tool_error(f"Error in IndentLines: {str(e)}\n{traceback.format_exc()}") # Add traceback return f"Error: {str(e)}" def _execute_list_changes(self, file_path=None, limit=10): @@ -2643,10 +2801,191 @@ Just reply with fixed versions of the {blocks} above that failed to match. change_id = change['id'] result += f"{i+1}. [{change_id}] {change_time} - {change_type.upper()} on {file_path}\n" - - self.io.tool_output(result) + + self.io.tool_output(result) # Also print to console for user return result - + except Exception as e: - self.io.tool_error(f"Error in ListChanges: {str(e)}") + self.io.tool_error(f"Error in ListChanges: {str(e)}\n{traceback.format_exc()}") # Add traceback return f"Error: {str(e)}" + + # ------------------- Diff Generation Helpers ------------------- + + def _generate_diff_snippet(self, original_content, start_index, replaced_len, replacement_text): + """Generate a git-style diff snippet for a simple text replacement.""" + try: + lines = original_content.splitlines() + char_count = 0 + start_line_idx = -1 + start_char_idx_in_line = -1 + + # Find the line and character index where the change starts + for i, line in enumerate(lines): + line_len_with_newline = len(line) + 1 # Account for newline character + if char_count + line_len_with_newline > start_index: + start_line_idx = i + start_char_idx_in_line = start_index - char_count + break + char_count += line_len_with_newline + + if start_line_idx == -1: return "[Diff generation error: start index out of bounds]" + + # Determine the end line and character index + end_index = start_index + replaced_len + char_count = 0 + end_line_idx = -1 + end_char_idx_in_line = -1 + for i, line in enumerate(lines): + line_len_with_newline = len(line) + 1 + if char_count + line_len_with_newline > end_index: + end_line_idx = i + # End char index is relative to the start of *its* line + 
end_char_idx_in_line = end_index - char_count + break + char_count += line_len_with_newline + # If end_index is exactly at the end of the content + if end_line_idx == -1 and end_index == len(original_content): + end_line_idx = len(lines) - 1 + end_char_idx_in_line = len(lines[end_line_idx]) + + if end_line_idx == -1: return "[Diff generation error: end index out of bounds]" + + # Get context lines + context = 3 + diff_start_line = max(0, start_line_idx - context) + diff_end_line = min(len(lines) - 1, end_line_idx + context) + + diff_lines = [f"@@ line ~{start_line_idx + 1} @@"] + for i in range(diff_start_line, diff_end_line + 1): + if i >= start_line_idx and i <= end_line_idx: + # Line is part of the original replaced block + diff_lines.append(f"- {lines[i]}") + else: + # Context line + diff_lines.append(f" {lines[i]}") + + # Construct the new lines based on the replacement + prefix = lines[start_line_idx][:start_char_idx_in_line] + suffix = lines[end_line_idx][end_char_idx_in_line:] + + # Combine prefix, replacement, and suffix, then split into lines + combined_new_content = prefix + replacement_text + suffix + new_content_lines = combined_new_content.splitlines() + + # Add new lines to diff + for new_line in new_content_lines: + diff_lines.append(f"+ {new_line}") + + return "\n".join(diff_lines) + except Exception as e: + return f"[Diff generation error: {e}]" + + def _generate_diff_chunks(self, original_content, find_text, replace_text): + """Generate multiple git-style diff snippets for ReplaceAll.""" + try: + lines = original_content.splitlines() + new_lines_content = original_content.replace(find_text, replace_text) + new_lines = new_lines_content.splitlines() + + # Use difflib for a more robust diff + import difflib + diff = list(difflib.unified_diff(lines, new_lines, lineterm='', n=3)) # n=3 lines of context + + if len(diff) <= 2: # Only header lines, no changes found by diff + return "No significant changes detected by diff." 
+ + # Process the diff output into readable chunks + # Skip header lines (---, +++) + processed_diff = "\n".join(diff[2:]) + + # Limit the output size if it's too large + max_diff_len = 2000 # Limit diff snippet size + if len(processed_diff) > max_diff_len: + processed_diff = processed_diff[:max_diff_len] + "\n... (diff truncated)" + + return processed_diff if processed_diff else "No changes detected." + except Exception as e: + return f"[Diff generation error: {e}]" + + def _generate_diff_snippet_insert(self, original_content, insertion_line_idx, content_lines_to_insert): + """Generate a git-style diff snippet for an insertion.""" + try: + lines = original_content.splitlines() + context = 3 + + # Determine context range + start_context = max(0, insertion_line_idx - context) + end_context = min(len(lines), insertion_line_idx + context) # End index is exclusive for slicing + + diff_lines = [f"@@ line ~{insertion_line_idx + 1} @@"] # Indicate insertion point + + # Add lines before insertion point + for i in range(start_context, insertion_line_idx): + diff_lines.append(f" {lines[i]}") + + # Add inserted lines + for line in content_lines_to_insert: + diff_lines.append(f"+ {line}") + + # Add lines after insertion point + for i in range(insertion_line_idx, end_context): + diff_lines.append(f" {lines[i]}") + + return "\n".join(diff_lines) + except Exception as e: + return f"[Diff generation error: {e}]" + + def _generate_diff_snippet_delete(self, original_content, start_line, end_line): + """Generate a git-style diff snippet for a deletion.""" + try: + lines = original_content.splitlines() + context = 3 + + # Determine context range + diff_start_line = max(0, start_line - context) + diff_end_line = min(len(lines) - 1, end_line + context) + + diff_lines = [f"@@ line {start_line + 1},{end_line + 1} @@"] # Indicate deletion range + + for i in range(diff_start_line, diff_end_line + 1): + if i >= start_line and i <= end_line: + # Line was deleted + diff_lines.append(f"- 
{lines[i]}") + else: + # Context line + diff_lines.append(f" {lines[i]}") + + return "\n".join(diff_lines) + except Exception as e: + return f"[Diff generation error: {e}]" + + def _generate_diff_snippet_indent(self, original_content, new_content, start_line, end_line): + """Generate a git-style diff snippet for indentation changes.""" + try: + original_lines = original_content.splitlines() + new_lines = new_content.splitlines() + context = 3 + + # Determine context range + diff_start_line = max(0, start_line - context) + diff_end_line = min(len(original_lines) - 1, end_line + context) + + diff_lines_output = [f"@@ lines ~{start_line + 1}-{end_line + 1} @@"] # Indicate affected range + + for i in range(diff_start_line, diff_end_line + 1): + # Ensure index is valid for both lists (should be, as only indentation changes) + if i < len(original_lines) and i < len(new_lines): + if i >= start_line and i <= end_line: + # Line is within the indented/unindented block + if original_lines[i] != new_lines[i]: # Show only if changed + diff_lines_output.append(f"- {original_lines[i]}") + diff_lines_output.append(f"+ {new_lines[i]}") + else: # If somehow unchanged, show as context + diff_lines_output.append(f" {original_lines[i]}") + else: + # Context line + diff_lines_output.append(f" {original_lines[i]}") + + return "\n".join(diff_lines_output) + except Exception as e: + return f"[Diff generation error: {e}]" diff --git a/aider/coders/navigator_prompts.py b/aider/coders/navigator_prompts.py index e4592dfaa..f5ea0bc84 100644 --- a/aider/coders/navigator_prompts.py +++ b/aider/coders/navigator_prompts.py @@ -61,45 +61,32 @@ Act as an expert software engineer with the ability to autonomously navigate and Convert an editable file back to read-only status. 
### Granular Editing Tools -- **ReplaceText**: `[tool_call(ReplaceText, file_path="path/to/file.py", find_text="old text", replace_text="new text", near_context="unique nearby text", occurrence=1)]` - Replace specific text with new text. Use near_context to disambiguate between multiple occurrences. - Set occurrence to -1 for the last occurrence, or a number for a specific occurrence. +- **ReplaceText**: `[tool_call(ReplaceText, file_path="...", find_text="...", replace_text="...", near_context="...", occurrence=1, dry_run=False)]` + Replace specific text. `near_context` (optional) helps find the right spot. `occurrence` (optional, default 1) specifies which match (-1 for last). `dry_run=True` simulates the change. -- **ReplaceAll**: `[tool_call(ReplaceAll, file_path="path/to/file.py", find_text="oldVar", replace_text="newVar")]` - Replace all occurrences of text in a file. Useful for renaming variables, function names, etc. +- **ReplaceAll**: `[tool_call(ReplaceAll, file_path="...", find_text="...", replace_text="...", dry_run=False)]` + Replace ALL occurrences of text. Use with caution. `dry_run=True` simulates the change. -- **InsertBlock**: `[tool_call(InsertBlock, file_path="path/to/file.py", content=""" -def new_function(): - return True -""", after_pattern="# Insert after this line")]` - Insert a block of text after or before a pattern. Use single quotes with escaped newlines for multi-line content. - Specify either after_pattern or before_pattern to place the block. +- **InsertBlock**: `[tool_call(InsertBlock, file_path="...", content="...", after_pattern="...", near_context="...", occurrence=1, dry_run=False)]` + Insert a block after (`after_pattern`) or before (`before_pattern`) a pattern line. Use `near_context` and `occurrence` (optional, default 1, -1 for last) to specify which pattern match. `dry_run=True` simulates. 
-- **DeleteBlock**: `[tool_call(DeleteBlock, file_path="path/to/file.py", start_pattern="def old_function", end_pattern="# End function")]` - Delete a block of text from start_pattern to end_pattern (inclusive). - Alternatively, use line_count instead of end_pattern to delete a specific number of lines. +- **DeleteBlock**: `[tool_call(DeleteBlock, file_path="...", start_pattern="...", end_pattern="...", near_context="...", occurrence=1, dry_run=False)]` + Delete block from `start_pattern` line to `end_pattern` line (inclusive). Use `line_count` instead of `end_pattern` for fixed number of lines. Use `near_context` and `occurrence` (optional, default 1, -1 for last) for `start_pattern`. `dry_run=True` simulates. -- **ReplaceLine**: `[tool_call(ReplaceLine, file_path="path/to/file.py", line_number=42, new_content="def fixed_function(param):")]` - Replace a specific line by its line number. Especially useful for fixing errors or lint warnings that include line numbers. - Line numbers are 1-based (as in most editors and error messages). +- **ReplaceLine**: `[tool_call(ReplaceLine, file_path="...", line_number=42, new_content="...", dry_run=False)]` + Replace a specific line number (1-based). `dry_run=True` simulates. -- **ReplaceLines**: `[tool_call(ReplaceLines, file_path="path/to/file.py", start_line=42, end_line=45, new_content=""" -def better_function(param): - # Fixed implementation - return process(param) -""")]` - Replace a range of lines by line numbers. Useful for fixing multiple lines referenced in error messages. - The new_content can contain any number of lines, not just the same count as the original range. +- **ReplaceLines**: `[tool_call(ReplaceLines, file_path="...", start_line=42, end_line=45, new_content="...", dry_run=False)]` + Replace a range of lines (1-based, inclusive). `dry_run=True` simulates. 
-- **IndentLines**: `[tool_call(IndentLines, file_path="path/to/file.py", start_pattern="def my_function", end_pattern="return result", indent_levels=1)]` - Indent or unindent a block of lines. Use positive indent_levels to increase indentation or negative to decrease. - Specify either end_pattern or line_count to determine the range of lines to indent. +- **IndentLines**: `[tool_call(IndentLines, file_path="...", start_pattern="...", end_pattern="...", indent_levels=1, near_context="...", occurrence=1, dry_run=False)]` + Indent (`indent_levels` > 0) or unindent (`indent_levels` < 0) a block. Use `end_pattern` or `line_count` for range. Use `near_context` and `occurrence` (optional, default 1, -1 for last) for `start_pattern`. `dry_run=True` simulates. -- **UndoChange**: `[tool_call(UndoChange, change_id="a1b2c3d4")]` - Undo a specific change by its ID. Alternatively, use last_file="path/to/file.py" to undo the most recent change to that file. +- **UndoChange**: `[tool_call(UndoChange, change_id="a1b2c3d4")]` or `[tool_call(UndoChange, last_file="...")]` + Undo a specific change by ID, or the last change made to `last_file`. -- **ListChanges**: `[tool_call(ListChanges, file_path="path/to/file.py", limit=5)]` - List recent changes made to files. Optionally filter by file_path and limit the number of results. +- **ListChanges**: `[tool_call(ListChanges, file_path="...", limit=5)]` + List recent changes, optionally filtered by `file_path` and limited. ### Other Tools - **Command**: `[tool_call(Command, command_string="git diff HEAD~1")]` @@ -129,11 +116,12 @@ When you include any tool call, the system will automatically continue to the ne - Remember the `Find` tool is optimized for locating symbols across the codebase ### Granular Editing Workflow -1. **Discover and Add Files**: Use Glob, Grep, Find to locate relevant files -2. **Make Files Editable**: Convert read-only files to editable with MakeEditable -3. 
**Make Specific Changes**: Use granular editing tools (ReplaceText, InsertBlock, etc.) for precise edits -4. **Review Changes**: List applied changes with ListChanges -5. **Fix Mistakes**: If needed, undo changes with UndoChange by specific ID or last change to a file +1. **Discover and Add Files**: Use Glob, Grep, Find to locate relevant files. +2. **Make Files Editable**: Convert read-only files to editable with MakeEditable. +3. **(Optional) Dry Run**: For risky changes (like ReplaceAll or complex blocks), use the tool with `dry_run=True` first to see the potential impact. Review the diff snippet provided in the result. +4. **Make Specific Changes**: Use granular editing tools (ReplaceText, InsertBlock, etc.) with `dry_run=False` (or omit it) for precise edits. Review the diff snippet in the result. +5. **Review Changes**: List applied changes with ListChanges. +6. **Fix Mistakes**: If a change was incorrect, undo it using UndoChange with the specific `change_id` from the result message or ListChanges. ### Context Management Strategy - Keep your context focused by removing files that are no longer relevant From 643442019add4206ccd82dde284f902ead236099 Mon Sep 17 00:00:00 2001 From: "Amar Sood (tekacs)" Date: Sat, 12 Apr 2025 03:17:30 -0400 Subject: [PATCH 09/63] Encourage the LLM to use dry_run in some cases --- aider/coders/navigator_prompts.py | 33 ++++++++++++++++++++----------- 1 file changed, 22 insertions(+), 11 deletions(-) diff --git a/aider/coders/navigator_prompts.py b/aider/coders/navigator_prompts.py index f5ea0bc84..d785fa199 100644 --- a/aider/coders/navigator_prompts.py +++ b/aider/coders/navigator_prompts.py @@ -115,13 +115,23 @@ When you include any tool call, the system will automatically continue to the ne - Target specific patterns rather than overly broad searches - Remember the `Find` tool is optimized for locating symbols across the codebase -### Granular Editing Workflow -1. 
**Discover and Add Files**: Use Glob, Grep, Find to locate relevant files. -2. **Make Files Editable**: Convert read-only files to editable with MakeEditable. -3. **(Optional) Dry Run**: For risky changes (like ReplaceAll or complex blocks), use the tool with `dry_run=True` first to see the potential impact. Review the diff snippet provided in the result. -4. **Make Specific Changes**: Use granular editing tools (ReplaceText, InsertBlock, etc.) with `dry_run=False` (or omit it) for precise edits. Review the diff snippet in the result. -5. **Review Changes**: List applied changes with ListChanges. -6. **Fix Mistakes**: If a change was incorrect, undo it using UndoChange with the specific `change_id` from the result message or ListChanges. +## Granular Editing Workflow + +**Note on Sequential Edits:** Tool calls within a single message execute sequentially. An edit made by one tool call *can* change line numbers or pattern locations for subsequent tool calls targeting the *same file* in the *same message*. Always check the result message and diff snippet after each edit. + +1. **Discover and Add Files**: Use Glob, Grep, Find to locate relevant files. +2. **Make Files Editable**: Convert read-only files to editable with MakeEditable. +3. **Apply Edits (Default: Direct Edit)**: + * For most edits where you are confident in the parameters (file path, patterns, line numbers), apply the change directly using the tool with `dry_run=False` (or omitting the parameter). + * **Crucially, always review the diff snippet provided in the `[Result (ToolName): ...]` message** to confirm the change was applied correctly and in the intended location. +4. **(Optional) Use `dry_run=True` for Higher Risk:** Consider using `dry_run=True` *before* applying the actual edit if the situation involves higher risk, such as: + * Using `ReplaceAll`, especially with potentially common search text. 
+ * Using pattern-based tools (`InsertBlock`, `DeleteBlock`, `IndentLines`, `ReplaceText`) where the pattern might occur multiple times and `near_context`/`occurrence` might not guarantee targeting the correct instance. + * Using line-number based tools (`ReplaceLine`, `ReplaceLines`) *after* other edits have already been made to the *same file* within the *same message*, as line numbers might have shifted unexpectedly. + * If using `dry_run=True`, review the simulated diff in the result. If it looks correct, issue the *exact same tool call* again with `dry_run=False` (or omitted). +5. **Review and Recover:** + * Use `ListChanges` to see a history of applied changes. + * If you review a result diff (from a direct edit) and find the change was incorrect or applied in the wrong place, use `[tool_call(UndoChange, change_id="...")]` in your *next* message, using the `change_id` provided in the result message. Then, attempt the corrected edit. ### Context Management Strategy - Keep your context focused by removing files that are no longer relevant @@ -221,10 +231,11 @@ NOTE that this uses four backticks as the fence and not three! {quad_backtick_reminder} ### Error Handling -- If tools return errors or unexpected results, try alternative approaches -- Refine search patterns if results are too broad or too narrow -- Use the enhanced context blocks (directory structure and git status) to orient yourself -- Use ListChanges to see what edits have been made and UndoChange to revert mistakes +- If a tool call returns an error message, analyze the error and try correcting the tool call parameters. +- If a tool call succeeds but the **result message and diff snippet show the change was applied incorrectly** (e.g., wrong location, unintended side effects), use `[tool_call(UndoChange, change_id="...")]` in your next message to revert it before attempting a corrected version. +- Refine search patterns or use `near_context`/`occurrence` if edits affect the wrong location. 
+- Use the enhanced context blocks (directory structure and git status) to re-orient yourself if needed. +- Use `ListChanges` to review the sequence of successful changes. Always reply to the user in {language}. From dea5bd54f29904039ad5705f43155245e5d127dc Mon Sep 17 00:00:00 2001 From: "Amar Sood (tekacs)" Date: Sat, 12 Apr 2025 03:22:01 -0400 Subject: [PATCH 10/63] Add a tool to extract lines from a file into others --- aider/coders/navigator_coder.py | 239 +++++++++++++++++++++++++++++- aider/coders/navigator_prompts.py | 4 + 2 files changed, 242 insertions(+), 1 deletion(-) diff --git a/aider/coders/navigator_coder.py b/aider/coders/navigator_coder.py index 5155318dc..8f8dcb616 100644 --- a/aider/coders/navigator_coder.py +++ b/aider/coders/navigator_coder.py @@ -701,7 +701,25 @@ class NavigatorCoder(Coder): limit = params.get('limit', 10) result_message = self._execute_list_changes(file_path, limit) - + + elif norm_tool_name == 'extractlines': + source_file_path = params.get('source_file_path') + target_file_path = params.get('target_file_path') + start_pattern = params.get('start_pattern') + end_pattern = params.get('end_pattern') + line_count = params.get('line_count') + near_context = params.get('near_context') + occurrence = params.get('occurrence', 1) + dry_run = params.get('dry_run', False) + + if source_file_path and target_file_path and start_pattern: + result_message = self._execute_extract_lines( + source_file_path, target_file_path, start_pattern, end_pattern, + line_count, near_context, occurrence, dry_run + ) + else: + result_message = "Error: Missing required parameters for ExtractLines (source_file_path, target_file_path, start_pattern)" + else: result_message = f"Error: Unknown tool name '{tool_name}'" @@ -2809,6 +2827,225 @@ Just reply with fixed versions of the {blocks} above that failed to match. 
self.io.tool_error(f"Error in ListChanges: {str(e)}\n{traceback.format_exc()}") # Add traceback return f"Error: {str(e)}" + def _execute_extract_lines(self, source_file_path, target_file_path, start_pattern, end_pattern=None, line_count=None, near_context=None, occurrence=1, dry_run=False): + """ + Extract a range of lines from a source file and move them to a target file. + + Parameters: + - source_file_path: Path to the file to extract lines from + - target_file_path: Path to the file to append extracted lines to (will be created if needed) + - start_pattern: Pattern marking the start of the block to extract + - end_pattern: Optional pattern marking the end of the block + - line_count: Optional number of lines to extract (alternative to end_pattern) + - near_context: Optional text nearby to help locate the correct instance of the start_pattern + - occurrence: Which occurrence of the start_pattern to use (1-based index, or -1 for last) + - dry_run: If True, simulate the change without modifying files + + Returns a result message. + """ + try: + # --- Validate Source File --- + abs_source_path = self.abs_root_path(source_file_path) + rel_source_path = self.get_rel_fname(abs_source_path) + + if not os.path.isfile(abs_source_path): + self.io.tool_error(f"Source file '{source_file_path}' not found") + return f"Error: Source file not found" + + if abs_source_path not in self.abs_fnames: + if abs_source_path in self.abs_read_only_fnames: + self.io.tool_error(f"Source file '{source_file_path}' is read-only. Use MakeEditable first.") + return f"Error: Source file is read-only. Use MakeEditable first." 
+ else: + self.io.tool_error(f"Source file '{source_file_path}' not in context") + return f"Error: Source file not in context" + + # --- Validate Target File --- + abs_target_path = self.abs_root_path(target_file_path) + rel_target_path = self.get_rel_fname(abs_target_path) + target_exists = os.path.isfile(abs_target_path) + target_is_editable = abs_target_path in self.abs_fnames + target_is_readonly = abs_target_path in self.abs_read_only_fnames + + if target_exists and not target_is_editable: + if target_is_readonly: + self.io.tool_error(f"Target file '{target_file_path}' exists but is read-only. Use MakeEditable first.") + return f"Error: Target file exists but is read-only. Use MakeEditable first." + else: + # This case shouldn't happen if file exists, but handle defensively + self.io.tool_error(f"Target file '{target_file_path}' exists but is not in context. Add it first.") + return f"Error: Target file exists but is not in context." + + # --- Read Source Content --- + source_content = self.io.read_text(abs_source_path) + if source_content is None: + self.io.tool_error(f"Could not read source file '{source_file_path}' before ExtractLines operation.") + return f"Error: Could not read source file '{source_file_path}'" + + # --- Find Extraction Range --- + if end_pattern and line_count: + self.io.tool_error("Cannot specify both end_pattern and line_count") + return "Error: Cannot specify both end_pattern and line_count" + + source_lines = source_content.splitlines() + original_source_content = source_content + + start_pattern_line_indices = [] + for i, line in enumerate(source_lines): + if start_pattern in line: + if near_context: + context_window_start = max(0, i - 5) + context_window_end = min(len(source_lines), i + 6) + context_block = "\n".join(source_lines[context_window_start:context_window_end]) + if near_context in context_block: + start_pattern_line_indices.append(i) + else: + start_pattern_line_indices.append(i) + + if not start_pattern_line_indices: + 
err_msg = f"Start pattern '{start_pattern}' not found" + if near_context: err_msg += f" near context '{near_context}'" + err_msg += f" in source file '{source_file_path}'." + self.io.tool_error(err_msg) + return f"Error: {err_msg}" + + num_occurrences = len(start_pattern_line_indices) + try: + occurrence = int(occurrence) + if occurrence == -1: + target_idx = num_occurrences - 1 + elif occurrence > 0 and occurrence <= num_occurrences: + target_idx = occurrence - 1 + else: + err_msg = f"Occurrence number {occurrence} is out of range for start pattern '{start_pattern}'. Found {num_occurrences} occurrences" + if near_context: err_msg += f" near '{near_context}'" + err_msg += f" in '{source_file_path}'." + self.io.tool_error(err_msg) + return f"Error: {err_msg}" + except ValueError: + self.io.tool_error(f"Invalid occurrence value: '{occurrence}'. Must be an integer.") + return f"Error: Invalid occurrence value '{occurrence}'" + + start_line = start_pattern_line_indices[target_idx] + occurrence_str = f"occurrence {occurrence} of " if num_occurrences > 1 else "" + + end_line = -1 + if end_pattern: + for i in range(start_line, len(source_lines)): + if end_pattern in source_lines[i]: + end_line = i + break + if end_line == -1: + err_msg = f"End pattern '{end_pattern}' not found after {occurrence_str}start pattern '{start_pattern}' (line {start_line + 1}) in '{source_file_path}'." + self.io.tool_error(err_msg) + return f"Error: {err_msg}" + elif line_count: + try: + line_count = int(line_count) + if line_count <= 0: raise ValueError("Line count must be positive") + end_line = min(start_line + line_count - 1, len(source_lines) - 1) + except ValueError: + self.io.tool_error(f"Invalid line_count value: '{line_count}'. 
Must be a positive integer.") + return f"Error: Invalid line_count value '{line_count}'" + else: + end_line = start_line # Extract just the start line if no end specified + + # --- Prepare Content Changes --- + extracted_lines = source_lines[start_line:end_line+1] + new_source_lines = source_lines[:start_line] + source_lines[end_line+1:] + new_source_content = '\n'.join(new_source_lines) + + target_content = "" + if target_exists: + target_content = self.io.read_text(abs_target_path) + if target_content is None: + self.io.tool_error(f"Could not read existing target file '{target_file_path}'.") + return f"Error: Could not read target file '{target_file_path}'" + original_target_content = target_content # For tracking + + # Append extracted lines to target content, ensuring a newline if target wasn't empty + extracted_block = '\n'.join(extracted_lines) + if target_content and not target_content.endswith('\n'): + target_content += '\n' # Add newline before appending if needed + new_target_content = target_content + extracted_block + + # --- Generate Diffs --- + source_diff_snippet = self._generate_diff_snippet_delete(original_source_content, start_line, end_line) + target_insertion_line = len(target_content.splitlines()) if target_content else 0 + target_diff_snippet = self._generate_diff_snippet_insert(original_target_content, target_insertion_line, extracted_lines) + + # --- Handle Dry Run --- + # --- Handle Dry Run --- + if dry_run: + num_extracted = end_line - start_line + 1 + target_action = "append to" if target_exists else "create" + self.io.tool_output(f"Dry run: Would extract {num_extracted} lines (from {occurrence_str}start pattern '{start_pattern}') in {source_file_path} and {target_action} {target_file_path}") + # Provide more informative dry run response with diffs + return ( + f"Dry run: Would extract {num_extracted} lines from {rel_source_path} and {target_action} {rel_target_path}.\n" + f"Source Diff (Deletion):\n{source_diff_snippet}\n" + f"Target 
Diff (Insertion):\n{target_diff_snippet}" + ) + + # --- Apply Changes (Not Dry Run) --- + self.io.write_text(abs_source_path, new_source_content) + self.io.write_text(abs_target_path, new_target_content) + + # --- Track Changes --- + source_change_id = "TRACKING_FAILED" + target_change_id = "TRACKING_FAILED" + try: + source_metadata = { + 'start_line': start_line + 1, 'end_line': end_line + 1, + 'start_pattern': start_pattern, 'end_pattern': end_pattern, 'line_count': line_count, + 'near_context': near_context, 'occurrence': occurrence, + 'extracted_content': extracted_block, 'target_file': rel_target_path + } + source_change_id = self.change_tracker.track_change( + file_path=rel_source_path, change_type='extractlines_source', + original_content=original_source_content, new_content=new_source_content, + metadata=source_metadata + ) + except Exception as track_e: + self.io.tool_error(f"Error tracking source change for ExtractLines: {track_e}") + + try: + target_metadata = { + 'insertion_line': target_insertion_line + 1, + 'inserted_content': extracted_block, 'source_file': rel_source_path + } + target_change_id = self.change_tracker.track_change( + file_path=rel_target_path, change_type='extractlines_target', + original_content=original_target_content, new_content=new_target_content, + metadata=target_metadata + ) + except Exception as track_e: + self.io.tool_error(f"Error tracking target change for ExtractLines: {track_e}") + + # --- Update Context --- + self.aider_edited_files.add(rel_source_path) + self.aider_edited_files.add(rel_target_path) + if not target_exists: + # Add the newly created file to editable context + self.abs_fnames.add(abs_target_path) + self.io.tool_output(f"✨ Created and added '{target_file_path}' to editable context.") + + # --- Return Result --- + num_extracted = end_line - start_line + 1 + target_action = "appended to" if target_exists else "created" + self.io.tool_output(f"✅ Extracted {num_extracted} lines from {rel_source_path} 
(change_id: {source_change_id}) and {target_action} {rel_target_path} (change_id: {target_change_id})") + # Provide more informative success response with change IDs and diffs + return ( + f"Successfully extracted {num_extracted} lines from {rel_source_path} and {target_action} {rel_target_path}.\n" + f"Source Change ID: {source_change_id}\nSource Diff (Deletion):\n{source_diff_snippet}\n" + f"Target Change ID: {target_change_id}\nTarget Diff (Insertion):\n{target_diff_snippet}" + ) + + except Exception as e: + self.io.tool_error(f"Error in ExtractLines: {str(e)}\n{traceback.format_exc()}") + return f"Error: {str(e)}" + + # ------------------- Diff Generation Helpers ------------------- def _generate_diff_snippet(self, original_content, start_index, replaced_len, replacement_text): diff --git a/aider/coders/navigator_prompts.py b/aider/coders/navigator_prompts.py index d785fa199..997dd49a4 100644 --- a/aider/coders/navigator_prompts.py +++ b/aider/coders/navigator_prompts.py @@ -88,6 +88,10 @@ Act as an expert software engineer with the ability to autonomously navigate and - **ListChanges**: `[tool_call(ListChanges, file_path="...", limit=5)]` List recent changes, optionally filtered by `file_path` and limited. +- **ExtractLines**: `[tool_call(ExtractLines, source_file_path="...", target_file_path="...", start_pattern="...", end_pattern="...", near_context="...", occurrence=1, dry_run=False)]` + Extract lines from `start_pattern` to `end_pattern` (or use `line_count`) in `source_file_path` and move them to `target_file_path`. Creates `target_file_path` if it doesn't exist. Use `near_context` and `occurrence` (optional, default 1, -1 for last) for `start_pattern`. `dry_run=True` simulates. + *Useful for refactoring by moving functions, classes, or blocks of code into separate files.* + ### Other Tools - **Command**: `[tool_call(Command, command_string="git diff HEAD~1")]` Execute a shell command. Requires user confirmation. 
From bbc16ca60a0f416388d5730831f8167dca50e050 Mon Sep 17 00:00:00 2001 From: "Amar Sood (tekacs)" Date: Sat, 12 Apr 2025 04:32:28 -0400 Subject: [PATCH 11/63] Make it clearer that Add commands view, not adding --- aider/coders/navigator_coder.py | 101 +++++++++++++++--------------- aider/coders/navigator_prompts.py | 34 +++++----- 2 files changed, 67 insertions(+), 68 deletions(-) diff --git a/aider/coders/navigator_coder.py b/aider/coders/navigator_coder.py index 8f8dcb616..9893929ba 100644 --- a/aider/coders/navigator_coder.py +++ b/aider/coders/navigator_coder.py @@ -522,31 +522,31 @@ class NavigatorCoder(Coder): # Normalize tool name for case-insensitive matching norm_tool_name = tool_name.lower() - if norm_tool_name == 'glob': + if norm_tool_name == 'viewfilesatglob': pattern = params.get('pattern') if pattern is not None: - result_message = self._execute_glob(pattern) + result_message = self._execute_view_files_at_glob(pattern) else: - result_message = "Error: Missing 'pattern' parameter for Glob" - elif norm_tool_name == 'grep': + result_message = "Error: Missing 'pattern' parameter for ViewFilesAtGlob" + elif norm_tool_name == 'viewfilesmatching': pattern = params.get('pattern') file_pattern = params.get('file_pattern') # Optional if pattern is not None: - result_message = self._execute_grep(pattern, file_pattern) + result_message = self._execute_view_files_matching(pattern, file_pattern) else: - result_message = "Error: Missing 'pattern' parameter for Grep" + result_message = "Error: Missing 'pattern' parameter for ViewFilesMatching" elif norm_tool_name == 'ls': directory = params.get('directory') if directory is not None: result_message = self._execute_ls(directory) else: result_message = "Error: Missing 'directory' parameter for Ls" - elif norm_tool_name == 'add': + elif norm_tool_name == 'view': file_path = params.get('file_path') if file_path is not None: - result_message = self._execute_add(file_path) + result_message = self._execute_view(file_path) 
else: - result_message = "Error: Missing 'file_path' parameter for Add" + result_message = "Error: Missing 'file_path' parameter for View" elif norm_tool_name == 'remove': file_path = params.get('file_path') if file_path is not None: @@ -565,12 +565,12 @@ class NavigatorCoder(Coder): result_message = self._execute_make_readonly(file_path) else: result_message = "Error: Missing 'file_path' parameter for MakeReadonly" - elif norm_tool_name == 'find': + elif norm_tool_name == 'viewfileswithsymbol': symbol = params.get('symbol') if symbol is not None: - result_message = self._execute_find(symbol) + result_message = self._execute_view_files_with_symbol(symbol) else: - result_message = "Error: Missing 'symbol' parameter for Find" + result_message = "Error: Missing 'symbol' parameter for ViewFilesWithSymbol" elif norm_tool_name == 'command': command_string = params.get('command_string') if command_string is not None: @@ -916,10 +916,10 @@ Just reply with fixed versions of the {blocks} above that failed to match. return edited_files - def _execute_glob(self, pattern): + def _execute_view_files_at_glob(self, pattern): """ - Execute a glob pattern and add matching files to context. - + Execute a glob pattern and add matching files to context as read-only. + This tool helps the LLM find files by pattern matching, similar to how a developer would use glob patterns to find files. """ @@ -965,13 +965,13 @@ Just reply with fixed versions of the {blocks} above that failed to match. self.io.tool_output(f"⚠️ No files found matching '{pattern}'") return f"No files found matching '{pattern}'" except Exception as e: - self.io.tool_error(f"Error in glob: {str(e)}") + self.io.tool_error(f"Error in ViewFilesAtGlob: {str(e)}") return f"Error: {str(e)}" - - def _execute_grep(self, search_pattern, file_pattern=None): + + def _execute_view_files_matching(self, search_pattern, file_pattern=None): """ - Search for pattern in files and add matching files to context. 
- + Search for pattern in files and add matching files to context as read-only. + This tool lets the LLM search for content within files, mimicking how a developer would use grep to find relevant code. """ @@ -1034,9 +1034,9 @@ Just reply with fixed versions of the {blocks} above that failed to match. self.io.tool_output(f"⚠️ Pattern '{search_pattern}' not found in any files") return f"Pattern not found in any files" except Exception as e: - self.io.tool_error(f"Error in grep: {str(e)}") + self.io.tool_error(f"Error in ViewFilesMatching: {str(e)}") return f"Error: {str(e)}" - + def _execute_ls(self, dir_path): """ List files in directory and optionally add some to context. @@ -1083,27 +1083,28 @@ Just reply with fixed versions of the {blocks} above that failed to match. except Exception as e: self.io.tool_error(f"Error in ls: {str(e)}") return f"Error: {str(e)}" - - def _execute_add(self, file_path): + + def _execute_view(self, file_path): """ Explicitly add a file to context as read-only. - - This gives the LLM explicit control over what files to add, + + This gives the LLM explicit control over what files to view, rather than relying on indirect mentions. """ try: - return self._add_file_to_context(file_path, True) + # Use the helper, marking it as an explicit view request + return self._add_file_to_context(file_path, explicit=True) except Exception as e: - self.io.tool_error(f"Error adding file: {str(e)}") + self.io.tool_error(f"Error viewing file: {str(e)}") return f"Error: {str(e)}" - + def _add_file_to_context(self, file_path, explicit=False): """ Helper method to add a file to context as read-only. - + Parameters: - file_path: Path to the file to add - - explicit: Whether this was an explicit add command (vs. implicit through glob/grep) + - explicit: Whether this was an explicit view command (vs. 
implicit through ViewFilesAtGlob/ViewFilesMatching) """ # Check if file exists abs_path = self.abs_root_path(file_path) @@ -1144,22 +1145,22 @@ Just reply with fixed versions of the {blocks} above that failed to match. # Add to read-only files self.abs_read_only_fnames.add(abs_path) - + # Track in exploration set self.files_added_in_exploration.add(rel_path) - + # Inform user if explicit: - self.io.tool_output(f"📎 Added '{file_path}' to context as read-only") - return f"Added file to context as read-only" + self.io.tool_output(f"📎 Viewed '{file_path}' (added to context as read-only)") + return f"Viewed file (added to context as read-only)" else: - # For implicit adds (from glob/grep), just return success + # For implicit adds (from ViewFilesAtGlob/ViewFilesMatching), just return success return f"Added file to context as read-only" - + except Exception as e: - self.io.tool_error(f"Error adding file '{file_path}': {str(e)}") - return f"Error adding file: {str(e)}" - + self.io.tool_error(f"Error adding file '{file_path}' for viewing: {str(e)}") + return f"Error adding file for viewing: {str(e)}" + def _execute_make_editable(self, file_path): """ Convert a read-only file to an editable file. @@ -1266,17 +1267,17 @@ Just reply with fixed versions of the {blocks} above that failed to match. self.io.tool_error(f"Error removing file: {str(e)}") return f"Error: {str(e)}" - def _execute_find(self, symbol): + def _execute_view_files_with_symbol(self, symbol): """ Find files containing a specific symbol and add them to context as read-only. 
""" try: if not self.repo_map: - self.io.tool_output("⚠️ Repo map not available, cannot use Find tool.") + self.io.tool_output("⚠️ Repo map not available, cannot use ViewFilesWithSymbol tool.") return "Repo map not available" if not symbol: - return "Error: Missing 'symbol' parameter for Find" + return "Error: Missing 'symbol' parameter for ViewFilesWithSymbol" self.io.tool_output(f"🔎 Searching for symbol '{symbol}'...") found_files = set() @@ -1337,7 +1338,7 @@ Just reply with fixed versions of the {blocks} above that failed to match. return f"Symbol '{symbol}' not found in searchable files." except Exception as e: - self.io.tool_error(f"Error in find: {str(e)}") + self.io.tool_error(f"Error in ViewFilesWithSymbol: {str(e)}") return f"Error: {str(e)}" def _execute_command(self, command_string): @@ -1407,19 +1408,17 @@ Just reply with fixed versions of the {blocks} above that failed to match. # Get new files to add (not already in context) new_files = mentioned_files - current_files - # In navigator mode, we *only* add files via explicit tool commands. + # In navigator mode, we *only* add files via explicit tool commands (`View`, `ViewFilesAtGlob`, etc.). # Do nothing here for implicit mentions. pass - - - - + + def check_for_file_mentions(self, content): """ Override parent's method to use our own file processing logic. - + Override parent's method to disable implicit file mention handling in navigator mode. - Files should only be added via explicit tool commands (`Add`, `Glob`, `Grep`). + Files should only be added via explicit tool commands (`View`, `ViewFilesAtGlob`, `ViewFilesMatching`, `ViewFilesWithSymbol`). """ # Do nothing - disable implicit file adds in navigator mode. 
pass diff --git a/aider/coders/navigator_prompts.py b/aider/coders/navigator_prompts.py index 997dd49a4..9f2f8fd1d 100644 --- a/aider/coders/navigator_prompts.py +++ b/aider/coders/navigator_prompts.py @@ -31,23 +31,23 @@ Act as an expert software engineer with the ability to autonomously navigate and ## Available Tools ### File Discovery Tools -- **Glob**: `[tool_call(Glob, pattern="**/*.py")]` +- **ViewFilesAtGlob**: `[tool_call(ViewFilesAtGlob, pattern="**/*.py")]` Find files matching a glob pattern and add them to context as read-only. Supports patterns like "src/**/*.ts" or "*.json". -- **Grep**: `[tool_call(Grep, pattern="class User", file_pattern="*.py")]` +- **ViewFilesMatching**: `[tool_call(ViewFilesMatching, pattern="class User", file_pattern="*.py")]` Search for text in files and add matching files to context as read-only. Files with more matches are prioritized. `file_pattern` is optional. - **Ls**: `[tool_call(Ls, directory="src/components")]` List files in a directory. Useful for exploring the project structure. -- **Find**: `[tool_call(Find, symbol="my_function")]` +- **ViewFilesWithSymbol**: `[tool_call(ViewFilesWithSymbol, symbol="my_function")]` Find files containing a specific symbol (function, class, variable) and add them to context as read-only. Leverages the repo map for accurate symbol lookup. ### Context Management Tools -- **Add**: `[tool_call(Add, file_path="src/main.py")]` +- **View**: `[tool_call(View, file_path="src/main.py")]` Explicitly add a specific file to context as read-only. - **Remove**: `[tool_call(Remove, file_path="tests/old_test.py")]` @@ -104,8 +104,8 @@ When you include any tool call, the system will automatically continue to the ne ## Navigation Workflow ### Exploration Strategy -1. **Initial Discovery**: Use `Glob`, `Grep`, `Ls`, or `Find` to identify relevant files -2. **Focused Investigation**: Add promising files to context with `Add` +1. 
**Initial Discovery**: Use `ViewFilesAtGlob`, `ViewFilesMatching`, `Ls`, or `ViewFilesWithSymbol` to identify relevant files +2. **Focused Investigation**: Add promising files to context with `View` 3. **Context Management**: Remove irrelevant files with `Remove` to maintain focus 4. **Preparation for Editing**: Convert files to editable with `MakeEditable` when needed 5. **Continued Exploration**: Include any tool call to automatically continue to the next round @@ -114,16 +114,16 @@ When you include any tool call, the system will automatically continue to the ne ### Tool Usage Best Practices - Use the exact syntax `[tool_call(ToolName, param1=value1, param2="value2")]` for execution - Tool names are case-insensitive; parameters can be unquoted or quoted -- Verify files aren't already in context before adding them -- Use precise grep patterns with file_pattern to narrow search scope +- Verify files aren't already in context before adding them with `View` +- Use precise search patterns with `ViewFilesMatching` and `file_pattern` to narrow scope - Target specific patterns rather than overly broad searches -- Remember the `Find` tool is optimized for locating symbols across the codebase +- Remember the `ViewFilesWithSymbol` tool is optimized for locating symbols across the codebase ## Granular Editing Workflow **Note on Sequential Edits:** Tool calls within a single message execute sequentially. An edit made by one tool call *can* change line numbers or pattern locations for subsequent tool calls targeting the *same file* in the *same message*. Always check the result message and diff snippet after each edit. -1. **Discover and Add Files**: Use Glob, Grep, Find to locate relevant files. +1. **Discover and View Files**: Use `ViewFilesAtGlob`, `ViewFilesMatching`, `ViewFilesWithSymbol` to locate relevant files. Use `View` to add specific files. 2. **Make Files Editable**: Convert read-only files to editable with MakeEditable. 3. 
**Apply Edits (Default: Direct Edit)**: * For most edits where you are confident in the parameters (file path, patterns, line numbers), apply the change directly using the tool with `dry_run=False` (or omitting the parameter). @@ -255,19 +255,19 @@ Always reply to the user in {language}. role="assistant", content="""I'll help you understand the authentication system in this project. Let me explore the codebase first to find all relevant files. -[tool_call(Grep, pattern="login|auth|password|session", file_pattern="*.py")]""", +[tool_call(ViewFilesMatching, pattern="login|auth|password|session", file_pattern="*.py")]""", ), dict( role="user", - content="[Result (Grep): Found in 3 files: auth/models.py (12 matches), auth/views.py (8 matches), users/authentication.py (5 matches)]", + content="[Result (ViewFilesMatching): Found in 3 files: auth/models.py (12 matches), auth/views.py (8 matches), users/authentication.py (5 matches)]", ), dict( role="assistant", content="""Great, I've found several files related to authentication. Let me examine them to understand the login flow. -[tool_call(Add, file_path="auth/models.py")] -[tool_call(Add, file_path="auth/views.py")] -[tool_call(Add, file_path="users/authentication.py")]""", +[tool_call(View, file_path="auth/models.py")] +[tool_call(View, file_path="auth/views.py")] +[tool_call(View, file_path="users/authentication.py")]""", ), dict( role="user", @@ -306,11 +306,11 @@ Trust this message as the true contents of the files! files_no_full_files_with_repo_map = """ I have access to a map of the repository with summary information about files, but I don't have the complete content of any files yet. -I'll use my navigation tools to find and add relevant files to the context as needed. +I'll use my navigation tools (`ViewFilesAtGlob`, `ViewFilesMatching`, `ViewFilesWithSymbol`, `View`) to find and add relevant files to the context as needed. """ - files_no_full_files_with_repo_map_reply = """I understand. 
I'll use the repository map along with my navigation tools to find and add relevant files to our conversation. + files_no_full_files_with_repo_map_reply = """I understand. I'll use the repository map along with my navigation tools (`ViewFilesAtGlob`, `ViewFilesMatching`, `ViewFilesWithSymbol`, `View`) to find and add relevant files to our conversation. """ repo_content_prefix = """ From 9e1900a126f24724fc937c6cda516a0ceb7ca64d Mon Sep 17 00:00:00 2001 From: "Amar Sood (tekacs)" Date: Sat, 12 Apr 2025 04:44:20 -0400 Subject: [PATCH 12/63] MakeEditable: Improve handling of file states and output --- aider/coders/navigator_coder.py | 43 +++++++++++++++++++++------------ 1 file changed, 28 insertions(+), 15 deletions(-) diff --git a/aider/coders/navigator_coder.py b/aider/coders/navigator_coder.py index 9893929ba..92ed371e8 100644 --- a/aider/coders/navigator_coder.py +++ b/aider/coders/navigator_coder.py @@ -1173,23 +1173,36 @@ Just reply with fixed versions of the {blocks} above that failed to match. 
abs_path = self.abs_root_path(file_path) rel_path = self.get_rel_fname(abs_path) - # Check if file is in read-only context - if abs_path not in self.abs_read_only_fnames: - if abs_path in self.abs_fnames: - self.io.tool_output(f"📝 File '{file_path}' is already editable") - return f"File is already editable" - else: - self.io.tool_output(f"⚠️ File '{file_path}' not in context") - return f"File not in context" - - # Move from read-only to editable - self.abs_read_only_fnames.remove(abs_path) + # Check if file is already editable + if abs_path in self.abs_fnames: + self.io.tool_output(f"📝 File '{file_path}' is already editable") + return f"File is already editable" + + # Check if file exists on disk + if not os.path.isfile(abs_path): + self.io.tool_output(f"⚠️ File '{file_path}' not found") + return f"Error: File not found" + + # File exists, is not editable, might be read-only or not in context yet + was_read_only = False + if abs_path in self.abs_read_only_fnames: + self.abs_read_only_fnames.remove(abs_path) + was_read_only = True + + # Add to editable files self.abs_fnames.add(abs_path) - - self.io.tool_output(f"📝 Made '{file_path}' editable") - return f"File is now editable" + + if was_read_only: + self.io.tool_output(f"📝 Moved '{file_path}' from read-only to editable") + return f"File is now editable (moved from read-only)" + else: + # File was not previously in context at all + self.io.tool_output(f"📝 Added '{file_path}' directly to editable context") + # Track if added during exploration? Maybe not needed for direct MakeEditable. 
+ # self.files_added_in_exploration.add(rel_path) # Consider if needed + return f"File is now editable (added directly)" except Exception as e: - self.io.tool_error(f"Error making file editable: {str(e)}") + self.io.tool_error(f"Error in MakeEditable for '{file_path}': {str(e)}") return f"Error: {str(e)}" def _execute_make_readonly(self, file_path): From dfd248245c0521a26868f62724f3af3f66ad9890 Mon Sep 17 00:00:00 2001 From: "Amar Sood (tekacs)" Date: Sat, 12 Apr 2025 04:55:00 -0400 Subject: [PATCH 13/63] Add CommandInteractive tool --- aider/coders/navigator_coder.py | 49 +++++++++++++++++++++++++++++-- aider/coders/navigator_prompts.py | 4 ++- 2 files changed, 49 insertions(+), 4 deletions(-) diff --git a/aider/coders/navigator_coder.py b/aider/coders/navigator_coder.py index 92ed371e8..6edf488b4 100644 --- a/aider/coders/navigator_coder.py +++ b/aider/coders/navigator_coder.py @@ -18,8 +18,8 @@ from .editblock_coder import find_original_update_blocks, do_replace, find_simil from .navigator_prompts import NavigatorPrompts from aider.repo import ANY_GIT_ERROR from aider import urls -# Import run_cmd_subprocess directly for non-interactive execution -from aider.run_cmd import run_cmd_subprocess +# Import run_cmd for potentially interactive execution and run_cmd_subprocess for guaranteed non-interactive +from aider.run_cmd import run_cmd, run_cmd_subprocess # Import the change tracker from aider.change_tracker import ChangeTracker @@ -577,7 +577,13 @@ class NavigatorCoder(Coder): result_message = self._execute_command(command_string) else: result_message = "Error: Missing 'command_string' parameter for Command" - + elif norm_tool_name == 'commandinteractive': + command_string = params.get('command_string') + if command_string is not None: + result_message = self._execute_command_interactive(command_string) + else: + result_message = "Error: Missing 'command_string' parameter for CommandInteractive" + # Granular editing tools elif norm_tool_name == 'replacetext': 
file_path = params.get('file_path') @@ -1407,6 +1413,43 @@ Just reply with fixed versions of the {blocks} above that failed to match. # self.io.tool_error(traceback.format_exc()) return f"Error executing command: {str(e)}" + def _execute_command_interactive(self, command_string): + """ + Execute an interactive shell command using run_cmd (which uses pexpect/PTY). + """ + try: + self.io.tool_output(f"⚙️ Starting interactive shell command: {command_string}") + self.io.tool_output(">>> You may need to interact with the command below <<<") + + # Use run_cmd which handles PTY logic + exit_status, combined_output = run_cmd( + command_string, + verbose=self.verbose, # Pass verbose flag + error_print=self.io.tool_error, # Use io for error printing + cwd=self.root # Execute in the project root + ) + + self.io.tool_output(">>> Interactive command finished <<<") + + # Format the output for the result message, include more content + output_content = combined_output or "" + # Use the existing token threshold constant as the character limit for truncation + output_limit = self.large_file_token_threshold + if len(output_content) > output_limit: + # Truncate and add a clear message using the constant value + output_content = output_content[:output_limit] + f"\n... (output truncated at {output_limit} characters, based on large_file_token_threshold)" + + if exit_status == 0: + return f"Interactive command finished successfully (exit code 0). Output:\n{output_content}" + else: + return f"Interactive command finished with exit code {exit_status}. 
Output:\n{output_content}" + + except Exception as e: + self.io.tool_error(f"Error executing interactive shell command '{command_string}': {str(e)}") + # Optionally include traceback for debugging if verbose + # if self.verbose: + # self.io.tool_error(traceback.format_exc()) + return f"Error executing interactive command: {str(e)}" def _process_file_mentions(self, content): """ diff --git a/aider/coders/navigator_prompts.py b/aider/coders/navigator_prompts.py index 9f2f8fd1d..0d747736d 100644 --- a/aider/coders/navigator_prompts.py +++ b/aider/coders/navigator_prompts.py @@ -94,7 +94,9 @@ Act as an expert software engineer with the ability to autonomously navigate and ### Other Tools - **Command**: `[tool_call(Command, command_string="git diff HEAD~1")]` - Execute a shell command. Requires user confirmation. + Execute a *non-interactive* shell command. Requires user confirmation. Use for commands that don't need user input (e.g., `ls`, `git status`, `cat file`). +- **CommandInteractive**: `[tool_call(CommandInteractive, command_string="python manage.py shell")]` + Execute an *interactive* shell command using a pseudo-terminal (PTY). Use for commands that might require user interaction (e.g., running a shell, a development server, `ssh`). Does *not* require separate confirmation as interaction happens directly. ### Multi-Turn Exploration When you include any tool call, the system will automatically continue to the next round. 
From 24042e91d566b659849c7a8cf2d074526511f00e Mon Sep 17 00:00:00 2001 From: "Amar Sood (tekacs)" Date: Sat, 12 Apr 2025 04:59:42 -0400 Subject: [PATCH 14/63] Rename UndoChange's file argument to file_path --- aider/coders/navigator_coder.py | 45 +++++++++++++++---------------- aider/coders/navigator_prompts.py | 8 +++--- 2 files changed, 26 insertions(+), 27 deletions(-) diff --git a/aider/coders/navigator_coder.py b/aider/coders/navigator_coder.py index 6edf488b4..d4196b324 100644 --- a/aider/coders/navigator_coder.py +++ b/aider/coders/navigator_coder.py @@ -695,13 +695,13 @@ class NavigatorCoder(Coder): ) else: result_message = "Error: Missing required parameters for IndentLines (file_path, start_pattern)" - + elif norm_tool_name == 'undochange': change_id = params.get('change_id') - last_file = params.get('last_file') - - result_message = self._execute_undo_change(change_id, last_file) - + file_path = params.get('file_path') + + result_message = self._execute_undo_change(change_id, file_path) + elif norm_tool_name == 'listchanges': file_path = params.get('file_path') limit = params.get('limit', 10) @@ -2321,38 +2321,38 @@ Just reply with fixed versions of the {blocks} above that failed to match. except Exception as e: self.io.tool_error(f"Error in DeleteBlock: {str(e)}\n{traceback.format_exc()}") # Add traceback return f"Error: {str(e)}" - - def _execute_undo_change(self, change_id=None, last_file=None): + + def _execute_undo_change(self, change_id=None, file_path=None): """ Undo a specific change by ID, or the last change to a file. - + Parameters: - change_id: ID of the change to undo - - last_file: Path to file where the last change should be undone - + - file_path: Path to file where the last change should be undone + Returns a result message. """ # Note: Undo does not have a dry_run parameter as it's inherently about reverting a previous action. 
try: # Validate parameters - if change_id is None and last_file is None: - self.io.tool_error("Must specify either change_id or last_file for UndoChange") - return "Error: Must specify either change_id or last_file" # Improve Point 4 - - # If last_file is specified, get the most recent change for that file - if last_file: - abs_path = self.abs_root_path(last_file) + if change_id is None and file_path is None: + self.io.tool_error("Must specify either change_id or file_path for UndoChange") + return "Error: Must specify either change_id or file_path" # Improve Point 4 + + # If file_path is specified, get the most recent change for that file + if file_path: + abs_path = self.abs_root_path(file_path) rel_path = self.get_rel_fname(abs_path) - + change_id = self.change_tracker.get_last_change(rel_path) if not change_id: # Improve error message (Point 4) - self.io.tool_error(f"No tracked changes found for file '{last_file}' to undo.") - return f"Error: No changes found for file '{last_file}'" + self.io.tool_error(f"No tracked changes found for file '{file_path}' to undo.") + return f"Error: No changes found for file '{file_path}'" # Attempt to get undo information from the tracker success, message, change_info = self.change_tracker.undo_change(change_id) - + if not success: # Improve error message (Point 4) - message from tracker should be specific self.io.tool_error(f"Failed to undo change '{change_id}': {message}") @@ -3029,7 +3029,6 @@ Just reply with fixed versions of the {blocks} above that failed to match. target_insertion_line = len(target_content.splitlines()) if target_content else 0 target_diff_snippet = self._generate_diff_snippet_insert(original_target_content, target_insertion_line, extracted_lines) - # --- Handle Dry Run --- # --- Handle Dry Run --- if dry_run: num_extracted = end_line - start_line + 1 @@ -3280,4 +3279,4 @@ Just reply with fixed versions of the {blocks} above that failed to match. 
return "\n".join(diff_lines_output) except Exception as e: - return f"[Diff generation error: {e}]" + return f"[Diff generation error: {e}]" \ No newline at end of file diff --git a/aider/coders/navigator_prompts.py b/aider/coders/navigator_prompts.py index 0d747736d..d2b535dbc 100644 --- a/aider/coders/navigator_prompts.py +++ b/aider/coders/navigator_prompts.py @@ -81,10 +81,10 @@ Act as an expert software engineer with the ability to autonomously navigate and - **IndentLines**: `[tool_call(IndentLines, file_path="...", start_pattern="...", end_pattern="...", indent_levels=1, near_context="...", occurrence=1, dry_run=False)]` Indent (`indent_levels` > 0) or unindent (`indent_levels` < 0) a block. Use `end_pattern` or `line_count` for range. Use `near_context` and `occurrence` (optional, default 1, -1 for last) for `start_pattern`. `dry_run=True` simulates. - -- **UndoChange**: `[tool_call(UndoChange, change_id="a1b2c3d4")]` or `[tool_call(UndoChange, last_file="...")]` - Undo a specific change by ID, or the last change made to `last_file`. - + +- **UndoChange**: `[tool_call(UndoChange, change_id="a1b2c3d4")]` or `[tool_call(UndoChange, file_path="...")]` + Undo a specific change by ID, or the last change made to the specified `file_path`. + - **ListChanges**: `[tool_call(ListChanges, file_path="...", limit=5)]` List recent changes, optionally filtered by `file_path` and limited. From 17f06c86b2ece0b9a32f99b261195874f1e93b73 Mon Sep 17 00:00:00 2001 From: "Amar Sood (tekacs)" Date: Sat, 12 Apr 2025 05:25:49 -0400 Subject: [PATCH 15/63] Accept SEARCH/REPLACE irrespective of tool calls. Separate tool calls, only after --- lines. 
--- aider/coders/navigator_coder.py | 108 ++++++++++++++++++++---------- aider/coders/navigator_prompts.py | 35 +++++++++- 2 files changed, 107 insertions(+), 36 deletions(-) diff --git a/aider/coders/navigator_coder.py b/aider/coders/navigator_coder.py index d4196b324..3495cba6c 100644 --- a/aider/coders/navigator_coder.py +++ b/aider/coders/navigator_coder.py @@ -247,9 +247,9 @@ class NavigatorCoder(Coder): """Process the completed response from the LLM. This is a key method that: - 1. Processes any tool commands in the response - 2. If tool commands were found, sets up for another automatic round - 3. Otherwise, completes the response normally + 1. Processes any tool commands in the response (only after a '---' line) + 2. Processes any SEARCH/REPLACE blocks in the response (regardless of tool calls) + 3. If tool commands were found, sets up for another automatic round This enables the "auto-exploration" workflow where the LLM can iteratively discover and analyze relevant files before providing @@ -272,6 +272,20 @@ class NavigatorCoder(Coder): # Process implicit file mentions using the content *after* tool calls were removed self._process_file_mentions(processed_content) + # Check if the content contains the SEARCH/REPLACE markers (do this regardless of tool calls) + has_search = "<<<<<<< SEARCH" in self.partial_response_content + has_divider = "=======" in self.partial_response_content + has_replace = ">>>>>>> REPLACE" in self.partial_response_content + edit_match = has_search and has_divider and has_replace + + if edit_match: + self.io.tool_output("Detected edit blocks, applying changes within Navigator...") + edited_files = self._apply_edits_from_response() + # If _apply_edits_from_response set a reflected_message (due to errors), + # return False to trigger a reflection loop. 
+ if self.reflected_message: + return False + # If any tool calls were found and we haven't exceeded reflection limits, set up for another iteration # This is implicit continuation when any tool calls are present, rather than requiring Continue explicitly if tool_calls_found and self.num_reflections < self.max_reflections: @@ -319,23 +333,6 @@ class NavigatorCoder(Coder): # Append results to the cleaned content self.partial_response_content += results_block - # Check if the content contains the SEARCH/REPLACE markers - has_search = "<<<<<<< SEARCH" in self.partial_response_content - has_divider = "=======" in self.partial_response_content - has_replace = ">>>>>>> REPLACE" in self.partial_response_content - edit_match = has_search and has_divider and has_replace - - if edit_match: - self.io.tool_output("Detected edit blocks, applying changes within Navigator...") - edited_files = self._apply_edits_from_response() - # If _apply_edits_from_response set a reflected_message (due to errors), - # return False to trigger a reflection loop. - if self.reflected_message: - return False - else: - # No edits detected. - pass - # After applying edits OR determining no edits were needed (and no reflection needed), # the turn is complete. Reset counters and finalize history. self.tool_call_count = 0 @@ -347,6 +344,11 @@ class NavigatorCoder(Coder): def _process_tool_commands(self, content): """ Process tool commands in the `[tool_call(name, param=value)]` format within the content. + + Rules: + 1. Tool calls must appear after the LAST '---' line separator in the content + 2. Any tool calls before this last separator are treated as text (not executed) + Returns processed content, result messages, and a flag indicating if any tool calls were found. 
""" result_messages = [] @@ -355,29 +357,43 @@ class NavigatorCoder(Coder): call_count = 0 max_calls = self.max_tool_calls - # Find tool calls using a more robust method - processed_content = "" + # Check if there's a '---' separator and only process tool calls after the LAST one + separator_marker = "\n---\n" + content_parts = content.split(separator_marker) + + # If there's no separator, treat the entire content as before the separator + if len(content_parts) == 1: + # Return the original content with no tool calls processed + return content, result_messages, False + + # Take everything before the last separator (including intermediate separators) + content_before_separator = separator_marker.join(content_parts[:-1]) + separator_marker + # Take only what comes after the last separator + content_after_separator = content_parts[-1] + + # Find tool calls using a more robust method, but only in the content after separator + processed_content = content_before_separator last_index = 0 start_marker = "[tool_call(" end_marker = "]" # The parenthesis balancing finds the ')', we just need the final ']' while True: - start_pos = content.find(start_marker, last_index) + start_pos = content_after_separator.find(start_marker, last_index) if start_pos == -1: - processed_content += content[last_index:] + processed_content += content_after_separator[last_index:] break # Check for escaped tool call: \[tool_call( - if start_pos > 0 and content[start_pos - 1] == '\\': + if start_pos > 0 and content_after_separator[start_pos - 1] == '\\': # Append the content including the escaped marker # We append up to start_pos + len(start_marker) to include the marker itself. 
- processed_content += content[last_index : start_pos + len(start_marker)] + processed_content += content_after_separator[last_index : start_pos + len(start_marker)] # Update last_index to search after this escaped marker last_index = start_pos + len(start_marker) continue # Continue searching for the next potential marker # Append content before the (non-escaped) tool call - processed_content += content[last_index:start_pos] + processed_content += content_after_separator[last_index:start_pos] scan_start_pos = start_pos + len(start_marker) paren_level = 1 @@ -387,8 +403,8 @@ class NavigatorCoder(Coder): end_paren_pos = -1 # Scan to find the matching closing parenthesis, respecting quotes - for i in range(scan_start_pos, len(content)): - char = content[i] + for i in range(scan_start_pos, len(content_after_separator)): + char = content_after_separator[i] if escaped: escaped = False @@ -411,17 +427,36 @@ class NavigatorCoder(Coder): actual_end_marker_start = -1 end_marker_found = False if end_paren_pos != -1: # Only search if we found a closing parenthesis - for j in range(expected_end_marker_start, len(content)): - if not content[j].isspace(): + for j in range(expected_end_marker_start, len(content_after_separator)): + if not content_after_separator[j].isspace(): actual_end_marker_start = j # Check if the found character is the end marker ']' - if content[actual_end_marker_start] == end_marker: + if content_after_separator[actual_end_marker_start] == end_marker: end_marker_found = True break # Stop searching after first non-whitespace char if not end_marker_found: + # Try to extract the tool name for better error message + tool_name = "unknown" + try: + # Look for the first comma after the tool call start + partial_content = content_after_separator[scan_start_pos:scan_start_pos+100] # Limit to avoid huge strings + comma_pos = partial_content.find(',') + if comma_pos > 0: + tool_name = partial_content[:comma_pos].strip() + else: + # If no comma, look for opening 
parenthesis or first whitespace + space_pos = partial_content.find(' ') + paren_pos = partial_content.find('(') + if space_pos > 0 and (paren_pos < 0 or space_pos < paren_pos): + tool_name = partial_content[:space_pos].strip() + elif paren_pos > 0: + tool_name = partial_content[:paren_pos].strip() + except: + pass # Silently fail if we can't extract the name + # Malformed call: couldn't find matching ')' or the subsequent ']' - self.io.tool_warning(f"Malformed tool call starting at index {start_pos}. Skipping (end_paren_pos={end_paren_pos}, end_marker_found={end_marker_found}).") + self.io.tool_warning(f"Malformed tool call for '{tool_name}'. Missing closing parenthesis or bracket. Skipping.") # Append the start marker itself to processed content so it's not lost processed_content += start_marker last_index = scan_start_pos # Continue searching after the marker @@ -429,8 +464,8 @@ class NavigatorCoder(Coder): # Found a potential tool call # Adjust full_match_str and last_index based on the actual end marker ']' position - full_match_str = content[start_pos : actual_end_marker_start + 1] # End marker ']' is 1 char - inner_content = content[scan_start_pos:end_paren_pos].strip() + full_match_str = content_after_separator[start_pos : actual_end_marker_start + 1] # End marker ']' is 1 char + inner_content = content_after_separator[scan_start_pos:end_paren_pos].strip() last_index = actual_end_marker_start + 1 # Move past the processed call (including ']') @@ -445,6 +480,9 @@ class NavigatorCoder(Coder): params = {} result_message = None + # Mark that we found at least one tool call (assuming it passes validation) + tool_calls_found = True + try: # Wrap the inner content to make it parseable as a function call # Example: ToolName, key="value" becomes f(ToolName, key="value") diff --git a/aider/coders/navigator_prompts.py b/aider/coders/navigator_prompts.py index d2b535dbc..68ed7fefc 100644 --- a/aider/coders/navigator_prompts.py +++ b/aider/coders/navigator_prompts.py @@ 
-114,6 +114,7 @@ When you include any tool call, the system will automatically continue to the ne 6. **Final Response**: Omit all tool calls when you have sufficient information to provide a final answer ### Tool Usage Best Practices +- All tool calls MUST be placed after a '---' line separator at the end of your message - Use the exact syntax `[tool_call(ToolName, param1=value1, param2="value2")]` for execution - Tool names are case-insensitive; parameters can be unquoted or quoted - Verify files aren't already in context before adding them with `View` @@ -121,6 +122,17 @@ When you include any tool call, the system will automatically continue to the ne - Target specific patterns rather than overly broad searches - Remember the `ViewFilesWithSymbol` tool is optimized for locating symbols across the codebase +### Format Example +``` +Your answer to the user's question... + +SEARCH/REPLACE blocks can appear anywhere in your response if needed. + +--- +[tool_call(ViewFilesMatching, pattern="findme")] +[tool_call(Command, command_string="ls -la")] +``` + ## Granular Editing Workflow **Note on Sequential Edits:** Tool calls within a single message execute sequentially. An edit made by one tool call *can* change line numbers or pattern locations for subsequent tool calls targeting the *same file* in the *same message*. Always check the result message and diff snippet after each edit. @@ -257,6 +269,7 @@ Always reply to the user in {language}. role="assistant", content="""I'll help you understand the authentication system in this project. Let me explore the codebase first to find all relevant files. +--- [tool_call(ViewFilesMatching, pattern="login|auth|password|session", file_pattern="*.py")]""", ), dict( @@ -267,6 +280,7 @@ Always reply to the user in {language}. role="assistant", content="""Great, I've found several files related to authentication. Let me examine them to understand the login flow. 
+--- [tool_call(View, file_path="auth/models.py")] [tool_call(View, file_path="auth/views.py")] [tool_call(View, file_path="users/authentication.py")]""", @@ -325,11 +339,30 @@ Here are summaries of some files present in this repo: system_reminder = """ ## Tool Command Reminder +- All tool calls MUST appear after a '---' line separator at the end of your message - To execute a tool, use: `[tool_call(ToolName, param1=value1)]` -- To show tool examples without executing: `\\[tool_call(ToolName, param1=value1)]` +- To show tool examples without executing: `\\[tool_call(ToolName, param1=value1)]` - Including ANY tool call will automatically continue to the next round - For final answers, do NOT include any tool calls +## SEARCH/REPLACE and Tool Call Format +- SEARCH/REPLACE blocks can appear anywhere in your response +- Tool calls MUST be at the end of your message, after a '---' separator +- Format example: + ``` + Your answer text here... + + file.py + <<<<<<< SEARCH + old code + ======= + new code + >>>>>>> REPLACE + + --- + [tool_call(ToolName, param1=value1)] + ``` + ## Context Features - Use enhanced context blocks (directory structure and git status) to orient yourself - Toggle context blocks with `/context-blocks` From 1e01482be6b1c24c2cf109751b1331e234a1f8d3 Mon Sep 17 00:00:00 2001 From: "Amar Sood (tekacs)" Date: Sat, 12 Apr 2025 05:48:25 -0400 Subject: [PATCH 16/63] Include a tree-sitter based outline of open files for the LLM --- aider/coders/navigator_coder.py | 122 +++++++++++++++++++++++++++----- aider/repomap.py | 45 ++++++++++-- 2 files changed, 147 insertions(+), 20 deletions(-) diff --git a/aider/coders/navigator_coder.py b/aider/coders/navigator_coder.py index 3495cba6c..6f824551b 100644 --- a/aider/coders/navigator_coder.py +++ b/aider/coders/navigator_coder.py @@ -7,11 +7,22 @@ import random import subprocess import traceback import platform +import ast +import re +import fnmatch +import os +import time +import random +import subprocess +import 
traceback +import platform import locale from datetime import datetime from pathlib import Path import xml.etree.ElementTree as ET from xml.etree.ElementTree import ParseError +# Add necessary imports if not already present +from collections import defaultdict from .base_coder import Coder from .editblock_coder import find_original_update_blocks, do_replace, find_similar_lines @@ -61,28 +72,104 @@ class NavigatorCoder(Coder): # Enable enhanced context blocks by default self.use_enhanced_context = True + def get_context_symbol_outline(self): + """ + Generate a symbol outline for files currently in context using Tree-sitter, + bypassing the cache for freshness. + """ + if not self.use_enhanced_context or not self.repo_map: + return None + + try: + result = "\n" + result += "## Symbol Outline (Current Context)\n\n" + result += "Code definitions (classes, functions, methods, etc.) found in files currently in chat context.\n\n" + + files_to_outline = list(self.abs_fnames) + list(self.abs_read_only_fnames) + if not files_to_outline: + result += "No files currently in context.\n" + result += "" + return result + + all_tags_by_file = defaultdict(list) + has_symbols = False + + # Use repo_map which should be initialized in BaseCoder + if not self.repo_map: + self.io.tool_warning("RepoMap not initialized, cannot generate symbol outline.") + return None # Or return a message indicating repo map is unavailable + + for abs_fname in sorted(files_to_outline): + rel_fname = self.get_rel_fname(abs_fname) + try: + # Call get_tags_raw directly to bypass cache and ensure freshness + tags = list(self.repo_map.get_tags_raw(abs_fname, rel_fname)) + if tags: + all_tags_by_file[rel_fname].extend(tags) + has_symbols = True + except Exception as e: + self.io.tool_warning(f"Could not get symbols for {rel_fname}: {e}") + + if not has_symbols: + result += "No symbols found in the current context files.\n" + else: + for rel_fname in sorted(all_tags_by_file.keys()): + tags = 
sorted(all_tags_by_file[rel_fname], key=lambda t: (t.line, t.name)) + + definition_tags = [] + for tag in tags: + # Use specific_kind first if available, otherwise fall back to kind + kind_to_check = tag.specific_kind or tag.kind + # Check if the kind represents a definition using the set from RepoMap + if kind_to_check and kind_to_check.lower() in self.repo_map.definition_kinds: + definition_tags.append(tag) + + if definition_tags: + result += f"### {rel_fname}\n" + # Simple list format for now, could be enhanced later (e.g., indentation for scope) + for tag in definition_tags: + # Display line number if available + line_info = f", line {tag.line + 1}" if tag.line >= 0 else "" + # Display the specific kind (which we checked) + kind_to_check = tag.specific_kind or tag.kind # Recalculate for safety + result += f"- {tag.name} ({kind_to_check}{line_info})\n" + result += "\n" # Add space between files + + result += "" + return result.strip() # Remove trailing newline if any + + except Exception as e: + self.io.tool_error(f"Error generating symbol outline: {str(e)}") + # Optionally include traceback for debugging if verbose + # if self.verbose: + # self.io.tool_error(traceback.format_exc()) + return None + def format_chat_chunks(self): """ - Override parent's format_chat_chunks to include enhanced context blocks with a + Override parent's format_chat_chunks to include enhanced context blocks with a cleaner, more hierarchical structure for better organization. 
""" # First get the normal chat chunks from the parent method - chunks = super().format_chat_chunks() - + chunks = super().format_chat_chunks() # Calls BaseCoder's format_chat_chunks + # If enhanced context blocks are enabled, prepend them to the current messages if self.use_enhanced_context: # Create environment info context block env_context = self.get_environment_info() - - # Get directory structure - dir_structure = self.get_directory_structure() - - # Get git status - git_status = self.get_git_status() - + # Get current context summary context_summary = self.get_context_summary() - + + # Get directory structure + dir_structure = self.get_directory_structure() + + # Get git status + git_status = self.get_git_status() + + # Get symbol outline for current context files + symbol_outline = self.get_context_symbol_outline() # New call + # Collect all context blocks that exist context_blocks = [] if env_context: @@ -93,21 +180,24 @@ class NavigatorCoder(Coder): context_blocks.append(dir_structure) if git_status: context_blocks.append(git_status) - - # If we have any context blocks, prepend them to the current messages + if symbol_outline: # Add the new block if it was generated + context_blocks.append(symbol_outline) + + # If we have any context blocks, prepend them to the system message if context_blocks: context_message = "\n\n".join(context_blocks) # Prepend to system context but don't overwrite existing system content if chunks.system: # If we already have system messages, append our context to the first one original_content = chunks.system[0]["content"] + # Ensure there's separation between our blocks and the original prompt chunks.system[0]["content"] = context_message + "\n\n" + original_content else: # Otherwise, create a new system message chunks.system = [dict(role="system", content=context_message)] - + return chunks - + def get_context_summary(self): """ Generate a summary of the current file context, including editable and read-only files, @@ -3317,4 
+3407,4 @@ Just reply with fixed versions of the {blocks} above that failed to match. return "\n".join(diff_lines_output) except Exception as e: - return f"[Diff generation error: {e}]" \ No newline at end of file + return f"[Diff generation error: {e}]" diff --git a/aider/repomap.py b/aider/repomap.py index 598770d18..b21d65f02 100644 --- a/aider/repomap.py +++ b/aider/repomap.py @@ -25,15 +25,23 @@ from aider.utils import Spinner warnings.simplefilter("ignore", category=FutureWarning) from grep_ast.tsl import USING_TSL_PACK, get_language, get_parser # noqa: E402 -Tag = namedtuple("Tag", "rel_fname fname line name kind".split()) +# Define the Tag namedtuple with a default for specific_kind to maintain compatibility +# with cached entries that might have been created with the old definition +class TagBase(namedtuple("TagBase", "rel_fname fname line name kind specific_kind")): + __slots__ = () + def __new__(cls, rel_fname, fname, line, name, kind, specific_kind=None): + # Provide a default value for specific_kind to handle old cached objects + return super(TagBase, cls).__new__(cls, rel_fname, fname, line, name, kind, specific_kind) + +Tag = TagBase SQLITE_ERRORS = (sqlite3.OperationalError, sqlite3.DatabaseError, OSError) -CACHE_VERSION = 3 +CACHE_VERSION = 5 if USING_TSL_PACK: - CACHE_VERSION = 4 + CACHE_VERSION = 6 class RepoMap: @@ -41,6 +49,17 @@ class RepoMap: warned_files = set() + # Define kinds that typically represent definitions across languages + # Used by NavigatorCoder to filter tags for the symbol outline + definition_kinds = { + "class", "struct", "enum", "interface", "trait", # Structure definitions + "function", "method", "constructor", # Function/method definitions + "module", "namespace", # Module/namespace definitions + "constant", "variable", # Top-level/class variable definitions (consider refining) + "type", # Type definitions + # Add more based on tree-sitter queries if needed + } + def __init__( self, map_tokens=1024, @@ -242,10 +261,23 @@ 
class RepoMap: if val is not None and val.get("mtime") == file_mtime: try: - return self.TAGS_CACHE[cache_key]["data"] + # Get the cached data + data = self.TAGS_CACHE[cache_key]["data"] + + # Let our Tag class handle compatibility with old cache formats + # No need for special handling as TagBase.__new__ will supply default specific_kind + + return data except SQLITE_ERRORS as e: self.tags_cache_error(e) return self.TAGS_CACHE[cache_key]["data"] + except (TypeError, AttributeError) as e: + # If we hit an error related to missing fields in old cached Tag objects, + # force a cache refresh for this file + if self.verbose: + self.io.tool_warning(f"Cache format error for {fname}, refreshing: {e}") + # Return empty list to trigger cache refresh + return [] # miss! data = list(self.get_tags_raw(fname, rel_fname)) @@ -304,11 +336,15 @@ class RepoMap: saw.add(kind) + # Extract specific kind from the tag, e.g., 'function' from 'name.definition.function' + specific_kind = tag.split('.')[-1] if '.' in tag else None + result = Tag( rel_fname=rel_fname, fname=fname, name=node.text.decode("utf-8"), kind=kind, + specific_kind=specific_kind, line=node.start_point[0], ) @@ -338,6 +374,7 @@ class RepoMap: fname=fname, name=token, kind="ref", + specific_kind="name", # Default for pygments fallback line=-1, ) From 832f478b13b0901ccdbe651f3bf52ded6c931503 Mon Sep 17 00:00:00 2001 From: "Amar Sood (tekacs)" Date: Sat, 12 Apr 2025 06:05:26 -0400 Subject: [PATCH 17/63] Add more guidance /to/ use granular tools and use-cases for each. 
--- aider/coders/navigator_prompts.py | 37 ++++++++++++++++++------------- aider/tools/__init__.py | 0 2 files changed, 21 insertions(+), 16 deletions(-) create mode 100644 aider/tools/__init__.py diff --git a/aider/coders/navigator_prompts.py b/aider/coders/navigator_prompts.py index 68ed7fefc..e5f7f0214 100644 --- a/aider/coders/navigator_prompts.py +++ b/aider/coders/navigator_prompts.py @@ -63,24 +63,31 @@ Act as an expert software engineer with the ability to autonomously navigate and ### Granular Editing Tools - **ReplaceText**: `[tool_call(ReplaceText, file_path="...", find_text="...", replace_text="...", near_context="...", occurrence=1, dry_run=False)]` Replace specific text. `near_context` (optional) helps find the right spot. `occurrence` (optional, default 1) specifies which match (-1 for last). `dry_run=True` simulates the change. + *Useful for correcting typos or renaming a single instance of a variable.* - **ReplaceAll**: `[tool_call(ReplaceAll, file_path="...", find_text="...", replace_text="...", dry_run=False)]` Replace ALL occurrences of text. Use with caution. `dry_run=True` simulates the change. + *Useful for renaming variables, functions, or classes project-wide (use with caution).* - **InsertBlock**: `[tool_call(InsertBlock, file_path="...", content="...", after_pattern="...", near_context="...", occurrence=1, dry_run=False)]` Insert a block after (`after_pattern`) or before (`before_pattern`) a pattern line. Use `near_context` and `occurrence` (optional, default 1, -1 for last) to specify which pattern match. `dry_run=True` simulates. + *Useful for adding new functions, methods, or blocks of configuration.* - **DeleteBlock**: `[tool_call(DeleteBlock, file_path="...", start_pattern="...", end_pattern="...", near_context="...", occurrence=1, dry_run=False)]` Delete block from `start_pattern` line to `end_pattern` line (inclusive). Use `line_count` instead of `end_pattern` for fixed number of lines. 
Use `near_context` and `occurrence` (optional, default 1, -1 for last) for `start_pattern`. `dry_run=True` simulates. + *Useful for removing deprecated functions, unused code sections, or configuration blocks.* - **ReplaceLine**: `[tool_call(ReplaceLine, file_path="...", line_number=42, new_content="...", dry_run=False)]` Replace a specific line number (1-based). `dry_run=True` simulates. + *Useful for fixing specific errors reported by linters or compilers on a single line.* - **ReplaceLines**: `[tool_call(ReplaceLines, file_path="...", start_line=42, end_line=45, new_content="...", dry_run=False)]` Replace a range of lines (1-based, inclusive). `dry_run=True` simulates. + *Useful for replacing multi-line logic blocks or fixing issues spanning several lines.* - **IndentLines**: `[tool_call(IndentLines, file_path="...", start_pattern="...", end_pattern="...", indent_levels=1, near_context="...", occurrence=1, dry_run=False)]` Indent (`indent_levels` > 0) or unindent (`indent_levels` < 0) a block. Use `end_pattern` or `line_count` for range. Use `near_context` and `occurrence` (optional, default 1, -1 for last) for `start_pattern`. `dry_run=True` simulates. + *Useful for fixing indentation errors reported by linters or reformatting code blocks.* - **UndoChange**: `[tool_call(UndoChange, change_id="a1b2c3d4")]` or `[tool_call(UndoChange, file_path="...")]` Undo a specific change by ID, or the last change made to the specified `file_path`. @@ -90,7 +97,7 @@ Act as an expert software engineer with the ability to autonomously navigate and - **ExtractLines**: `[tool_call(ExtractLines, source_file_path="...", target_file_path="...", start_pattern="...", end_pattern="...", near_context="...", occurrence=1, dry_run=False)]` Extract lines from `start_pattern` to `end_pattern` (or use `line_count`) in `source_file_path` and move them to `target_file_path`. Creates `target_file_path` if it doesn't exist. 
Use `near_context` and `occurrence` (optional, default 1, -1 for last) for `start_pattern`. `dry_run=True` simulates. - *Useful for refactoring by moving functions, classes, or blocks of code into separate files.* + *Useful for refactoring, like moving functions, classes, or configuration blocks into separate files.* ### Other Tools - **Command**: `[tool_call(Command, command_string="git diff HEAD~1")]` @@ -138,7 +145,7 @@ SEARCH/REPLACE blocks can appear anywhere in your response if needed. **Note on Sequential Edits:** Tool calls within a single message execute sequentially. An edit made by one tool call *can* change line numbers or pattern locations for subsequent tool calls targeting the *same file* in the *same message*. Always check the result message and diff snippet after each edit. 1. **Discover and View Files**: Use `ViewFilesAtGlob`, `ViewFilesMatching`, `ViewFilesWithSymbol` to locate relevant files. Use `View` to add specific files. -2. **Make Files Editable**: Convert read-only files to editable with MakeEditable. +2. **Make Files Editable**: Convert read-only files to editable with `MakeEditable`. For efficiency, you may include this tool call in the *same message* as the edit tool calls or SEARCH/REPLACE blocks that follow for the same file. 3. **Apply Edits (Default: Direct Edit)**: * For most edits where you are confident in the parameters (file path, patterns, line numbers), apply the change directly using the tool with `dry_run=False` (or omitting the parameter). * **Crucially, always review the diff snippet provided in the `[Result (ToolName): ...]` message** to confirm the change was applied correctly and in the intended location. @@ -161,7 +168,9 @@ SEARCH/REPLACE blocks can appear anywhere in your response if needed. 
## Code Editing Process -### Granular Editing with Tool Calls +### Granular Editing with Tool Calls (Preferred Method) +**Strongly prefer using the granular editing tools below for all code modifications.** They offer precision and reduce the risk of errors compared to SEARCH/REPLACE blocks. Only resort to SEARCH/REPLACE for complex, multi-location refactoring where granular tools would be exceptionally cumbersome. + For precise, targeted edits to code, use the granular editing tools: - **ReplaceText**: Replace specific instances of text in a file @@ -226,8 +235,8 @@ def new_function(param1, param2): """)] ``` -### SEARCH/REPLACE Block Format (Alternative Method) -For larger changes that involve multiple edits or significant restructuring, you can still use SEARCH/REPLACE blocks with this exact format: +### SEARCH/REPLACE Block Format (Use Sparingly) +**Again, prefer granular tools.** However, as a fallback, you can use SEARCH/REPLACE blocks with this exact format: ````python path/to/file.ext @@ -239,14 +248,13 @@ Replacement code lines ```` NOTE that this uses four backticks as the fence and not three! -### Editing Guidelines +#### Guidelines for SEARCH/REPLACE - Every SEARCH section must EXACTLY MATCH existing content, including whitespace and indentation - Keep edit blocks focused and concise - include only the necessary context - Include enough lines for uniqueness but avoid long unchanged sections - For new files, use an empty SEARCH section - To move code within a file, use two separate SEARCH/REPLACE blocks - Respect the file paths exactly as they appear -{quad_backtick_reminder} ### Error Handling - If a tool call returns an error message, analyze the error and try correcting the tool call parameters. 
@@ -343,11 +351,15 @@ Here are summaries of some files present in this repo: - To execute a tool, use: `[tool_call(ToolName, param1=value1)]` - To show tool examples without executing: `\\[tool_call(ToolName, param1=value1)]` - Including ANY tool call will automatically continue to the next round +- When editing with tools, you'll receive feedback to let you know how your edits went after they're applied - For final answers, do NOT include any tool calls -## SEARCH/REPLACE and Tool Call Format -- SEARCH/REPLACE blocks can appear anywhere in your response +## Tool Call Format - Tool calls MUST be at the end of your message, after a '---' separator +- You are encouraged to use tools for editing where possible, falling back to SEARCH/REPLACE when that doesn't work well. + +## SEARCH/REPLACE blocks +- When you use them, SEARCH/REPLACE blocks can appear anywhere in your response - Format example: ``` Your answer text here... @@ -368,13 +380,6 @@ Here are summaries of some files present in this repo: - Toggle context blocks with `/context-blocks` - Toggle large file truncation with `/context-management` -## Code Editing Reminder -When editing: -1. Make target files editable with `[tool_call(MakeEditable, file_path="...")]` -2. Use SEARCH/REPLACE blocks that EXACTLY match existing content -3. Keep edit blocks focused and concise -4. 
For ambiguous user inputs like "ok" or "go ahead", assume they want you to implement the changes - {lazy_prompt} {shell_cmd_reminder} diff --git a/aider/tools/__init__.py b/aider/tools/__init__.py new file mode 100644 index 000000000..e69de29bb From 9275bbc92acecb952dc35b4d067f418b45658efd Mon Sep 17 00:00:00 2001 From: "Amar Sood (tekacs)" Date: Sat, 12 Apr 2025 06:19:54 -0400 Subject: [PATCH 18/63] Add a comment to IndentLines --- aider/coders/navigator_prompts.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/aider/coders/navigator_prompts.py b/aider/coders/navigator_prompts.py index e5f7f0214..044461419 100644 --- a/aider/coders/navigator_prompts.py +++ b/aider/coders/navigator_prompts.py @@ -87,7 +87,7 @@ Act as an expert software engineer with the ability to autonomously navigate and - **IndentLines**: `[tool_call(IndentLines, file_path="...", start_pattern="...", end_pattern="...", indent_levels=1, near_context="...", occurrence=1, dry_run=False)]` Indent (`indent_levels` > 0) or unindent (`indent_levels` < 0) a block. Use `end_pattern` or `line_count` for range. Use `near_context` and `occurrence` (optional, default 1, -1 for last) for `start_pattern`. `dry_run=True` simulates. - *Useful for fixing indentation errors reported by linters or reformatting code blocks.* + *Useful for fixing indentation errors reported by linters or reformatting code blocks. Also helpful for adjusting indentation after moving code with `ExtractLines`.* - **UndoChange**: `[tool_call(UndoChange, change_id="a1b2c3d4")]` or `[tool_call(UndoChange, file_path="...")]` Undo a specific change by ID, or the last change made to the specified `file_path`. From 9cb0f5e203d98e5dd5c5a784a46a64d0ce3020bf Mon Sep 17 00:00:00 2001 From: "Amar Sood (tekacs)" Date: Sat, 12 Apr 2025 06:22:51 -0400 Subject: [PATCH 19/63] Split tools out of NavigatorCoder. 
--- aider/coders/navigator_coder.py | 1855 +------------------------ aider/tools/command.py | 55 + aider/tools/command_interactive.py | 40 + aider/tools/delete_block.py | 171 +++ aider/tools/extract_lines.py | 220 +++ aider/tools/indent_lines.py | 197 +++ aider/tools/insert_block.py | 160 +++ aider/tools/list_changes.py | 46 + aider/tools/ls.py | 48 + aider/tools/make_editable.py | 46 + aider/tools/make_readonly.py | 30 + aider/tools/remove.py | 45 + aider/tools/replace_all.py | 95 ++ aider/tools/replace_line.py | 115 ++ aider/tools/replace_lines.py | 139 ++ aider/tools/replace_text.py | 125 ++ aider/tools/undo_change.py | 56 + aider/tools/view.py | 13 + aider/tools/view_files_at_glob.py | 55 + aider/tools/view_files_matching.py | 71 + aider/tools/view_files_with_symbol.py | 75 + 21 files changed, 1853 insertions(+), 1804 deletions(-) create mode 100644 aider/tools/command.py create mode 100644 aider/tools/command_interactive.py create mode 100644 aider/tools/delete_block.py create mode 100644 aider/tools/extract_lines.py create mode 100644 aider/tools/indent_lines.py create mode 100644 aider/tools/insert_block.py create mode 100644 aider/tools/list_changes.py create mode 100644 aider/tools/ls.py create mode 100644 aider/tools/make_editable.py create mode 100644 aider/tools/make_readonly.py create mode 100644 aider/tools/remove.py create mode 100644 aider/tools/replace_all.py create mode 100644 aider/tools/replace_line.py create mode 100644 aider/tools/replace_lines.py create mode 100644 aider/tools/replace_text.py create mode 100644 aider/tools/undo_change.py create mode 100644 aider/tools/view.py create mode 100644 aider/tools/view_files_at_glob.py create mode 100644 aider/tools/view_files_matching.py create mode 100644 aider/tools/view_files_with_symbol.py diff --git a/aider/coders/navigator_coder.py b/aider/coders/navigator_coder.py index 6f824551b..a2e28fe8e 100644 --- a/aider/coders/navigator_coder.py +++ b/aider/coders/navigator_coder.py @@ -33,6 +33,27 @@ 
from aider import urls from aider.run_cmd import run_cmd, run_cmd_subprocess # Import the change tracker from aider.change_tracker import ChangeTracker +# Import tool functions +from aider.tools.view_files_at_glob import execute_view_files_at_glob +from aider.tools.view_files_matching import execute_view_files_matching +from aider.tools.ls import execute_ls +from aider.tools.view import execute_view +from aider.tools.remove import _execute_remove # Renamed to avoid conflict with os.remove +from aider.tools.make_editable import _execute_make_editable +from aider.tools.make_readonly import _execute_make_readonly +from aider.tools.view_files_with_symbol import _execute_view_files_with_symbol +from aider.tools.command import _execute_command +from aider.tools.command_interactive import _execute_command_interactive +from aider.tools.replace_text import _execute_replace_text +from aider.tools.replace_all import _execute_replace_all +from aider.tools.insert_block import _execute_insert_block +from aider.tools.delete_block import _execute_delete_block +from aider.tools.replace_line import _execute_replace_line +from aider.tools.replace_lines import _execute_replace_lines +from aider.tools.indent_lines import _execute_indent_lines +from aider.tools.undo_change import _execute_undo_change +from aider.tools.list_changes import _execute_list_changes +from aider.tools.extract_lines import _execute_extract_lines class NavigatorCoder(Coder): """Mode where the LLM autonomously manages which files are in context.""" @@ -653,62 +674,63 @@ class NavigatorCoder(Coder): if norm_tool_name == 'viewfilesatglob': pattern = params.get('pattern') if pattern is not None: - result_message = self._execute_view_files_at_glob(pattern) + # Call the imported function + result_message = execute_view_files_at_glob(self, pattern) else: result_message = "Error: Missing 'pattern' parameter for ViewFilesAtGlob" elif norm_tool_name == 'viewfilesmatching': pattern = params.get('pattern') file_pattern = 
params.get('file_pattern') # Optional if pattern is not None: - result_message = self._execute_view_files_matching(pattern, file_pattern) + result_message = execute_view_files_matching(self, pattern, file_pattern) else: result_message = "Error: Missing 'pattern' parameter for ViewFilesMatching" elif norm_tool_name == 'ls': directory = params.get('directory') if directory is not None: - result_message = self._execute_ls(directory) + result_message = execute_ls(self, directory) else: result_message = "Error: Missing 'directory' parameter for Ls" elif norm_tool_name == 'view': file_path = params.get('file_path') if file_path is not None: - result_message = self._execute_view(file_path) + result_message = execute_view(self, file_path) else: result_message = "Error: Missing 'file_path' parameter for View" elif norm_tool_name == 'remove': file_path = params.get('file_path') if file_path is not None: - result_message = self._execute_remove(file_path) + result_message = _execute_remove(self, file_path) else: result_message = "Error: Missing 'file_path' parameter for Remove" elif norm_tool_name == 'makeeditable': file_path = params.get('file_path') if file_path is not None: - result_message = self._execute_make_editable(file_path) + result_message = _execute_make_editable(self, file_path) else: result_message = "Error: Missing 'file_path' parameter for MakeEditable" elif norm_tool_name == 'makereadonly': file_path = params.get('file_path') if file_path is not None: - result_message = self._execute_make_readonly(file_path) + result_message = _execute_make_readonly(self, file_path) else: result_message = "Error: Missing 'file_path' parameter for MakeReadonly" elif norm_tool_name == 'viewfileswithsymbol': symbol = params.get('symbol') if symbol is not None: - result_message = self._execute_view_files_with_symbol(symbol) + result_message = _execute_view_files_with_symbol(self, symbol) else: result_message = "Error: Missing 'symbol' parameter for ViewFilesWithSymbol" elif 
norm_tool_name == 'command': command_string = params.get('command_string') if command_string is not None: - result_message = self._execute_command(command_string) + result_message = _execute_command(self, command_string) else: result_message = "Error: Missing 'command_string' parameter for Command" elif norm_tool_name == 'commandinteractive': command_string = params.get('command_string') if command_string is not None: - result_message = self._execute_command_interactive(command_string) + result_message = _execute_command_interactive(self, command_string) else: result_message = "Error: Missing 'command_string' parameter for CommandInteractive" @@ -723,8 +745,8 @@ class NavigatorCoder(Coder): dry_run = params.get('dry_run', False) # Default to False if file_path is not None and find_text is not None and replace_text is not None: - result_message = self._execute_replace_text( - file_path, find_text, replace_text, near_context, occurrence, change_id, dry_run + result_message = _execute_replace_text( + self, file_path, find_text, replace_text, near_context, occurrence, change_id, dry_run ) else: result_message = "Error: Missing required parameters for ReplaceText (file_path, find_text, replace_text)" @@ -737,8 +759,8 @@ class NavigatorCoder(Coder): dry_run = params.get('dry_run', False) # Default to False if file_path is not None and find_text is not None and replace_text is not None: - result_message = self._execute_replace_all( - file_path, find_text, replace_text, change_id, dry_run + result_message = _execute_replace_all( + self, file_path, find_text, replace_text, change_id, dry_run ) else: result_message = "Error: Missing required parameters for ReplaceAll (file_path, find_text, replace_text)" @@ -754,8 +776,8 @@ class NavigatorCoder(Coder): dry_run = params.get('dry_run', False) # New, default False if file_path is not None and content is not None and (after_pattern is not None or before_pattern is not None): - result_message = self._execute_insert_block( - 
file_path, content, after_pattern, before_pattern, near_context, occurrence, change_id, dry_run + result_message = _execute_insert_block( + self, file_path, content, after_pattern, before_pattern, near_context, occurrence, change_id, dry_run ) else: result_message = "Error: Missing required parameters for InsertBlock (file_path, content, and either after_pattern or before_pattern)" @@ -771,8 +793,8 @@ class NavigatorCoder(Coder): dry_run = params.get('dry_run', False) # New, default False if file_path is not None and start_pattern is not None: - result_message = self._execute_delete_block( - file_path, start_pattern, end_pattern, line_count, near_context, occurrence, change_id, dry_run + result_message = _execute_delete_block( + self, file_path, start_pattern, end_pattern, line_count, near_context, occurrence, change_id, dry_run ) else: result_message = "Error: Missing required parameters for DeleteBlock (file_path, start_pattern)" @@ -785,8 +807,8 @@ class NavigatorCoder(Coder): dry_run = params.get('dry_run', False) # New, default False if file_path is not None and line_number is not None and new_content is not None: - result_message = self._execute_replace_line( - file_path, line_number, new_content, change_id, dry_run + result_message = _execute_replace_line( + self, file_path, line_number, new_content, change_id, dry_run ) else: result_message = "Error: Missing required parameters for ReplaceLine (file_path, line_number, new_content)" @@ -800,8 +822,8 @@ class NavigatorCoder(Coder): dry_run = params.get('dry_run', False) # New, default False if file_path is not None and start_line is not None and end_line is not None and new_content is not None: - result_message = self._execute_replace_lines( - file_path, start_line, end_line, new_content, change_id, dry_run + result_message = _execute_replace_lines( + self, file_path, start_line, end_line, new_content, change_id, dry_run ) else: result_message = "Error: Missing required parameters for ReplaceLines (file_path, 
start_line, end_line, new_content)" @@ -818,8 +840,8 @@ class NavigatorCoder(Coder): dry_run = params.get('dry_run', False) # New, default False if file_path is not None and start_pattern is not None: - result_message = self._execute_indent_lines( - file_path, start_pattern, end_pattern, line_count, indent_levels, near_context, occurrence, change_id, dry_run + result_message = _execute_indent_lines( + self, file_path, start_pattern, end_pattern, line_count, indent_levels, near_context, occurrence, change_id, dry_run ) else: result_message = "Error: Missing required parameters for IndentLines (file_path, start_pattern)" @@ -828,13 +850,13 @@ class NavigatorCoder(Coder): change_id = params.get('change_id') file_path = params.get('file_path') - result_message = self._execute_undo_change(change_id, file_path) + result_message = _execute_undo_change(self, change_id, file_path) elif norm_tool_name == 'listchanges': file_path = params.get('file_path') limit = params.get('limit', 10) - result_message = self._execute_list_changes(file_path, limit) + result_message = _execute_list_changes(self, file_path, limit) elif norm_tool_name == 'extractlines': source_file_path = params.get('source_file_path') @@ -847,8 +869,8 @@ class NavigatorCoder(Coder): dry_run = params.get('dry_run', False) if source_file_path and target_file_path and start_pattern: - result_message = self._execute_extract_lines( - source_file_path, target_file_path, start_pattern, end_pattern, + result_message = _execute_extract_lines( + self, source_file_path, target_file_path, start_pattern, end_pattern, line_count, near_context, occurrence, dry_run ) else: @@ -1050,187 +1072,9 @@ Just reply with fixed versions of the {blocks} above that failed to match. return edited_files - def _execute_view_files_at_glob(self, pattern): - """ - Execute a glob pattern and add matching files to context as read-only. 
- This tool helps the LLM find files by pattern matching, similar to - how a developer would use glob patterns to find files. - """ - try: - # Find files matching the pattern - matching_files = [] - - # Make the pattern relative to root if it's absolute - if pattern.startswith('/'): - pattern = os.path.relpath(pattern, self.root) - - # Get all files in the repo - all_files = self.get_all_relative_files() - - # Find matches with pattern matching - for file in all_files: - if fnmatch.fnmatch(file, pattern): - matching_files.append(file) - - # Limit the number of files added if there are too many matches - if len(matching_files) > self.max_files_per_glob: - self.io.tool_output( - f"⚠️ Found {len(matching_files)} files matching '{pattern}', " - f"limiting to {self.max_files_per_glob} most relevant files." - ) - # Sort by modification time (most recent first) - matching_files.sort(key=lambda f: os.path.getmtime(self.abs_root_path(f)), reverse=True) - matching_files = matching_files[:self.max_files_per_glob] - - # Add files to context - for file in matching_files: - self._add_file_to_context(file) - - # Return a user-friendly result - if matching_files: - if len(matching_files) > 10: - brief = ', '.join(matching_files[:5]) + f', and {len(matching_files)-5} more' - self.io.tool_output(f"📂 Added {len(matching_files)} files matching '{pattern}': {brief}") - else: - self.io.tool_output(f"📂 Added files matching '{pattern}': {', '.join(matching_files)}") - return f"Added {len(matching_files)} files: {', '.join(matching_files[:5])}{' and more' if len(matching_files) > 5 else ''}" - else: - self.io.tool_output(f"⚠️ No files found matching '{pattern}'") - return f"No files found matching '{pattern}'" - except Exception as e: - self.io.tool_error(f"Error in ViewFilesAtGlob: {str(e)}") - return f"Error: {str(e)}" - def _execute_view_files_matching(self, search_pattern, file_pattern=None): - """ - Search for pattern in files and add matching files to context as read-only. 
- This tool lets the LLM search for content within files, mimicking - how a developer would use grep to find relevant code. - """ - try: - # Get list of files to search - if file_pattern: - # Use glob pattern to filter files - all_files = self.get_all_relative_files() - files_to_search = [] - for file in all_files: - if fnmatch.fnmatch(file, file_pattern): - files_to_search.append(file) - - if not files_to_search: - return f"No files matching '{file_pattern}' to search for pattern '{search_pattern}'" - else: - # Search all files if no pattern provided - files_to_search = self.get_all_relative_files() - - # Search for pattern in files - matches = {} - for file in files_to_search: - abs_path = self.abs_root_path(file) - try: - with open(abs_path, 'r', encoding='utf-8') as f: - content = f.read() - if search_pattern in content: - matches[file] = content.count(search_pattern) - except Exception: - # Skip files that can't be read (binary, etc.) - pass - - # Limit the number of files added if there are too many matches - if len(matches) > self.max_files_per_glob: - self.io.tool_output( - f"⚠️ Found '{search_pattern}' in {len(matches)} files, " - f"limiting to {self.max_files_per_glob} files with most matches." 
- ) - # Sort by number of matches (most matches first) - sorted_matches = sorted(matches.items(), key=lambda x: x[1], reverse=True) - matches = dict(sorted_matches[:self.max_files_per_glob]) - - # Add matching files to context - for file in matches: - self._add_file_to_context(file) - - # Return a user-friendly result - if matches: - # Sort by number of matches (most matches first) - sorted_matches = sorted(matches.items(), key=lambda x: x[1], reverse=True) - match_list = [f"{file} ({count} matches)" for file, count in sorted_matches[:5]] - - if len(sorted_matches) > 5: - self.io.tool_output(f"🔍 Found '{search_pattern}' in {len(matches)} files: {', '.join(match_list)} and {len(matches)-5} more") - return f"Found in {len(matches)} files: {', '.join(match_list)} and {len(matches)-5} more" - else: - self.io.tool_output(f"🔍 Found '{search_pattern}' in: {', '.join(match_list)}") - return f"Found in {len(matches)} files: {', '.join(match_list)}" - else: - self.io.tool_output(f"⚠️ Pattern '{search_pattern}' not found in any files") - return f"Pattern not found in any files" - except Exception as e: - self.io.tool_error(f"Error in ViewFilesMatching: {str(e)}") - return f"Error: {str(e)}" - - def _execute_ls(self, dir_path): - """ - List files in directory and optionally add some to context. - - This provides information about the structure of the codebase, - similar to how a developer would explore directories. 
- """ - try: - # Make the path relative to root if it's absolute - if dir_path.startswith('/'): - rel_dir = os.path.relpath(dir_path, self.root) - else: - rel_dir = dir_path - - # Get absolute path - abs_dir = self.abs_root_path(rel_dir) - - # Check if path exists - if not os.path.exists(abs_dir): - self.io.tool_output(f"⚠️ Directory '{dir_path}' not found") - return f"Directory not found" - - # Get directory contents - contents = [] - try: - with os.scandir(abs_dir) as entries: - for entry in entries: - if entry.is_file() and not entry.name.startswith('.'): - rel_path = os.path.join(rel_dir, entry.name) - contents.append(rel_path) - except NotADirectoryError: - # If it's a file, just return the file - contents = [rel_dir] - - if contents: - self.io.tool_output(f"📋 Listed {len(contents)} file(s) in '{dir_path}'") - if len(contents) > 10: - return f"Found {len(contents)} files: {', '.join(contents[:10])}..." - else: - return f"Found {len(contents)} files: {', '.join(contents)}" - else: - self.io.tool_output(f"📋 No files found in '{dir_path}'") - return f"No files found in directory" - except Exception as e: - self.io.tool_error(f"Error in ls: {str(e)}") - return f"Error: {str(e)}" - - def _execute_view(self, file_path): - """ - Explicitly add a file to context as read-only. - - This gives the LLM explicit control over what files to view, - rather than relying on indirect mentions. - """ - try: - # Use the helper, marking it as an explicit view request - return self._add_file_to_context(file_path, explicit=True) - except Exception as e: - self.io.tool_error(f"Error viewing file: {str(e)}") - return f"Error: {str(e)}" def _add_file_to_context(self, file_path, explicit=False): """ @@ -1295,289 +1139,11 @@ Just reply with fixed versions of the {blocks} above that failed to match. 
self.io.tool_error(f"Error adding file '{file_path}' for viewing: {str(e)}") return f"Error adding file for viewing: {str(e)}" - def _execute_make_editable(self, file_path): - """ - Convert a read-only file to an editable file. - - This allows the LLM to upgrade a file from read-only to editable - when it determines it needs to make changes to that file. - """ - try: - # Get absolute path - abs_path = self.abs_root_path(file_path) - rel_path = self.get_rel_fname(abs_path) - - # Check if file is already editable - if abs_path in self.abs_fnames: - self.io.tool_output(f"📝 File '{file_path}' is already editable") - return f"File is already editable" - # Check if file exists on disk - if not os.path.isfile(abs_path): - self.io.tool_output(f"⚠️ File '{file_path}' not found") - return f"Error: File not found" - - # File exists, is not editable, might be read-only or not in context yet - was_read_only = False - if abs_path in self.abs_read_only_fnames: - self.abs_read_only_fnames.remove(abs_path) - was_read_only = True - - # Add to editable files - self.abs_fnames.add(abs_path) - - if was_read_only: - self.io.tool_output(f"📝 Moved '{file_path}' from read-only to editable") - return f"File is now editable (moved from read-only)" - else: - # File was not previously in context at all - self.io.tool_output(f"📝 Added '{file_path}' directly to editable context") - # Track if added during exploration? Maybe not needed for direct MakeEditable. - # self.files_added_in_exploration.add(rel_path) # Consider if needed - return f"File is now editable (added directly)" - except Exception as e: - self.io.tool_error(f"Error in MakeEditable for '{file_path}': {str(e)}") - return f"Error: {str(e)}" - - def _execute_make_readonly(self, file_path): - """ - Convert an editable file to a read-only file. - - This allows the LLM to downgrade a file from editable to read-only - when it determines it no longer needs to make changes to that file. 
- """ - try: - # Get absolute path - abs_path = self.abs_root_path(file_path) - rel_path = self.get_rel_fname(abs_path) - - # Check if file is in editable context - if abs_path not in self.abs_fnames: - if abs_path in self.abs_read_only_fnames: - self.io.tool_output(f"📚 File '{file_path}' is already read-only") - return f"File is already read-only" - else: - self.io.tool_output(f"⚠️ File '{file_path}' not in context") - return f"File not in context" - - # Move from editable to read-only - self.abs_fnames.remove(abs_path) - self.abs_read_only_fnames.add(abs_path) - - self.io.tool_output(f"📚 Made '{file_path}' read-only") - return f"File is now read-only" - except Exception as e: - self.io.tool_error(f"Error making file read-only: {str(e)}") - return f"Error: {str(e)}" - def _execute_remove(self, file_path): - """ - Explicitly remove a file from context. - - This allows the LLM to clean up its context when files are no - longer needed, keeping the context focused and efficient. - """ - try: - # Get absolute path - abs_path = self.abs_root_path(file_path) - rel_path = self.get_rel_fname(abs_path) - # Check if file is in context (either editable or read-only) - removed = False - if abs_path in self.abs_fnames: - # Don't remove if it's the last editable file and there are no read-only files - if len(self.abs_fnames) <= 1 and not self.abs_read_only_fnames: - self.io.tool_output(f"⚠️ Cannot remove '{file_path}' - it's the only file in context") - return f"Cannot remove - last file in context" - self.abs_fnames.remove(abs_path) - removed = True - elif abs_path in self.abs_read_only_fnames: - # Don't remove if it's the last read-only file and there are no editable files - if len(self.abs_read_only_fnames) <= 1 and not self.abs_fnames: - self.io.tool_output(f"⚠️ Cannot remove '{file_path}' - it's the only file in context") - return f"Cannot remove - last file in context" - self.abs_read_only_fnames.remove(abs_path) - removed = True - if not removed: - 
self.io.tool_output(f"⚠️ File '{file_path}' not in context") - return f"File not in context" - # Track in recently removed - self.recently_removed[rel_path] = { - 'removed_at': time.time() - } - - self.io.tool_output(f"🗑️ Explicitly removed '{file_path}' from context") - return f"Removed file from context" - except Exception as e: - self.io.tool_error(f"Error removing file: {str(e)}") - return f"Error: {str(e)}" - - def _execute_view_files_with_symbol(self, symbol): - """ - Find files containing a specific symbol and add them to context as read-only. - """ - try: - if not self.repo_map: - self.io.tool_output("⚠️ Repo map not available, cannot use ViewFilesWithSymbol tool.") - return "Repo map not available" - - if not symbol: - return "Error: Missing 'symbol' parameter for ViewFilesWithSymbol" - - self.io.tool_output(f"🔎 Searching for symbol '{symbol}'...") - found_files = set() - current_context_files = self.abs_fnames | self.abs_read_only_fnames - files_to_search = set(self.get_all_abs_files()) - current_context_files - - rel_fname_to_abs = {} - all_tags = [] - - for fname in files_to_search: - rel_fname = self.get_rel_fname(fname) - rel_fname_to_abs[rel_fname] = fname - try: - tags = self.repo_map.get_tags(fname, rel_fname) - all_tags.extend(tags) - except Exception as e: - self.io.tool_warning(f"Could not get tags for {rel_fname}: {e}") - - # Find matching symbols - for tag in all_tags: - if tag.name == symbol: - # Use absolute path directly if available, otherwise resolve from relative path - abs_fname = rel_fname_to_abs.get(tag.rel_fname) or self.abs_root_path(tag.fname) - if abs_fname in files_to_search: # Ensure we only add files we intended to search - found_files.add(abs_fname) - - # Limit the number of files added - if len(found_files) > self.max_files_per_glob: - self.io.tool_output( - f"⚠️ Found symbol '{symbol}' in {len(found_files)} files, " - f"limiting to {self.max_files_per_glob} most relevant files." 
- ) - # Sort by modification time (most recent first) - approximate relevance - sorted_found_files = sorted(list(found_files), key=lambda f: os.path.getmtime(f), reverse=True) - found_files = set(sorted_found_files[:self.max_files_per_glob]) - - # Add files to context (as read-only) - added_count = 0 - added_files_rel = [] - for abs_file_path in found_files: - rel_path = self.get_rel_fname(abs_file_path) - # Double check it's not already added somehow - if abs_file_path not in self.abs_fnames and abs_file_path not in self.abs_read_only_fnames: - add_result = self._add_file_to_context(rel_path, explicit=True) # Use explicit=True for clear output - if "Added" in add_result: - added_count += 1 - added_files_rel.append(rel_path) - - if added_count > 0: - if added_count > 5: - brief = ', '.join(added_files_rel[:5]) + f', and {added_count-5} more' - self.io.tool_output(f"🔎 Found '{symbol}' and added {added_count} files: {brief}") - else: - self.io.tool_output(f"🔎 Found '{symbol}' and added files: {', '.join(added_files_rel)}") - return f"Found symbol '{symbol}' and added {added_count} files as read-only." - else: - self.io.tool_output(f"⚠️ Symbol '{symbol}' not found in searchable files.") - return f"Symbol '{symbol}' not found in searchable files." - - except Exception as e: - self.io.tool_error(f"Error in ViewFilesWithSymbol: {str(e)}") - return f"Error: {str(e)}" - - def _execute_command(self, command_string): - """ - Execute an aider command after user confirmation. 
- """ - try: - # Ask for confirmation before executing, allowing 'Always' - # Use the command string itself as the group key to remember preference per command - if not self.io.confirm_ask( - "Allow execution of this command?", - subject=command_string, - explicit_yes_required=True, # Require explicit 'yes' or 'always' - allow_never=True # Enable the 'Don't ask again' option - ): - # Check if the reason for returning False was *not* because it's remembered - # (confirm_ask returns False if 'n' or 'no' is chosen, even if remembered) - # We only want to skip if the user actively said no *this time* or if it's - # remembered as 'never' (which shouldn't happen with allow_never=True, - # but checking io.never_ask_group is robust). - # If the command is in never_ask_group with a True value (meaning Always), - # confirm_ask would have returned True directly. - # So, if confirm_ask returns False here, it means the user chose No this time. - self.io.tool_output(f"Skipped execution of shell command: {command_string}") - return "Shell command execution skipped by user." - - self.io.tool_output(f"⚙️ Executing non-interactive shell command: {command_string}") - - # Use run_cmd_subprocess for non-interactive execution - exit_status, combined_output = run_cmd_subprocess( - command_string, - verbose=self.verbose, - cwd=self.root # Execute in the project root - ) - - # Format the output for the result message, include more content - output_content = combined_output or "" - # Use the existing token threshold constant as the character limit for truncation - output_limit = self.large_file_token_threshold - if len(output_content) > output_limit: - # Truncate and add a clear message using the constant value - output_content = output_content[:output_limit] + f"\n... (output truncated at {output_limit} characters, based on large_file_token_threshold)" - - if exit_status == 0: - return f"Shell command executed successfully (exit code 0). 
Output:\n{output_content}" - else: - return f"Shell command failed with exit code {exit_status}. Output:\n{output_content}" - - except Exception as e: - self.io.tool_error(f"Error executing non-interactive shell command '{command_string}': {str(e)}") - # Optionally include traceback for debugging if verbose - # if self.verbose: - # self.io.tool_error(traceback.format_exc()) - return f"Error executing command: {str(e)}" - - def _execute_command_interactive(self, command_string): - """ - Execute an interactive shell command using run_cmd (which uses pexpect/PTY). - """ - try: - self.io.tool_output(f"⚙️ Starting interactive shell command: {command_string}") - self.io.tool_output(">>> You may need to interact with the command below <<<") - - # Use run_cmd which handles PTY logic - exit_status, combined_output = run_cmd( - command_string, - verbose=self.verbose, # Pass verbose flag - error_print=self.io.tool_error, # Use io for error printing - cwd=self.root # Execute in the project root - ) - - self.io.tool_output(">>> Interactive command finished <<<") - - # Format the output for the result message, include more content - output_content = combined_output or "" - # Use the existing token threshold constant as the character limit for truncation - output_limit = self.large_file_token_threshold - if len(output_content) > output_limit: - # Truncate and add a clear message using the constant value - output_content = output_content[:output_limit] + f"\n... (output truncated at {output_limit} characters, based on large_file_token_threshold)" - - if exit_status == 0: - return f"Interactive command finished successfully (exit code 0). Output:\n{output_content}" - else: - return f"Interactive command finished with exit code {exit_status}. 
Output:\n{output_content}" - - except Exception as e: - self.io.tool_error(f"Error executing interactive shell command '{command_string}': {str(e)}") - # Optionally include traceback for debugging if verbose - # if self.verbose: - # self.io.tool_error(traceback.format_exc()) - return f"Error executing interactive command: {str(e)}" def _process_file_mentions(self, content): """ @@ -1898,1334 +1464,15 @@ Just reply with fixed versions of the {blocks} above that failed to match. # ------------------- Granular Editing Tools ------------------- - def _execute_replace_text(self, file_path, find_text, replace_text, near_context=None, occurrence=1, change_id=None, dry_run=False): - """ - Replace specific text with new text, optionally using nearby context for disambiguation. - - Parameters: - - file_path: Path to the file to modify - - find_text: Text to find and replace - - replace_text: Text to replace it with - - near_context: Optional text nearby to help locate the correct instance - - occurrence: Which occurrence to replace (1-based index, or -1 for last) - - change_id: Optional ID for tracking the change - - - change_id: Optional ID for tracking the change - - dry_run: If True, simulate the change without modifying the file - - - change_id: Optional ID for tracking the change - - dry_run: If True, simulate the change without modifying the file - - Returns a result message. - """ - try: - # Get absolute file path - abs_path = self.abs_root_path(file_path) - rel_path = self.get_rel_fname(abs_path) - - # Check if file exists - if not os.path.isfile(abs_path): - self.io.tool_error(f"File '{file_path}' not found") - return f"Error: File not found" - - # Check if file is in editable context - if abs_path not in self.abs_fnames: - if abs_path in self.abs_read_only_fnames: - self.io.tool_error(f"File '{file_path}' is read-only. Use MakeEditable first.") - return f"Error: File is read-only. Use MakeEditable first." 
- else: - self.io.tool_error(f"File '{file_path}' not in context") - return f"Error: File not in context" - - # Reread file content immediately before modification (Fixes Point 3: Stale Reads) - content = self.io.read_text(abs_path) - if content is None: - # Provide more specific error (Improves Point 4) - self.io.tool_error(f"Could not read file '{file_path}' before ReplaceText operation.") - return f"Error: Could not read file '{file_path}'" - # Find occurrences using helper function - occurrences = self._find_occurrences(content, find_text, near_context) - - if not occurrences: - err_msg = f"Text '{find_text}' not found" - if near_context: - err_msg += f" near context '{near_context}'" - err_msg += f" in file '{file_path}'." - self.io.tool_error(err_msg) - return f"Error: {err_msg}" # Improve Point 4 - - # Select the occurrence (handle 1-based index and -1 for last) - num_occurrences = len(occurrences) - try: - occurrence = int(occurrence) # Ensure occurrence is an integer - if occurrence == -1: # Last occurrence - target_idx = num_occurrences - 1 - elif occurrence > 0 and occurrence <= num_occurrences: - target_idx = occurrence - 1 # Convert 1-based to 0-based - else: - err_msg = f"Occurrence number {occurrence} is out of range. Found {num_occurrences} occurrences of '{find_text}'" - if near_context: err_msg += f" near '{near_context}'" - err_msg += f" in '{file_path}'." - self.io.tool_error(err_msg) - return f"Error: {err_msg}" # Improve Point 4 - except ValueError: - self.io.tool_error(f"Invalid occurrence value: '{occurrence}'. 
Must be an integer.") - return f"Error: Invalid occurrence value '{occurrence}'" - - start_index = occurrences[target_idx] - - # Perform the replacement - original_content = content - new_content = content[:start_index] + replace_text + content[start_index + len(find_text):] - - if original_content == new_content: - self.io.tool_warning(f"No changes made: replacement text is identical to original") - return f"Warning: No changes made (replacement identical to original)" - - # Generate diff for feedback - diff_example = self._generate_diff_snippet(original_content, start_index, len(find_text), replace_text) - - # Handle dry run (Implements Point 6) - if dry_run: - self.io.tool_output(f"Dry run: Would replace occurrence {occurrence} of '{find_text}' in {file_path}") - return f"Dry run: Would replace text (occurrence {occurrence}). Diff snippet:\n{diff_example}" - - # --- Apply Change (Not dry run) --- - self.io.write_text(abs_path, new_content) - - # Track the change - try: - metadata = { - 'start_index': start_index, - 'find_text': find_text, - 'replace_text': replace_text, - 'near_context': near_context, - 'occurrence': occurrence - } - change_id = self.change_tracker.track_change( - file_path=rel_path, - change_type='replacetext', - original_content=original_content, - new_content=new_content, - metadata=metadata, - change_id=change_id - ) - except Exception as track_e: - self.io.tool_error(f"Error tracking change for ReplaceText: {track_e}") - # Continue even if tracking fails, but warn - change_id = "TRACKING_FAILED" - - self.aider_edited_files.add(rel_path) - - # Improve feedback (Point 5 & 6) - occurrence_str = f"occurrence {occurrence}" if num_occurrences > 1 else "text" - self.io.tool_output(f"✅ Replaced {occurrence_str} in {file_path} (change_id: {change_id})") - return f"Successfully replaced {occurrence_str} (change_id: {change_id}). 
Diff snippet:\n{diff_example}" - - except Exception as e: - self.io.tool_error(f"Error in ReplaceText: {str(e)}\n{traceback.format_exc()}") # Add traceback - return f"Error: {str(e)}" - def _execute_replace_all(self, file_path, find_text, replace_text, change_id=None, dry_run=False): - """ - Replace all occurrences of text in a file. - - Parameters: - - file_path: Path to the file to modify - - find_text: Text to find and replace - - replace_text: Text to replace it with - - change_id: Optional ID for tracking the change - - Returns a result message. - """ - try: - # Get absolute file path - abs_path = self.abs_root_path(file_path) - rel_path = self.get_rel_fname(abs_path) - # Check if file exists - if not os.path.isfile(abs_path): - self.io.tool_error(f"File '{file_path}' not found") - return f"Error: File not found" - - # Check if file is in editable context - if abs_path not in self.abs_fnames: - if abs_path in self.abs_read_only_fnames: - self.io.tool_error(f"File '{file_path}' is read-only. Use MakeEditable first.") - return f"Error: File is read-only. Use MakeEditable first." 
- else: - self.io.tool_error(f"File '{file_path}' not in context") - return f"Error: File not in context" - - # Reread file content immediately before modification (Fixes Point 3: Stale Reads) - content = self.io.read_text(abs_path) - if content is None: - # Provide more specific error (Improves Point 4) - self.io.tool_error(f"Could not read file '{file_path}' before ReplaceAll operation.") - return f"Error: Could not read file '{file_path}'" - # Count occurrences - count = content.count(find_text) - if count == 0: - self.io.tool_warning(f"Text '{find_text}' not found in file") - return f"Warning: Text not found in file" + - # Perform the replacement - original_content = content - new_content = content.replace(find_text, replace_text) - - if original_content == new_content: - self.io.tool_warning(f"No changes made: replacement text is identical to original") - return f"Warning: No changes made (replacement identical to original)" - - # Generate diff for feedback (more comprehensive for ReplaceAll) - diff_examples = self._generate_diff_chunks(original_content, find_text, replace_text) - - # Handle dry run (Implements Point 6) - if dry_run: - self.io.tool_output(f"Dry run: Would replace {count} occurrences of '{find_text}' in {file_path}") - return f"Dry run: Would replace {count} occurrences. 
Diff examples:\n{diff_examples}" - - # --- Apply Change (Not dry run) --- - self.io.write_text(abs_path, new_content) - - # Track the change - try: - metadata = { - 'find_text': find_text, - 'replace_text': replace_text, - 'occurrences': count - } - change_id = self.change_tracker.track_change( - file_path=rel_path, - change_type='replaceall', - original_content=original_content, - new_content=new_content, - metadata=metadata, - change_id=change_id - ) - except Exception as track_e: - self.io.tool_error(f"Error tracking change for ReplaceAll: {track_e}") - # Continue even if tracking fails, but warn - change_id = "TRACKING_FAILED" - - self.aider_edited_files.add(rel_path) - - # Improve feedback (Point 6) - self.io.tool_output(f"✅ Replaced {count} occurrences in {file_path} (change_id: {change_id})") - return f"Successfully replaced {count} occurrences (change_id: {change_id}). Diff examples:\n{diff_examples}" - - except Exception as e: - self.io.tool_error(f"Error in ReplaceAll: {str(e)}\n{traceback.format_exc()}") # Add traceback - return f"Error: {str(e)}" - - def _execute_insert_block(self, file_path, content, after_pattern=None, before_pattern=None, near_context=None, occurrence=1, change_id=None, dry_run=False): - """ - Insert a block of text after or before a specified pattern. - - Parameters: - - file_path: Path to the file to modify - - content: Text block to insert - - after_pattern: Pattern after which to insert the block (line containing this pattern) - specify one of after/before - - before_pattern: Pattern before which to insert the block (line containing this pattern) - specify one of after/before - - near_context: Optional text nearby to help locate the correct instance of the pattern - - occurrence: Which occurrence of the pattern to use (1-based index, or -1 for last) - - change_id: Optional ID for tracking the change - - dry_run: If True, simulate the change without modifying the file - - Returns a result message. 
- """ - try: - # Get absolute file path - abs_path = self.abs_root_path(file_path) - rel_path = self.get_rel_fname(abs_path) - - # Check if file exists - if not os.path.isfile(abs_path): - self.io.tool_error(f"File '{file_path}' not found") - return f"Error: File not found" - - # Check if file is in editable context - if abs_path not in self.abs_fnames: - if abs_path in self.abs_read_only_fnames: - self.io.tool_error(f"File '{file_path}' is read-only. Use MakeEditable first.") - return f"Error: File is read-only. Use MakeEditable first." - else: - self.io.tool_error(f"File '{file_path}' not in context") - return f"Error: File not in context" - - # Reread file content immediately before modification (Fixes Point 3: Stale Reads) - file_content = self.io.read_text(abs_path) - if file_content is None: - # Provide more specific error (Improves Point 4) - self.io.tool_error(f"Could not read file '{file_path}' before InsertBlock operation.") - return f"Error: Could not read file '{file_path}'" - - # Validate we have either after_pattern or before_pattern, but not both - if after_pattern and before_pattern: - self.io.tool_error("Cannot specify both after_pattern and before_pattern") - return "Error: Cannot specify both after_pattern and before_pattern" - if not after_pattern and not before_pattern: - self.io.tool_error("Must specify either after_pattern or before_pattern") - return "Error: Must specify either after_pattern or before_pattern" - - # Split into lines for easier handling - lines = file_content.splitlines() - original_content = file_content - - # Find occurrences of the pattern (either after_pattern or before_pattern) - pattern = after_pattern if after_pattern else before_pattern - pattern_type = "after" if after_pattern else "before" - - # Find line indices containing the pattern - pattern_line_indices = [] - for i, line in enumerate(lines): - if pattern in line: - # If near_context is provided, check if it's nearby - if near_context: - context_window_start = 
max(0, i - 5) # Check 5 lines before/after - context_window_end = min(len(lines), i + 6) - context_block = "\n".join(lines[context_window_start:context_window_end]) - if near_context in context_block: - pattern_line_indices.append(i) - else: - pattern_line_indices.append(i) - - if not pattern_line_indices: - err_msg = f"Pattern '{pattern}' not found" - if near_context: err_msg += f" near context '{near_context}'" - err_msg += f" in file '{file_path}'." - self.io.tool_error(err_msg) - return f"Error: {err_msg}" # Improve Point 4 - - # Select the occurrence (Implements Point 5) - num_occurrences = len(pattern_line_indices) - try: - occurrence = int(occurrence) # Ensure occurrence is an integer - if occurrence == -1: # Last occurrence - target_idx = num_occurrences - 1 - elif occurrence > 0 and occurrence <= num_occurrences: - target_idx = occurrence - 1 # Convert 1-based to 0-based - else: - err_msg = f"Occurrence number {occurrence} is out of range for pattern '{pattern}'. Found {num_occurrences} occurrences" - if near_context: err_msg += f" near '{near_context}'" - err_msg += f" in '{file_path}'." - self.io.tool_error(err_msg) - return f"Error: {err_msg}" # Improve Point 4 - except ValueError: - self.io.tool_error(f"Invalid occurrence value: '{occurrence}'. 
Must be an integer.") - return f"Error: Invalid occurrence value '{occurrence}'" - - # Determine the final insertion line index - insertion_line_idx = pattern_line_indices[target_idx] - if pattern_type == "after": - insertion_line_idx += 1 # Insert on the line *after* the matched line - # Prepare the content to insert - content_lines = content.splitlines() - - # Create the new lines array - new_lines = lines[:insertion_line_idx] + content_lines + lines[insertion_line_idx:] - new_content = '\n'.join(new_lines) # Use '\n' to match io.write_text behavior - - if original_content == new_content: - self.io.tool_warning(f"No changes made: insertion would not change file") - return f"Warning: No changes made (insertion would not change file)" - - # Generate diff for feedback - diff_snippet = self._generate_diff_snippet_insert(original_content, insertion_line_idx, content_lines) - - # Handle dry run (Implements Point 6) - if dry_run: - occurrence_str = f"occurrence {occurrence} of " if num_occurrences > 1 else "" - self.io.tool_output(f"Dry run: Would insert block {pattern_type} {occurrence_str}pattern '{pattern}' in {file_path}") - return f"Dry run: Would insert block. 
Diff snippet:\n{diff_snippet}" - - # --- Apply Change (Not dry run) --- - self.io.write_text(abs_path, new_content) - - # Track the change - try: - metadata = { - 'insertion_line_idx': insertion_line_idx, - 'after_pattern': after_pattern, - 'before_pattern': before_pattern, - 'near_context': near_context, - 'occurrence': occurrence, - 'content': content - } - change_id = self.change_tracker.track_change( - file_path=rel_path, - change_type='insertblock', - original_content=original_content, - new_content=new_content, - metadata=metadata, - change_id=change_id - ) - except Exception as track_e: - self.io.tool_error(f"Error tracking change for InsertBlock: {track_e}") - change_id = "TRACKING_FAILED" - - self.aider_edited_files.add(rel_path) - - # Improve feedback (Point 5 & 6) - occurrence_str = f"occurrence {occurrence} of " if num_occurrences > 1 else "" - self.io.tool_output(f"✅ Inserted block {pattern_type} {occurrence_str}pattern in {file_path} (change_id: {change_id})") - return f"Successfully inserted block (change_id: {change_id}). Diff snippet:\n{diff_snippet}" - - except Exception as e: - self.io.tool_error(f"Error in InsertBlock: {str(e)}\n{traceback.format_exc()}") # Add traceback - return f"Error: {str(e)}" - - def _execute_delete_block(self, file_path, start_pattern, end_pattern=None, line_count=None, near_context=None, occurrence=1, change_id=None, dry_run=False): - """ - Delete a block of text between start_pattern and end_pattern (inclusive). 
- - Parameters: - - file_path: Path to the file to modify - - start_pattern: Pattern marking the start of the block to delete (line containing this pattern) - - end_pattern: Optional pattern marking the end of the block (line containing this pattern) - - line_count: Optional number of lines to delete (alternative to end_pattern) - - near_context: Optional text nearby to help locate the correct instance of the start_pattern - - occurrence: Which occurrence of the start_pattern to use (1-based index, or -1 for last) - - change_id: Optional ID for tracking the change - - dry_run: If True, simulate the change without modifying the file - - Returns a result message. - """ - try: - # Get absolute file path - abs_path = self.abs_root_path(file_path) - rel_path = self.get_rel_fname(abs_path) - - # Check if file exists - if not os.path.isfile(abs_path): - self.io.tool_error(f"File '{file_path}' not found") - return f"Error: File not found" - - # Check if file is in editable context - if abs_path not in self.abs_fnames: - if abs_path in self.abs_read_only_fnames: - self.io.tool_error(f"File '{file_path}' is read-only. Use MakeEditable first.") - return f"Error: File is read-only. Use MakeEditable first." 
- else: - self.io.tool_error(f"File '{file_path}' not in context") - return f"Error: File not in context" - - # Reread file content immediately before modification (Fixes Point 3: Stale Reads) - file_content = self.io.read_text(abs_path) - if file_content is None: - # Provide more specific error (Improves Point 4) - self.io.tool_error(f"Could not read file '{file_path}' before DeleteBlock operation.") - return f"Error: Could not read file '{file_path}'" - - # Validate we have either end_pattern or line_count, but not both - if end_pattern and line_count: - self.io.tool_error("Cannot specify both end_pattern and line_count") - return "Error: Cannot specify both end_pattern and line_count" - - # Split into lines for easier handling - lines = file_content.splitlines() - original_content = file_content - - # Find occurrences of the start_pattern (Implements Point 5) - start_pattern_line_indices = [] - for i, line in enumerate(lines): - if start_pattern in line: - # If near_context is provided, check if it's nearby - if near_context: - context_window_start = max(0, i - 5) # Check 5 lines before/after - context_window_end = min(len(lines), i + 6) - context_block = "\n".join(lines[context_window_start:context_window_end]) - if near_context in context_block: - start_pattern_line_indices.append(i) - else: - start_pattern_line_indices.append(i) - - if not start_pattern_line_indices: - err_msg = f"Start pattern '{start_pattern}' not found" - if near_context: err_msg += f" near context '{near_context}'" - err_msg += f" in file '{file_path}'." 
- self.io.tool_error(err_msg) - return f"Error: {err_msg}" # Improve Point 4 - - # Select the occurrence for the start pattern - num_occurrences = len(start_pattern_line_indices) - try: - occurrence = int(occurrence) # Ensure occurrence is an integer - if occurrence == -1: # Last occurrence - target_idx = num_occurrences - 1 - elif occurrence > 0 and occurrence <= num_occurrences: - target_idx = occurrence - 1 # Convert 1-based to 0-based - else: - err_msg = f"Occurrence number {occurrence} is out of range for start pattern '{start_pattern}'. Found {num_occurrences} occurrences" - if near_context: err_msg += f" near '{near_context}'" - err_msg += f" in '{file_path}'." - self.io.tool_error(err_msg) - return f"Error: {err_msg}" # Improve Point 4 - except ValueError: - self.io.tool_error(f"Invalid occurrence value: '{occurrence}'. Must be an integer.") - return f"Error: Invalid occurrence value '{occurrence}'" - - start_line = start_pattern_line_indices[target_idx] - occurrence_str = f"occurrence {occurrence} of " if num_occurrences > 1 else "" # For messages - # Find the end line based on end_pattern or line_count - end_line = -1 - if end_pattern: - # Search for end_pattern *after* the selected start_line - for i in range(start_line, len(lines)): # Include start_line itself if start/end are same line - if end_pattern in lines[i]: - end_line = i - break - - if end_line == -1: - # Improve error message (Point 4) - err_msg = f"End pattern '{end_pattern}' not found after {occurrence_str}start pattern '{start_pattern}' (line {start_line + 1}) in '{file_path}'." - self.io.tool_error(err_msg) - return f"Error: {err_msg}" - elif line_count: - try: - line_count = int(line_count) - if line_count <= 0: - raise ValueError("Line count must be positive") - # Calculate end line based on start line and line count - end_line = min(start_line + line_count - 1, len(lines) - 1) - except ValueError: - self.io.tool_error(f"Invalid line_count value: '{line_count}'. 
Must be a positive integer.") - return f"Error: Invalid line_count value '{line_count}'" - else: - # If neither end_pattern nor line_count is specified, delete just the start line - end_line = start_line - # Prepare the deletion - deleted_lines = lines[start_line:end_line+1] - new_lines = lines[:start_line] + lines[end_line+1:] - new_content = '\n'.join(new_lines) # Use '\n' to match io.write_text behavior - - if original_content == new_content: - self.io.tool_warning(f"No changes made: deletion would not change file") - return f"Warning: No changes made (deletion would not change file)" - - # Generate diff for feedback - diff_snippet = self._generate_diff_snippet_delete(original_content, start_line, end_line) - - # Handle dry run (Implements Point 6) - if dry_run: - self.io.tool_output(f"Dry run: Would delete lines {start_line+1}-{end_line+1} (based on {occurrence_str}start pattern '{start_pattern}') in {file_path}") - return f"Dry run: Would delete block. Diff snippet:\n{diff_snippet}" - - # --- Apply Change (Not dry run) --- - self.io.write_text(abs_path, new_content) - - # Track the change - try: - metadata = { - 'start_line': start_line + 1, # Store 1-based for consistency - 'end_line': end_line + 1, # Store 1-based - 'start_pattern': start_pattern, - 'end_pattern': end_pattern, - 'line_count': line_count, - 'near_context': near_context, - 'occurrence': occurrence, - 'deleted_content': '\n'.join(deleted_lines) - } - change_id = self.change_tracker.track_change( - file_path=rel_path, - change_type='deleteblock', - original_content=original_content, - new_content=new_content, - metadata=metadata, - change_id=change_id - ) - except Exception as track_e: - self.io.tool_error(f"Error tracking change for DeleteBlock: {track_e}") - change_id = "TRACKING_FAILED" - - self.aider_edited_files.add(rel_path) - - # Improve feedback (Point 5 & 6) - num_deleted = end_line - start_line + 1 - self.io.tool_output(f"✅ Deleted {num_deleted} lines (from {occurrence_str}start 
pattern) in {file_path} (change_id: {change_id})") - return f"Successfully deleted {num_deleted} lines (change_id: {change_id}). Diff snippet:\n{diff_snippet}" - - except Exception as e: - self.io.tool_error(f"Error in DeleteBlock: {str(e)}\n{traceback.format_exc()}") # Add traceback - return f"Error: {str(e)}" - - def _execute_undo_change(self, change_id=None, file_path=None): - """ - Undo a specific change by ID, or the last change to a file. - - Parameters: - - change_id: ID of the change to undo - - file_path: Path to file where the last change should be undone - - - Returns a result message. - """ - # Note: Undo does not have a dry_run parameter as it's inherently about reverting a previous action. - try: - # Validate parameters - if change_id is None and file_path is None: - self.io.tool_error("Must specify either change_id or file_path for UndoChange") - return "Error: Must specify either change_id or file_path" # Improve Point 4 - - # If file_path is specified, get the most recent change for that file - if file_path: - abs_path = self.abs_root_path(file_path) - rel_path = self.get_rel_fname(abs_path) - - change_id = self.change_tracker.get_last_change(rel_path) - if not change_id: - # Improve error message (Point 4) - self.io.tool_error(f"No tracked changes found for file '{file_path}' to undo.") - return f"Error: No changes found for file '{file_path}'" - # Attempt to get undo information from the tracker - success, message, change_info = self.change_tracker.undo_change(change_id) - - if not success: - # Improve error message (Point 4) - message from tracker should be specific - self.io.tool_error(f"Failed to undo change '{change_id}': {message}") - return f"Error: {message}" - - # Apply the undo by restoring the original content - if change_info: - file_path = change_info['file_path'] - abs_path = self.abs_root_path(file_path) - # Write the original content back to the file - # No dry_run check here, as undo implies a real action - 
self.io.write_text(abs_path, change_info['original']) - self.aider_edited_files.add(file_path) # Track that the file was modified by the undo - - change_type = change_info['type'] - # Improve feedback (Point 6) - self.io.tool_output(f"✅ Undid {change_type} change '{change_id}' in {file_path}") - return f"Successfully undid {change_type} change '{change_id}'." - else: - # This case should ideally not be reached if tracker returns success - self.io.tool_error(f"Failed to undo change '{change_id}': Change info missing after successful tracker update.") - return f"Error: Failed to undo change '{change_id}' (missing change info)" - - except Exception as e: - self.io.tool_error(f"Error in UndoChange: {str(e)}\n{traceback.format_exc()}") # Add traceback - return f"Error: {str(e)}" - - def _execute_replace_line(self, file_path, line_number, new_content, change_id=None, dry_run=False): - """ - Replace a specific line identified by line number. - Useful for fixing errors identified by error messages or linters. - - Parameters: - - file_path: Path to the file to modify - - line_number: The line number to replace (1-based) - - new_content: New content for the line - - change_id: Optional ID for tracking the change - - dry_run: If True, simulate the change without modifying the file - - Returns a result message. - """ - try: - # Get absolute file path - abs_path = self.abs_root_path(file_path) - rel_path = self.get_rel_fname(abs_path) - - # Check if file exists - if not os.path.isfile(abs_path): - self.io.tool_error(f"File '{file_path}' not found") - return f"Error: File not found" - - # Check if file is in editable context - if abs_path not in self.abs_fnames: - if abs_path in self.abs_read_only_fnames: - self.io.tool_error(f"File '{file_path}' is read-only. Use MakeEditable first.") - return f"Error: File is read-only. Use MakeEditable first." 
- else: - self.io.tool_error(f"File '{file_path}' not in context") - return f"Error: File not in context" - - # Reread file content immediately before modification (Fixes Point 3: Stale Reads) - file_content = self.io.read_text(abs_path) - if file_content is None: - # Provide more specific error (Improves Point 4) - self.io.tool_error(f"Could not read file '{file_path}' before ReplaceLine operation.") - return f"Error: Could not read file '{file_path}'" - - # Split into lines - lines = file_content.splitlines() - - # Validate line number - if not isinstance(line_number, int): - try: - line_number = int(line_number) - except ValueError: - self.io.tool_error(f"Line number must be an integer, got '{line_number}'") - # Improve error message (Point 4) - self.io.tool_error(f"Invalid line_number value: '{line_number}'. Must be an integer.") - return f"Error: Invalid line_number value '{line_number}'" - - # Convert 1-based line number (what most editors and error messages use) to 0-based index - idx = line_number - 1 - - if idx < 0 or idx >= len(lines): - # Improve error message (Point 4) - self.io.tool_error(f"Line number {line_number} is out of range for file '{file_path}' (has {len(lines)} lines).") - return f"Error: Line number {line_number} out of range" - - # Store original content for change tracking - original_content = file_content - original_line = lines[idx] - - # Replace the line - lines[idx] = new_content - - # Join lines back into a string - new_content_full = '\n'.join(lines) - - if original_content == new_content_full: - self.io.tool_warning("No changes made: new line content is identical to original") - return f"Warning: No changes made (new content identical to original)" - - # Create a readable diff for the line replacement - diff = f"Line {line_number}:\n- {original_line}\n+ {new_content}" - - # Handle dry run (Implements Point 6) - if dry_run: - self.io.tool_output(f"Dry run: Would replace line {line_number} in {file_path}") - return f"Dry run: Would 
replace line {line_number}. Diff:\n{diff}" - - # --- Apply Change (Not dry run) --- - self.io.write_text(abs_path, new_content_full) - - # Track the change - try: - metadata = { - 'line_number': line_number, - 'original_line': original_line, - 'new_line': new_content - } - change_id = self.change_tracker.track_change( - file_path=rel_path, - change_type='replaceline', - original_content=original_content, - new_content=new_content_full, - metadata=metadata, - change_id=change_id - ) - except Exception as track_e: - self.io.tool_error(f"Error tracking change for ReplaceLine: {track_e}") - change_id = "TRACKING_FAILED" - - self.aider_edited_files.add(rel_path) - - # Improve feedback (Point 6) - self.io.tool_output(f"✅ Replaced line {line_number} in {file_path} (change_id: {change_id})") - return f"Successfully replaced line {line_number} (change_id: {change_id}). Diff:\n{diff}" - - except Exception as e: - self.io.tool_error(f"Error in ReplaceLine: {str(e)}\n{traceback.format_exc()}") # Add traceback - return f"Error: {str(e)}" - def _execute_replace_lines(self, file_path, start_line, end_line, new_content, change_id=None, dry_run=False): - """ - Replace a range of lines identified by line numbers. - Useful for fixing errors identified by error messages or linters. - - Parameters: - - file_path: Path to the file to modify - - start_line: The first line number to replace (1-based) - - end_line: The last line number to replace (1-based) - - new_content: New content for the lines (can be multi-line) - - change_id: Optional ID for tracking the change - - dry_run: If True, simulate the change without modifying the file - - Returns a result message. 
- """ - try: - # Get absolute file path - abs_path = self.abs_root_path(file_path) - rel_path = self.get_rel_fname(abs_path) - - # Check if file exists - if not os.path.isfile(abs_path): - self.io.tool_error(f"File '{file_path}' not found") - return f"Error: File not found" - - # Check if file is in editable context - if abs_path not in self.abs_fnames: - if abs_path in self.abs_read_only_fnames: - self.io.tool_error(f"File '{file_path}' is read-only. Use MakeEditable first.") - return f"Error: File is read-only. Use MakeEditable first." - else: - self.io.tool_error(f"File '{file_path}' not in context") - return f"Error: File not in context" - - # Reread file content immediately before modification (Fixes Point 3: Stale Reads) - file_content = self.io.read_text(abs_path) - if file_content is None: - # Provide more specific error (Improves Point 4) - self.io.tool_error(f"Could not read file '{file_path}' before ReplaceLines operation.") - return f"Error: Could not read file '{file_path}'" - - # Convert line numbers to integers if needed - if not isinstance(start_line, int): - try: - start_line = int(start_line) - except ValueError: - # Improve error message (Point 4) - self.io.tool_error(f"Invalid start_line value: '{start_line}'. Must be an integer.") - return f"Error: Invalid start_line value '{start_line}'" - - if not isinstance(end_line, int): - try: - end_line = int(end_line) - except ValueError: - # Improve error message (Point 4) - self.io.tool_error(f"Invalid end_line value: '{end_line}'. 
Must be an integer.") - return f"Error: Invalid end_line value '{end_line}'" - - # Split into lines - lines = file_content.splitlines() - - # Convert 1-based line numbers to 0-based indices - start_idx = start_line - 1 - end_idx = end_line - 1 - # Validate line numbers - if start_idx < 0 or start_idx >= len(lines): - # Improve error message (Point 4) - self.io.tool_error(f"Start line {start_line} is out of range for file '{file_path}' (has {len(lines)} lines).") - return f"Error: Start line {start_line} out of range" - - if end_idx < start_idx or end_idx >= len(lines): - # Improve error message (Point 4) - self.io.tool_error(f"End line {end_line} is out of range for file '{file_path}' (must be >= start line {start_line} and <= {len(lines)}).") - return f"Error: End line {end_line} out of range" - - # Store original content for change tracking - original_content = file_content - replaced_lines = lines[start_idx:end_idx+1] - - # Split the new content into lines - new_lines = new_content.splitlines() - - # Perform the replacement - new_full_lines = lines[:start_idx] + new_lines + lines[end_idx+1:] - new_content_full = '\n'.join(new_full_lines) - - if original_content == new_content_full: - self.io.tool_warning("No changes made: new content is identical to original") - return f"Warning: No changes made (new content identical to original)" - - # Create a readable diff for the lines replacement - diff = f"Lines {start_line}-{end_line}:\n" - # Add removed lines with - prefix - for line in replaced_lines: - diff += f"- {line}\n" - # Add separator - diff += "---\n" - # Add new lines with + prefix - for line in new_lines: - diff += f"+ {line}\n" - - # Handle dry run (Implements Point 6) - if dry_run: - self.io.tool_output(f"Dry run: Would replace lines {start_line}-{end_line} in {file_path}") - return f"Dry run: Would replace lines {start_line}-{end_line}. 
Diff:\n{diff}" - - # --- Apply Change (Not dry run) --- - self.io.write_text(abs_path, new_content_full) - - # Track the change - try: - metadata = { - 'start_line': start_line, - 'end_line': end_line, - 'replaced_lines': replaced_lines, - 'new_lines': new_lines - } - change_id = self.change_tracker.track_change( - file_path=rel_path, - change_type='replacelines', - original_content=original_content, - new_content=new_content_full, - metadata=metadata, - change_id=change_id - ) - except Exception as track_e: - self.io.tool_error(f"Error tracking change for ReplaceLines: {track_e}") - change_id = "TRACKING_FAILED" - - self.aider_edited_files.add(rel_path) - replaced_count = end_line - start_line + 1 - new_count = len(new_lines) - - # Improve feedback (Point 6) - self.io.tool_output(f"✅ Replaced lines {start_line}-{end_line} ({replaced_count} lines) with {new_count} new lines in {file_path} (change_id: {change_id})") - return f"Successfully replaced lines {start_line}-{end_line} with {new_count} new lines (change_id: {change_id}). Diff:\n{diff}" - - except Exception as e: - self.io.tool_error(f"Error in ReplaceLines: {str(e)}\n{traceback.format_exc()}") # Add traceback - return f"Error: {str(e)}" - def _execute_indent_lines(self, file_path, start_pattern, end_pattern=None, line_count=None, indent_levels=1, near_context=None, occurrence=1, change_id=None, dry_run=False): - """ - Indent or unindent a block of lines in a file. 
- - Parameters: - - file_path: Path to the file to modify - - start_pattern: Pattern marking the start of the block to indent (line containing this pattern) - - end_pattern: Optional pattern marking the end of the block (line containing this pattern) - - line_count: Optional number of lines to indent (alternative to end_pattern) - - indent_levels: Number of levels to indent (positive) or unindent (negative) - - near_context: Optional text nearby to help locate the correct instance of the start_pattern - - occurrence: Which occurrence of the start_pattern to use (1-based index, or -1 for last) - - change_id: Optional ID for tracking the change - - dry_run: If True, simulate the change without modifying the file - - Returns a result message. - """ - try: - # Get absolute file path - abs_path = self.abs_root_path(file_path) - rel_path = self.get_rel_fname(abs_path) - - # Check if file exists - if not os.path.isfile(abs_path): - self.io.tool_error(f"File '{file_path}' not found") - return f"Error: File not found" - - # Check if file is in editable context - if abs_path not in self.abs_fnames: - if abs_path in self.abs_read_only_fnames: - self.io.tool_error(f"File '{file_path}' is read-only. Use MakeEditable first.") - return f"Error: File is read-only. Use MakeEditable first." 
- else: - self.io.tool_error(f"File '{file_path}' not in context") - return f"Error: File not in context" - - # Reread file content immediately before modification (Fixes Point 3: Stale Reads) - file_content = self.io.read_text(abs_path) - if file_content is None: - # Provide more specific error (Improves Point 4) - self.io.tool_error(f"Could not read file '{file_path}' before IndentLines operation.") - return f"Error: Could not read file '{file_path}'" - - # Validate we have either end_pattern or line_count, but not both - if end_pattern and line_count: - self.io.tool_error("Cannot specify both end_pattern and line_count") - return "Error: Cannot specify both end_pattern and line_count" - - # Split into lines for easier handling - lines = file_content.splitlines() - original_content = file_content - - # Find occurrences of the start_pattern (Implements Point 5) - start_pattern_line_indices = [] - for i, line in enumerate(lines): - if start_pattern in line: - # If near_context is provided, check if it's nearby - if near_context: - context_window_start = max(0, i - 5) # Check 5 lines before/after - context_window_end = min(len(lines), i + 6) - context_block = "\n".join(lines[context_window_start:context_window_end]) - if near_context in context_block: - start_pattern_line_indices.append(i) - else: - start_pattern_line_indices.append(i) - if not start_pattern_line_indices: - err_msg = f"Start pattern '{start_pattern}' not found" - if near_context: err_msg += f" near context '{near_context}'" - err_msg += f" in file '{file_path}'." 
- self.io.tool_error(err_msg) - return f"Error: {err_msg}" # Improve Point 4 - # Select the occurrence for the start pattern - num_occurrences = len(start_pattern_line_indices) - try: - occurrence = int(occurrence) # Ensure occurrence is an integer - if occurrence == -1: # Last occurrence - target_idx = num_occurrences - 1 - elif occurrence > 0 and occurrence <= num_occurrences: - target_idx = occurrence - 1 # Convert 1-based to 0-based - else: - err_msg = f"Occurrence number {occurrence} is out of range for start pattern '{start_pattern}'. Found {num_occurrences} occurrences" - if near_context: err_msg += f" near '{near_context}'" - err_msg += f" in '{file_path}'." - self.io.tool_error(err_msg) - return f"Error: {err_msg}" # Improve Point 4 - except ValueError: - self.io.tool_error(f"Invalid occurrence value: '{occurrence}'. Must be an integer.") - return f"Error: Invalid occurrence value '{occurrence}'" - - start_line = start_pattern_line_indices[target_idx] - occurrence_str = f"occurrence {occurrence} of " if num_occurrences > 1 else "" # For messages - # Find the end line based on end_pattern or line_count - end_line = -1 - if end_pattern: - # Search for end_pattern *after* the selected start_line - for i in range(start_line, len(lines)): # Include start_line itself if start/end are same line - if end_pattern in lines[i]: - end_line = i - break - - if end_line == -1: - # Improve error message (Point 4) - err_msg = f"End pattern '{end_pattern}' not found after {occurrence_str}start pattern '{start_pattern}' (line {start_line + 1}) in '{file_path}'." - self.io.tool_error(err_msg) - return f"Error: {err_msg}" - elif line_count: - try: - line_count = int(line_count) - if line_count <= 0: - raise ValueError("Line count must be positive") - # Calculate end line based on start line and line count - end_line = min(start_line + line_count - 1, len(lines) - 1) - except ValueError: - self.io.tool_error(f"Invalid line_count value: '{line_count}'. 
Must be a positive integer.") - return f"Error: Invalid line_count value '{line_count}'" - else: - # If neither end_pattern nor line_count is specified, indent just the start line - end_line = start_line - # Determine indentation amount (using spaces for simplicity, could adapt based on file type later) - try: - indent_levels = int(indent_levels) - except ValueError: - self.io.tool_error(f"Invalid indent_levels value: '{indent_levels}'. Must be an integer.") - return f"Error: Invalid indent_levels value '{indent_levels}'" - - indent_str = ' ' * 4 # Assume 4 spaces per level - - # Create a temporary copy to calculate the change - modified_lines = list(lines) # Copy the list - - # Apply indentation to the temporary copy - for i in range(start_line, end_line + 1): - if indent_levels > 0: - # Add indentation - modified_lines[i] = (indent_str * indent_levels) + modified_lines[i] - elif indent_levels < 0: - # Remove indentation, but do not remove more than exists - spaces_to_remove = abs(indent_levels) * len(indent_str) - current_leading_spaces = len(modified_lines[i]) - len(modified_lines[i].lstrip(' ')) - actual_remove = min(spaces_to_remove, current_leading_spaces) - if actual_remove > 0: - modified_lines[i] = modified_lines[i][actual_remove:] - # If indent_levels is 0, do nothing - - # Join lines back into a string - new_content = '\n'.join(modified_lines) # Use '\n' to match io.write_text behavior - - if original_content == new_content: - self.io.tool_warning(f"No changes made: indentation would not change file") - return f"Warning: No changes made (indentation would not change file)" - - # Generate diff for feedback - diff_snippet = self._generate_diff_snippet_indent(original_content, new_content, start_line, end_line) - - # Handle dry run (Implements Point 6) - if dry_run: - action = "indent" if indent_levels > 0 else "unindent" - self.io.tool_output(f"Dry run: Would {action} lines {start_line+1}-{end_line+1} (based on {occurrence_str}start pattern 
'{start_pattern}') in {file_path}") - return f"Dry run: Would {action} block. Diff snippet:\n{diff_snippet}" - - # --- Apply Change (Not dry run) --- - self.io.write_text(abs_path, new_content) - - # Track the change - try: - metadata = { - 'start_line': start_line + 1, # Store 1-based - 'end_line': end_line + 1, # Store 1-based - 'start_pattern': start_pattern, - 'end_pattern': end_pattern, - 'line_count': line_count, - 'indent_levels': indent_levels, - 'near_context': near_context, - 'occurrence': occurrence, - } - change_id = self.change_tracker.track_change( - file_path=rel_path, - change_type='indentlines', - original_content=original_content, - new_content=new_content, - metadata=metadata, - change_id=change_id - ) - except Exception as track_e: - self.io.tool_error(f"Error tracking change for IndentLines: {track_e}") - change_id = "TRACKING_FAILED" - - self.aider_edited_files.add(rel_path) - - # Improve feedback (Point 5 & 6) - action = "Indented" if indent_levels > 0 else "Unindented" - levels = abs(indent_levels) - level_text = "level" if levels == 1 else "levels" - num_lines = end_line - start_line + 1 - self.io.tool_output(f"✅ {action} {num_lines} lines (from {occurrence_str}start pattern) by {levels} {level_text} in {file_path} (change_id: {change_id})") - return f"Successfully {action.lower()} {num_lines} lines by {levels} {level_text} (change_id: {change_id}). Diff snippet:\n{diff_snippet}" - - except Exception as e: - self.io.tool_error(f"Error in IndentLines: {str(e)}\n{traceback.format_exc()}") # Add traceback - return f"Error: {str(e)}" - - def _execute_list_changes(self, file_path=None, limit=10): - """ - List recent changes made to files. - - Parameters: - - file_path: Optional path to filter changes by file - - limit: Maximum number of changes to list - - Returns a formatted list of changes. 
- """ - try: - # If file_path is specified, get the absolute path - rel_file_path = None - if file_path: - abs_path = self.abs_root_path(file_path) - rel_file_path = self.get_rel_fname(abs_path) - - # Get the list of changes - changes = self.change_tracker.list_changes(rel_file_path, limit) - - if not changes: - if file_path: - return f"No changes found for file '{file_path}'" - else: - return "No changes have been made yet" - - # Format the changes into a readable list - result = "Recent changes:\n" - for i, change in enumerate(changes): - change_time = datetime.fromtimestamp(change['timestamp']).strftime('%H:%M:%S') - change_type = change['type'] - file_path = change['file_path'] - change_id = change['id'] - - result += f"{i+1}. [{change_id}] {change_time} - {change_type.upper()} on {file_path}\n" - - self.io.tool_output(result) # Also print to console for user - return result - - except Exception as e: - self.io.tool_error(f"Error in ListChanges: {str(e)}\n{traceback.format_exc()}") # Add traceback - return f"Error: {str(e)}" - - def _execute_extract_lines(self, source_file_path, target_file_path, start_pattern, end_pattern=None, line_count=None, near_context=None, occurrence=1, dry_run=False): - """ - Extract a range of lines from a source file and move them to a target file. - - Parameters: - - source_file_path: Path to the file to extract lines from - - target_file_path: Path to the file to append extracted lines to (will be created if needed) - - start_pattern: Pattern marking the start of the block to extract - - end_pattern: Optional pattern marking the end of the block - - line_count: Optional number of lines to extract (alternative to end_pattern) - - near_context: Optional text nearby to help locate the correct instance of the start_pattern - - occurrence: Which occurrence of the start_pattern to use (1-based index, or -1 for last) - - dry_run: If True, simulate the change without modifying files - - Returns a result message. 
- """ - try: - # --- Validate Source File --- - abs_source_path = self.abs_root_path(source_file_path) - rel_source_path = self.get_rel_fname(abs_source_path) - - if not os.path.isfile(abs_source_path): - self.io.tool_error(f"Source file '{source_file_path}' not found") - return f"Error: Source file not found" - - if abs_source_path not in self.abs_fnames: - if abs_source_path in self.abs_read_only_fnames: - self.io.tool_error(f"Source file '{source_file_path}' is read-only. Use MakeEditable first.") - return f"Error: Source file is read-only. Use MakeEditable first." - else: - self.io.tool_error(f"Source file '{source_file_path}' not in context") - return f"Error: Source file not in context" - - # --- Validate Target File --- - abs_target_path = self.abs_root_path(target_file_path) - rel_target_path = self.get_rel_fname(abs_target_path) - target_exists = os.path.isfile(abs_target_path) - target_is_editable = abs_target_path in self.abs_fnames - target_is_readonly = abs_target_path in self.abs_read_only_fnames - - if target_exists and not target_is_editable: - if target_is_readonly: - self.io.tool_error(f"Target file '{target_file_path}' exists but is read-only. Use MakeEditable first.") - return f"Error: Target file exists but is read-only. Use MakeEditable first." - else: - # This case shouldn't happen if file exists, but handle defensively - self.io.tool_error(f"Target file '{target_file_path}' exists but is not in context. Add it first.") - return f"Error: Target file exists but is not in context." 
- - # --- Read Source Content --- - source_content = self.io.read_text(abs_source_path) - if source_content is None: - self.io.tool_error(f"Could not read source file '{source_file_path}' before ExtractLines operation.") - return f"Error: Could not read source file '{source_file_path}'" - - # --- Find Extraction Range --- - if end_pattern and line_count: - self.io.tool_error("Cannot specify both end_pattern and line_count") - return "Error: Cannot specify both end_pattern and line_count" - - source_lines = source_content.splitlines() - original_source_content = source_content - - start_pattern_line_indices = [] - for i, line in enumerate(source_lines): - if start_pattern in line: - if near_context: - context_window_start = max(0, i - 5) - context_window_end = min(len(source_lines), i + 6) - context_block = "\n".join(source_lines[context_window_start:context_window_end]) - if near_context in context_block: - start_pattern_line_indices.append(i) - else: - start_pattern_line_indices.append(i) - - if not start_pattern_line_indices: - err_msg = f"Start pattern '{start_pattern}' not found" - if near_context: err_msg += f" near context '{near_context}'" - err_msg += f" in source file '{source_file_path}'." - self.io.tool_error(err_msg) - return f"Error: {err_msg}" - - num_occurrences = len(start_pattern_line_indices) - try: - occurrence = int(occurrence) - if occurrence == -1: - target_idx = num_occurrences - 1 - elif occurrence > 0 and occurrence <= num_occurrences: - target_idx = occurrence - 1 - else: - err_msg = f"Occurrence number {occurrence} is out of range for start pattern '{start_pattern}'. Found {num_occurrences} occurrences" - if near_context: err_msg += f" near '{near_context}'" - err_msg += f" in '{source_file_path}'." - self.io.tool_error(err_msg) - return f"Error: {err_msg}" - except ValueError: - self.io.tool_error(f"Invalid occurrence value: '{occurrence}'. 
Must be an integer.") - return f"Error: Invalid occurrence value '{occurrence}'" - - start_line = start_pattern_line_indices[target_idx] - occurrence_str = f"occurrence {occurrence} of " if num_occurrences > 1 else "" - - end_line = -1 - if end_pattern: - for i in range(start_line, len(source_lines)): - if end_pattern in source_lines[i]: - end_line = i - break - if end_line == -1: - err_msg = f"End pattern '{end_pattern}' not found after {occurrence_str}start pattern '{start_pattern}' (line {start_line + 1}) in '{source_file_path}'." - self.io.tool_error(err_msg) - return f"Error: {err_msg}" - elif line_count: - try: - line_count = int(line_count) - if line_count <= 0: raise ValueError("Line count must be positive") - end_line = min(start_line + line_count - 1, len(source_lines) - 1) - except ValueError: - self.io.tool_error(f"Invalid line_count value: '{line_count}'. Must be a positive integer.") - return f"Error: Invalid line_count value '{line_count}'" - else: - end_line = start_line # Extract just the start line if no end specified - - # --- Prepare Content Changes --- - extracted_lines = source_lines[start_line:end_line+1] - new_source_lines = source_lines[:start_line] + source_lines[end_line+1:] - new_source_content = '\n'.join(new_source_lines) - - target_content = "" - if target_exists: - target_content = self.io.read_text(abs_target_path) - if target_content is None: - self.io.tool_error(f"Could not read existing target file '{target_file_path}'.") - return f"Error: Could not read target file '{target_file_path}'" - original_target_content = target_content # For tracking - - # Append extracted lines to target content, ensuring a newline if target wasn't empty - extracted_block = '\n'.join(extracted_lines) - if target_content and not target_content.endswith('\n'): - target_content += '\n' # Add newline before appending if needed - new_target_content = target_content + extracted_block - - # --- Generate Diffs --- - source_diff_snippet = 
self._generate_diff_snippet_delete(original_source_content, start_line, end_line) - target_insertion_line = len(target_content.splitlines()) if target_content else 0 - target_diff_snippet = self._generate_diff_snippet_insert(original_target_content, target_insertion_line, extracted_lines) - - # --- Handle Dry Run --- - if dry_run: - num_extracted = end_line - start_line + 1 - target_action = "append to" if target_exists else "create" - self.io.tool_output(f"Dry run: Would extract {num_extracted} lines (from {occurrence_str}start pattern '{start_pattern}') in {source_file_path} and {target_action} {target_file_path}") - # Provide more informative dry run response with diffs - return ( - f"Dry run: Would extract {num_extracted} lines from {rel_source_path} and {target_action} {rel_target_path}.\n" - f"Source Diff (Deletion):\n{source_diff_snippet}\n" - f"Target Diff (Insertion):\n{target_diff_snippet}" - ) - - # --- Apply Changes (Not Dry Run) --- - self.io.write_text(abs_source_path, new_source_content) - self.io.write_text(abs_target_path, new_target_content) - - # --- Track Changes --- - source_change_id = "TRACKING_FAILED" - target_change_id = "TRACKING_FAILED" - try: - source_metadata = { - 'start_line': start_line + 1, 'end_line': end_line + 1, - 'start_pattern': start_pattern, 'end_pattern': end_pattern, 'line_count': line_count, - 'near_context': near_context, 'occurrence': occurrence, - 'extracted_content': extracted_block, 'target_file': rel_target_path - } - source_change_id = self.change_tracker.track_change( - file_path=rel_source_path, change_type='extractlines_source', - original_content=original_source_content, new_content=new_source_content, - metadata=source_metadata - ) - except Exception as track_e: - self.io.tool_error(f"Error tracking source change for ExtractLines: {track_e}") - - try: - target_metadata = { - 'insertion_line': target_insertion_line + 1, - 'inserted_content': extracted_block, 'source_file': rel_source_path - } - target_change_id 
= self.change_tracker.track_change( - file_path=rel_target_path, change_type='extractlines_target', - original_content=original_target_content, new_content=new_target_content, - metadata=target_metadata - ) - except Exception as track_e: - self.io.tool_error(f"Error tracking target change for ExtractLines: {track_e}") - - # --- Update Context --- - self.aider_edited_files.add(rel_source_path) - self.aider_edited_files.add(rel_target_path) - if not target_exists: - # Add the newly created file to editable context - self.abs_fnames.add(abs_target_path) - self.io.tool_output(f"✨ Created and added '{target_file_path}' to editable context.") - - # --- Return Result --- - num_extracted = end_line - start_line + 1 - target_action = "appended to" if target_exists else "created" - self.io.tool_output(f"✅ Extracted {num_extracted} lines from {rel_source_path} (change_id: {source_change_id}) and {target_action} {rel_target_path} (change_id: {target_change_id})") - # Provide more informative success response with change IDs and diffs - return ( - f"Successfully extracted {num_extracted} lines from {rel_source_path} and {target_action} {rel_target_path}.\n" - f"Source Change ID: {source_change_id}\nSource Diff (Deletion):\n{source_diff_snippet}\n" - f"Target Change ID: {target_change_id}\nTarget Diff (Insertion):\n{target_diff_snippet}" - ) - - except Exception as e: - self.io.tool_error(f"Error in ExtractLines: {str(e)}\n{traceback.format_exc()}") - return f"Error: {str(e)}" # ------------------- Diff Generation Helpers ------------------- diff --git a/aider/tools/command.py b/aider/tools/command.py new file mode 100644 index 000000000..b9ee094a3 --- /dev/null +++ b/aider/tools/command.py @@ -0,0 +1,55 @@ +# Import necessary functions +from aider.run_cmd import run_cmd_subprocess + +def _execute_command(coder, command_string): + """ + Execute a non-interactive shell command after user confirmation. 
+ """ + try: + # Ask for confirmation before executing, allowing 'Always' + # Use the command string itself as the group key to remember preference per command + if not coder.io.confirm_ask( + "Allow execution of this command?", + subject=command_string, + explicit_yes_required=True, # Require explicit 'yes' or 'always' + allow_never=True # Enable the 'Don't ask again' option + ): + # Check if the reason for returning False was *not* because it's remembered + # (confirm_ask returns False if 'n' or 'no' is chosen, even if remembered) + # We only want to skip if the user actively said no *this time* or if it's + # remembered as 'never' (which shouldn't happen with allow_never=True, + # but checking io.never_ask_group is robust). + # If the command is in never_ask_group with a True value (meaning Always), + # confirm_ask would have returned True directly. + # So, if confirm_ask returns False here, it means the user chose No this time. + coder.io.tool_output(f"Skipped execution of shell command: {command_string}") + return "Shell command execution skipped by user." + + coder.io.tool_output(f"⚙️ Executing non-interactive shell command: {command_string}") + + # Use run_cmd_subprocess for non-interactive execution + exit_status, combined_output = run_cmd_subprocess( + command_string, + verbose=coder.verbose, + cwd=coder.root # Execute in the project root + ) + + # Format the output for the result message, include more content + output_content = combined_output or "" + # Use the existing token threshold constant as the character limit for truncation + output_limit = coder.large_file_token_threshold + if len(output_content) > output_limit: + # Truncate and add a clear message using the constant value + output_content = output_content[:output_limit] + f"\n... (output truncated at {output_limit} characters, based on large_file_token_threshold)" + + if exit_status == 0: + return f"Shell command executed successfully (exit code 0). 
Output:\n{output_content}" + else: + return f"Shell command failed with exit code {exit_status}. Output:\n{output_content}" + + except Exception as e: + coder.io.tool_error(f"Error executing non-interactive shell command '{command_string}': {str(e)}") + # Optionally include traceback for debugging if verbose + # if coder.verbose: + # coder.io.tool_error(traceback.format_exc()) + return f"Error executing command: {str(e)}" diff --git a/aider/tools/command_interactive.py b/aider/tools/command_interactive.py new file mode 100644 index 000000000..e71e3f88e --- /dev/null +++ b/aider/tools/command_interactive.py @@ -0,0 +1,40 @@ +# Import necessary functions +from aider.run_cmd import run_cmd + +def _execute_command_interactive(coder, command_string): + """ + Execute an interactive shell command using run_cmd (which uses pexpect/PTY). + """ + try: + coder.io.tool_output(f"⚙️ Starting interactive shell command: {command_string}") + coder.io.tool_output(">>> You may need to interact with the command below <<<") + + # Use run_cmd which handles PTY logic + exit_status, combined_output = run_cmd( + command_string, + verbose=coder.verbose, # Pass verbose flag + error_print=coder.io.tool_error, # Use io for error printing + cwd=coder.root # Execute in the project root + ) + + coder.io.tool_output(">>> Interactive command finished <<<") + + # Format the output for the result message, include more content + output_content = combined_output or "" + # Use the existing token threshold constant as the character limit for truncation + output_limit = coder.large_file_token_threshold + if len(output_content) > output_limit: + # Truncate and add a clear message using the constant value + output_content = output_content[:output_limit] + f"\n... (output truncated at {output_limit} characters, based on large_file_token_threshold)" + + if exit_status == 0: + return f"Interactive command finished successfully (exit code 0). 
Output:\n{output_content}" + else: + return f"Interactive command finished with exit code {exit_status}. Output:\n{output_content}" + + except Exception as e: + coder.io.tool_error(f"Error executing interactive shell command '{command_string}': {str(e)}") + # Optionally include traceback for debugging if verbose + # if coder.verbose: + # coder.io.tool_error(traceback.format_exc()) + return f"Error executing interactive command: {str(e)}" diff --git a/aider/tools/delete_block.py b/aider/tools/delete_block.py new file mode 100644 index 000000000..2f3391ee0 --- /dev/null +++ b/aider/tools/delete_block.py @@ -0,0 +1,171 @@ +import os +import traceback + +def _execute_delete_block(coder, file_path, start_pattern, end_pattern=None, line_count=None, near_context=None, occurrence=1, change_id=None, dry_run=False): + """ + Delete a block of text between start_pattern and end_pattern (inclusive). + + Parameters: + - coder: The Coder instance + - file_path: Path to the file to modify + - start_pattern: Pattern marking the start of the block to delete (line containing this pattern) + - end_pattern: Optional pattern marking the end of the block (line containing this pattern) + - line_count: Optional number of lines to delete (alternative to end_pattern) + - near_context: Optional text nearby to help locate the correct instance of the start_pattern + - occurrence: Which occurrence of the start_pattern to use (1-based index, or -1 for last) + - change_id: Optional ID for tracking the change + - dry_run: If True, simulate the change without modifying the file + + Returns a result message. 
+ """ + try: + # Get absolute file path + abs_path = coder.abs_root_path(file_path) + rel_path = coder.get_rel_fname(abs_path) + + # Check if file exists + if not os.path.isfile(abs_path): + coder.io.tool_error(f"File '{file_path}' not found") + return f"Error: File not found" + + # Check if file is in editable context + if abs_path not in coder.abs_fnames: + if abs_path in coder.abs_read_only_fnames: + coder.io.tool_error(f"File '{file_path}' is read-only. Use MakeEditable first.") + return f"Error: File is read-only. Use MakeEditable first." + else: + coder.io.tool_error(f"File '{file_path}' not in context") + return f"Error: File not in context" + + # Reread file content immediately before modification + file_content = coder.io.read_text(abs_path) + if file_content is None: + coder.io.tool_error(f"Could not read file '{file_path}' before DeleteBlock operation.") + return f"Error: Could not read file '{file_path}'" + + # Validate we have either end_pattern or line_count, but not both + if end_pattern and line_count: + coder.io.tool_error("Cannot specify both end_pattern and line_count") + return "Error: Cannot specify both end_pattern and line_count" + + # Split into lines for easier handling + lines = file_content.splitlines() + original_content = file_content + + # Find occurrences of the start_pattern + start_pattern_line_indices = [] + for i, line in enumerate(lines): + if start_pattern in line: + if near_context: + context_window_start = max(0, i - 5) + context_window_end = min(len(lines), i + 6) + context_block = "\n".join(lines[context_window_start:context_window_end]) + if near_context in context_block: + start_pattern_line_indices.append(i) + else: + start_pattern_line_indices.append(i) + + if not start_pattern_line_indices: + err_msg = f"Start pattern '{start_pattern}' not found" + if near_context: err_msg += f" near context '{near_context}'" + err_msg += f" in file '{file_path}'." 
+ coder.io.tool_error(err_msg) + return f"Error: {err_msg}" + + # Select the occurrence for the start pattern + num_occurrences = len(start_pattern_line_indices) + try: + occurrence = int(occurrence) + if occurrence == -1: + target_idx = num_occurrences - 1 + elif occurrence > 0 and occurrence <= num_occurrences: + target_idx = occurrence - 1 + else: + err_msg = f"Occurrence number {occurrence} is out of range for start pattern '{start_pattern}'. Found {num_occurrences} occurrences" + if near_context: err_msg += f" near '{near_context}'" + err_msg += f" in '{file_path}'." + coder.io.tool_error(err_msg) + return f"Error: {err_msg}" + except ValueError: + coder.io.tool_error(f"Invalid occurrence value: '{occurrence}'. Must be an integer.") + return f"Error: Invalid occurrence value '{occurrence}'" + + start_line = start_pattern_line_indices[target_idx] + occurrence_str = f"occurrence {occurrence} of " if num_occurrences > 1 else "" + + # Find the end line based on end_pattern or line_count + end_line = -1 + if end_pattern: + for i in range(start_line, len(lines)): + if end_pattern in lines[i]: + end_line = i + break + if end_line == -1: + err_msg = f"End pattern '{end_pattern}' not found after {occurrence_str}start pattern '{start_pattern}' (line {start_line + 1}) in '{file_path}'." + coder.io.tool_error(err_msg) + return f"Error: {err_msg}" + elif line_count: + try: + line_count = int(line_count) + if line_count <= 0: raise ValueError("Line count must be positive") + end_line = min(start_line + line_count - 1, len(lines) - 1) + except ValueError: + coder.io.tool_error(f"Invalid line_count value: '{line_count}'. 
Must be a positive integer.") + return f"Error: Invalid line_count value '{line_count}'" + else: + end_line = start_line + + # Prepare the deletion + deleted_lines = lines[start_line:end_line+1] + new_lines = lines[:start_line] + lines[end_line+1:] + new_content = '\n'.join(new_lines) + + if original_content == new_content: + coder.io.tool_warning(f"No changes made: deletion would not change file") + return f"Warning: No changes made (deletion would not change file)" + + # Generate diff for feedback (assuming _generate_diff_snippet_delete is available on coder) + diff_snippet = coder._generate_diff_snippet_delete(original_content, start_line, end_line) + + # Handle dry run + if dry_run: + coder.io.tool_output(f"Dry run: Would delete lines {start_line+1}-{end_line+1} (based on {occurrence_str}start pattern '{start_pattern}') in {file_path}") + return f"Dry run: Would delete block. Diff snippet:\n{diff_snippet}" + + # --- Apply Change (Not dry run) --- + coder.io.write_text(abs_path, new_content) + + # Track the change + try: + metadata = { + 'start_line': start_line + 1, + 'end_line': end_line + 1, + 'start_pattern': start_pattern, + 'end_pattern': end_pattern, + 'line_count': line_count, + 'near_context': near_context, + 'occurrence': occurrence, + 'deleted_content': '\n'.join(deleted_lines) + } + change_id = coder.change_tracker.track_change( + file_path=rel_path, + change_type='deleteblock', + original_content=original_content, + new_content=new_content, + metadata=metadata, + change_id=change_id + ) + except Exception as track_e: + coder.io.tool_error(f"Error tracking change for DeleteBlock: {track_e}") + change_id = "TRACKING_FAILED" + + coder.aider_edited_files.add(rel_path) + + # Improve feedback + num_deleted = end_line - start_line + 1 + coder.io.tool_output(f"✅ Deleted {num_deleted} lines (from {occurrence_str}start pattern) in {file_path} (change_id: {change_id})") + return f"Successfully deleted {num_deleted} lines (change_id: {change_id}). 
Diff snippet:\n{diff_snippet}" + + except Exception as e: + coder.io.tool_error(f"Error in DeleteBlock: {str(e)}\n{traceback.format_exc()}") + return f"Error: {str(e)}" diff --git a/aider/tools/extract_lines.py b/aider/tools/extract_lines.py new file mode 100644 index 000000000..b4577beb7 --- /dev/null +++ b/aider/tools/extract_lines.py @@ -0,0 +1,220 @@ +import os +import traceback + +def _execute_extract_lines(coder, source_file_path, target_file_path, start_pattern, end_pattern=None, line_count=None, near_context=None, occurrence=1, dry_run=False): + """ + Extract a range of lines from a source file and move them to a target file. + + Parameters: + - coder: The Coder instance + - source_file_path: Path to the file to extract lines from + - target_file_path: Path to the file to append extracted lines to (will be created if needed) + - start_pattern: Pattern marking the start of the block to extract + - end_pattern: Optional pattern marking the end of the block + - line_count: Optional number of lines to extract (alternative to end_pattern) + - near_context: Optional text nearby to help locate the correct instance of the start_pattern + - occurrence: Which occurrence of the start_pattern to use (1-based index, or -1 for last) + - dry_run: If True, simulate the change without modifying files + + Returns a result message. + """ + try: + # --- Validate Source File --- + abs_source_path = coder.abs_root_path(source_file_path) + rel_source_path = coder.get_rel_fname(abs_source_path) + + if not os.path.isfile(abs_source_path): + coder.io.tool_error(f"Source file '{source_file_path}' not found") + return f"Error: Source file not found" + + if abs_source_path not in coder.abs_fnames: + if abs_source_path in coder.abs_read_only_fnames: + coder.io.tool_error(f"Source file '{source_file_path}' is read-only. Use MakeEditable first.") + return f"Error: Source file is read-only. Use MakeEditable first." 
+ else: + coder.io.tool_error(f"Source file '{source_file_path}' not in context") + return f"Error: Source file not in context" + + # --- Validate Target File --- + abs_target_path = coder.abs_root_path(target_file_path) + rel_target_path = coder.get_rel_fname(abs_target_path) + target_exists = os.path.isfile(abs_target_path) + target_is_editable = abs_target_path in coder.abs_fnames + target_is_readonly = abs_target_path in coder.abs_read_only_fnames + + if target_exists and not target_is_editable: + if target_is_readonly: + coder.io.tool_error(f"Target file '{target_file_path}' exists but is read-only. Use MakeEditable first.") + return f"Error: Target file exists but is read-only. Use MakeEditable first." + else: + # This case shouldn't happen if file exists, but handle defensively + coder.io.tool_error(f"Target file '{target_file_path}' exists but is not in context. Add it first.") + return f"Error: Target file exists but is not in context." + + # --- Read Source Content --- + source_content = coder.io.read_text(abs_source_path) + if source_content is None: + coder.io.tool_error(f"Could not read source file '{source_file_path}' before ExtractLines operation.") + return f"Error: Could not read source file '{source_file_path}'" + + # --- Find Extraction Range --- + if end_pattern and line_count: + coder.io.tool_error("Cannot specify both end_pattern and line_count") + return "Error: Cannot specify both end_pattern and line_count" + + source_lines = source_content.splitlines() + original_source_content = source_content + + start_pattern_line_indices = [] + for i, line in enumerate(source_lines): + if start_pattern in line: + if near_context: + context_window_start = max(0, i - 5) + context_window_end = min(len(source_lines), i + 6) + context_block = "\n".join(source_lines[context_window_start:context_window_end]) + if near_context in context_block: + start_pattern_line_indices.append(i) + else: + start_pattern_line_indices.append(i) + + if not 
start_pattern_line_indices: + err_msg = f"Start pattern '{start_pattern}' not found" + if near_context: err_msg += f" near context '{near_context}'" + err_msg += f" in source file '{source_file_path}'." + coder.io.tool_error(err_msg) + return f"Error: {err_msg}" + + num_occurrences = len(start_pattern_line_indices) + try: + occurrence = int(occurrence) + if occurrence == -1: + target_idx = num_occurrences - 1 + elif occurrence > 0 and occurrence <= num_occurrences: + target_idx = occurrence - 1 + else: + err_msg = f"Occurrence number {occurrence} is out of range for start pattern '{start_pattern}'. Found {num_occurrences} occurrences" + if near_context: err_msg += f" near '{near_context}'" + err_msg += f" in '{source_file_path}'." + coder.io.tool_error(err_msg) + return f"Error: {err_msg}" + except ValueError: + coder.io.tool_error(f"Invalid occurrence value: '{occurrence}'. Must be an integer.") + return f"Error: Invalid occurrence value '{occurrence}'" + + start_line = start_pattern_line_indices[target_idx] + occurrence_str = f"occurrence {occurrence} of " if num_occurrences > 1 else "" + + end_line = -1 + if end_pattern: + for i in range(start_line, len(source_lines)): + if end_pattern in source_lines[i]: + end_line = i + break + if end_line == -1: + err_msg = f"End pattern '{end_pattern}' not found after {occurrence_str}start pattern '{start_pattern}' (line {start_line + 1}) in '{source_file_path}'." + coder.io.tool_error(err_msg) + return f"Error: {err_msg}" + elif line_count: + try: + line_count = int(line_count) + if line_count <= 0: raise ValueError("Line count must be positive") + end_line = min(start_line + line_count - 1, len(source_lines) - 1) + except ValueError: + coder.io.tool_error(f"Invalid line_count value: '{line_count}'. 
Must be a positive integer.") + return f"Error: Invalid line_count value '{line_count}'" + else: + end_line = start_line # Extract just the start line if no end specified + + # --- Prepare Content Changes --- + extracted_lines = source_lines[start_line:end_line+1] + new_source_lines = source_lines[:start_line] + source_lines[end_line+1:] + new_source_content = '\n'.join(new_source_lines) + + target_content = "" + if target_exists: + target_content = coder.io.read_text(abs_target_path) + if target_content is None: + coder.io.tool_error(f"Could not read existing target file '{target_file_path}'.") + return f"Error: Could not read target file '{target_file_path}'" + original_target_content = target_content # For tracking + + # Append extracted lines to target content, ensuring a newline if target wasn't empty + extracted_block = '\n'.join(extracted_lines) + if target_content and not target_content.endswith('\n'): + target_content += '\n' # Add newline before appending if needed + new_target_content = target_content + extracted_block + + # --- Generate Diffs --- + source_diff_snippet = coder._generate_diff_snippet_delete(original_source_content, start_line, end_line) + target_insertion_line = len(target_content.splitlines()) if target_content else 0 + target_diff_snippet = coder._generate_diff_snippet_insert(original_target_content, target_insertion_line, extracted_lines) + + # --- Handle Dry Run --- + if dry_run: + num_extracted = end_line - start_line + 1 + target_action = "append to" if target_exists else "create" + coder.io.tool_output(f"Dry run: Would extract {num_extracted} lines (from {occurrence_str}start pattern '{start_pattern}') in {source_file_path} and {target_action} {target_file_path}") + # Provide more informative dry run response with diffs + return ( + f"Dry run: Would extract {num_extracted} lines from {rel_source_path} and {target_action} {rel_target_path}.\n" + f"Source Diff (Deletion):\n{source_diff_snippet}\n" + f"Target Diff 
(Insertion):\n{target_diff_snippet}" + ) + + # --- Apply Changes (Not Dry Run) --- + coder.io.write_text(abs_source_path, new_source_content) + coder.io.write_text(abs_target_path, new_target_content) + + # --- Track Changes --- + source_change_id = "TRACKING_FAILED" + target_change_id = "TRACKING_FAILED" + try: + source_metadata = { + 'start_line': start_line + 1, 'end_line': end_line + 1, + 'start_pattern': start_pattern, 'end_pattern': end_pattern, 'line_count': line_count, + 'near_context': near_context, 'occurrence': occurrence, + 'extracted_content': extracted_block, 'target_file': rel_target_path + } + source_change_id = coder.change_tracker.track_change( + file_path=rel_source_path, change_type='extractlines_source', + original_content=original_source_content, new_content=new_source_content, + metadata=source_metadata + ) + except Exception as track_e: + coder.io.tool_error(f"Error tracking source change for ExtractLines: {track_e}") + + try: + target_metadata = { + 'insertion_line': target_insertion_line + 1, + 'inserted_content': extracted_block, 'source_file': rel_source_path + } + target_change_id = coder.change_tracker.track_change( + file_path=rel_target_path, change_type='extractlines_target', + original_content=original_target_content, new_content=new_target_content, + metadata=target_metadata + ) + except Exception as track_e: + coder.io.tool_error(f"Error tracking target change for ExtractLines: {track_e}") + + # --- Update Context --- + coder.aider_edited_files.add(rel_source_path) + coder.aider_edited_files.add(rel_target_path) + if not target_exists: + # Add the newly created file to editable context + coder.abs_fnames.add(abs_target_path) + coder.io.tool_output(f"✨ Created and added '{target_file_path}' to editable context.") + + # --- Return Result --- + num_extracted = end_line - start_line + 1 + target_action = "appended to" if target_exists else "created" + coder.io.tool_output(f"✅ Extracted {num_extracted} lines from {rel_source_path} 
(change_id: {source_change_id}) and {target_action} {rel_target_path} (change_id: {target_change_id})") + # Provide more informative success response with change IDs and diffs + return ( + f"Successfully extracted {num_extracted} lines from {rel_source_path} and {target_action} {rel_target_path}.\n" + f"Source Change ID: {source_change_id}\nSource Diff (Deletion):\n{source_diff_snippet}\n" + f"Target Change ID: {target_change_id}\nTarget Diff (Insertion):\n{target_diff_snippet}" + ) + + except Exception as e: + coder.io.tool_error(f"Error in ExtractLines: {str(e)}\n{traceback.format_exc()}") + return f"Error: {str(e)}" diff --git a/aider/tools/indent_lines.py b/aider/tools/indent_lines.py new file mode 100644 index 000000000..e3f244faa --- /dev/null +++ b/aider/tools/indent_lines.py @@ -0,0 +1,197 @@ +import os +import traceback + +def _execute_indent_lines(coder, file_path, start_pattern, end_pattern=None, line_count=None, indent_levels=1, near_context=None, occurrence=1, change_id=None, dry_run=False): + """ + Indent or unindent a block of lines in a file. + + Parameters: + - coder: The Coder instance + - file_path: Path to the file to modify + - start_pattern: Pattern marking the start of the block to indent (line containing this pattern) + - end_pattern: Optional pattern marking the end of the block (line containing this pattern) + - line_count: Optional number of lines to indent (alternative to end_pattern) + - indent_levels: Number of levels to indent (positive) or unindent (negative) + - near_context: Optional text nearby to help locate the correct instance of the start_pattern + - occurrence: Which occurrence of the start_pattern to use (1-based index, or -1 for last) + - change_id: Optional ID for tracking the change + - dry_run: If True, simulate the change without modifying the file + + Returns a result message. 
+ """ + try: + # Get absolute file path + abs_path = coder.abs_root_path(file_path) + rel_path = coder.get_rel_fname(abs_path) + + # Check if file exists + if not os.path.isfile(abs_path): + coder.io.tool_error(f"File '{file_path}' not found") + return f"Error: File not found" + + # Check if file is in editable context + if abs_path not in coder.abs_fnames: + if abs_path in coder.abs_read_only_fnames: + coder.io.tool_error(f"File '{file_path}' is read-only. Use MakeEditable first.") + return f"Error: File is read-only. Use MakeEditable first." + else: + coder.io.tool_error(f"File '{file_path}' not in context") + return f"Error: File not in context" + + # Reread file content immediately before modification + file_content = coder.io.read_text(abs_path) + if file_content is None: + coder.io.tool_error(f"Could not read file '{file_path}' before IndentLines operation.") + return f"Error: Could not read file '{file_path}'" + + # Validate we have either end_pattern or line_count, but not both + if end_pattern and line_count: + coder.io.tool_error("Cannot specify both end_pattern and line_count") + return "Error: Cannot specify both end_pattern and line_count" + + # Split into lines for easier handling + lines = file_content.splitlines() + original_content = file_content + + # Find occurrences of the start_pattern + start_pattern_line_indices = [] + for i, line in enumerate(lines): + if start_pattern in line: + if near_context: + context_window_start = max(0, i - 5) + context_window_end = min(len(lines), i + 6) + context_block = "\n".join(lines[context_window_start:context_window_end]) + if near_context in context_block: + start_pattern_line_indices.append(i) + else: + start_pattern_line_indices.append(i) + + if not start_pattern_line_indices: + err_msg = f"Start pattern '{start_pattern}' not found" + if near_context: err_msg += f" near context '{near_context}'" + err_msg += f" in file '{file_path}'." 
+ coder.io.tool_error(err_msg) + return f"Error: {err_msg}" + + # Select the occurrence for the start pattern + num_occurrences = len(start_pattern_line_indices) + try: + occurrence = int(occurrence) + if occurrence == -1: + target_idx = num_occurrences - 1 + elif occurrence > 0 and occurrence <= num_occurrences: + target_idx = occurrence - 1 + else: + err_msg = f"Occurrence number {occurrence} is out of range for start pattern '{start_pattern}'. Found {num_occurrences} occurrences" + if near_context: err_msg += f" near '{near_context}'" + err_msg += f" in '{file_path}'." + coder.io.tool_error(err_msg) + return f"Error: {err_msg}" + except ValueError: + coder.io.tool_error(f"Invalid occurrence value: '{occurrence}'. Must be an integer.") + return f"Error: Invalid occurrence value '{occurrence}'" + + start_line = start_pattern_line_indices[target_idx] + occurrence_str = f"occurrence {occurrence} of " if num_occurrences > 1 else "" + + # Find the end line based on end_pattern or line_count + end_line = -1 + if end_pattern: + for i in range(start_line, len(lines)): + if end_pattern in lines[i]: + end_line = i + break + if end_line == -1: + err_msg = f"End pattern '{end_pattern}' not found after {occurrence_str}start pattern '{start_pattern}' (line {start_line + 1}) in '{file_path}'." + coder.io.tool_error(err_msg) + return f"Error: {err_msg}" + elif line_count: + try: + line_count = int(line_count) + if line_count <= 0: raise ValueError("Line count must be positive") + end_line = min(start_line + line_count - 1, len(lines) - 1) + except ValueError: + coder.io.tool_error(f"Invalid line_count value: '{line_count}'. Must be a positive integer.") + return f"Error: Invalid line_count value '{line_count}'" + else: + end_line = start_line + + # Determine indentation amount + try: + indent_levels = int(indent_levels) + except ValueError: + coder.io.tool_error(f"Invalid indent_levels value: '{indent_levels}'. 
Must be an integer.") + return f"Error: Invalid indent_levels value '{indent_levels}'" + + indent_str = ' ' * 4 # Assume 4 spaces per level + + # Create a temporary copy to calculate the change + modified_lines = list(lines) + + # Apply indentation to the temporary copy + for i in range(start_line, end_line + 1): + if indent_levels > 0: + modified_lines[i] = (indent_str * indent_levels) + modified_lines[i] + elif indent_levels < 0: + spaces_to_remove = abs(indent_levels) * len(indent_str) + current_leading_spaces = len(modified_lines[i]) - len(modified_lines[i].lstrip(' ')) + actual_remove = min(spaces_to_remove, current_leading_spaces) + if actual_remove > 0: + modified_lines[i] = modified_lines[i][actual_remove:] + + # Join lines back into a string + new_content = '\n'.join(modified_lines) + + if original_content == new_content: + coder.io.tool_warning(f"No changes made: indentation would not change file") + return f"Warning: No changes made (indentation would not change file)" + + # Generate diff for feedback (assuming _generate_diff_snippet_indent is available on coder) + diff_snippet = coder._generate_diff_snippet_indent(original_content, new_content, start_line, end_line) + + # Handle dry run + if dry_run: + action = "indent" if indent_levels > 0 else "unindent" + coder.io.tool_output(f"Dry run: Would {action} lines {start_line+1}-{end_line+1} (based on {occurrence_str}start pattern '{start_pattern}') in {file_path}") + return f"Dry run: Would {action} block. 
import os
import traceback


def _execute_insert_block(coder, file_path, content, after_pattern=None, before_pattern=None, near_context=None, occurrence=1, change_id=None, dry_run=False):
    """
    Insert a block of text after or before a specified pattern.

    Parameters:
    - coder: The Coder instance
    - file_path: Path to the file to modify
    - content: Text block to insert
    - after_pattern: Pattern after which to insert the block (line containing this pattern) - specify one of after/before
    - before_pattern: Pattern before which to insert the block (line containing this pattern) - specify one of after/before
    - near_context: Optional text nearby to help locate the correct instance of the pattern
    - occurrence: Which occurrence of the pattern to use (1-based index, or -1 for last)
    - change_id: Optional ID for tracking the change
    - dry_run: If True, simulate the change without modifying the file

    Returns a result message.
    """
    try:
        # Get absolute file path
        abs_path = coder.abs_root_path(file_path)
        rel_path = coder.get_rel_fname(abs_path)

        # Check if file exists
        if not os.path.isfile(abs_path):
            coder.io.tool_error(f"File '{file_path}' not found")
            return f"Error: File not found"

        # Check if file is in editable context
        if abs_path not in coder.abs_fnames:
            if abs_path in coder.abs_read_only_fnames:
                coder.io.tool_error(f"File '{file_path}' is read-only. Use MakeEditable first.")
                return f"Error: File is read-only. Use MakeEditable first."
            else:
                coder.io.tool_error(f"File '{file_path}' not in context")
                return f"Error: File not in context"

        # Reread file content immediately before modification (avoids stale reads)
        file_content = coder.io.read_text(abs_path)
        if file_content is None:
            coder.io.tool_error(f"Could not read file '{file_path}' before InsertBlock operation.")
            return f"Error: Could not read file '{file_path}'"

        # Exactly one of after_pattern / before_pattern must be given
        if after_pattern and before_pattern:
            coder.io.tool_error("Cannot specify both after_pattern and before_pattern")
            return "Error: Cannot specify both after_pattern and before_pattern"
        if not after_pattern and not before_pattern:
            coder.io.tool_error("Must specify either after_pattern or before_pattern")
            return "Error: Must specify either after_pattern or before_pattern"

        lines = file_content.splitlines()
        original_content = file_content

        pattern = after_pattern if after_pattern else before_pattern
        pattern_type = "after" if after_pattern else "before"

        # Collect candidate line indices containing the pattern, optionally
        # filtered by near_context appearing within a +/-5 line window.
        pattern_line_indices = []
        for i, line in enumerate(lines):
            if pattern in line:
                if near_context:
                    context_window_start = max(0, i - 5)
                    context_window_end = min(len(lines), i + 6)
                    context_block = "\n".join(lines[context_window_start:context_window_end])
                    if near_context in context_block:
                        pattern_line_indices.append(i)
                else:
                    pattern_line_indices.append(i)

        if not pattern_line_indices:
            err_msg = f"Pattern '{pattern}' not found"
            if near_context:
                err_msg += f" near context '{near_context}'"
            err_msg += f" in file '{file_path}'."
            coder.io.tool_error(err_msg)
            return f"Error: {err_msg}"

        # Select the requested occurrence (1-based index, -1 = last)
        num_occurrences = len(pattern_line_indices)
        try:
            occurrence = int(occurrence)
            if occurrence == -1:
                target_idx = num_occurrences - 1
            elif occurrence > 0 and occurrence <= num_occurrences:
                target_idx = occurrence - 1  # convert 1-based to 0-based
            else:
                err_msg = f"Occurrence number {occurrence} is out of range for pattern '{pattern}'. Found {num_occurrences} occurrences"
                if near_context:
                    err_msg += f" near '{near_context}'"
                err_msg += f" in '{file_path}'."
                coder.io.tool_error(err_msg)
                return f"Error: {err_msg}"
        except ValueError:
            coder.io.tool_error(f"Invalid occurrence value: '{occurrence}'. Must be an integer.")
            return f"Error: Invalid occurrence value '{occurrence}'"

        # Determine the final insertion line index
        insertion_line_idx = pattern_line_indices[target_idx]
        if pattern_type == "after":
            insertion_line_idx += 1  # insert on the line *after* the matched line

        content_lines = content.splitlines()

        new_lines = lines[:insertion_line_idx] + content_lines + lines[insertion_line_idx:]
        new_content = '\n'.join(new_lines)
        # BUGFIX: splitlines()/join() drops a trailing newline, so every insert
        # silently stripped the file's final newline. Preserve it explicitly.
        if original_content.endswith('\n') and not new_content.endswith('\n'):
            new_content += '\n'

        if original_content == new_content:
            coder.io.tool_warning(f"No changes made: insertion would not change file")
            return f"Warning: No changes made (insertion would not change file)"

        # Generate diff for feedback
        diff_snippet = coder._generate_diff_snippet_insert(original_content, insertion_line_idx, content_lines)

        # Handle dry run
        if dry_run:
            occurrence_str = f"occurrence {occurrence} of " if num_occurrences > 1 else ""
            coder.io.tool_output(f"Dry run: Would insert block {pattern_type} {occurrence_str}pattern '{pattern}' in {file_path}")
            return f"Dry run: Would insert block. Diff snippet:\n{diff_snippet}"

        # --- Apply Change (Not dry run) ---
        coder.io.write_text(abs_path, new_content)

        # Track the change; tracking failures must not abort the edit itself
        try:
            metadata = {
                'insertion_line_idx': insertion_line_idx,
                'after_pattern': after_pattern,
                'before_pattern': before_pattern,
                'near_context': near_context,
                'occurrence': occurrence,
                'content': content
            }
            change_id = coder.change_tracker.track_change(
                file_path=rel_path,
                change_type='insertblock',
                original_content=original_content,
                new_content=new_content,
                metadata=metadata,
                change_id=change_id
            )
        except Exception as track_e:
            coder.io.tool_error(f"Error tracking change for InsertBlock: {track_e}")
            change_id = "TRACKING_FAILED"

        coder.aider_edited_files.add(rel_path)

        occurrence_str = f"occurrence {occurrence} of " if num_occurrences > 1 else ""
        coder.io.tool_output(f"✅ Inserted block {pattern_type} {occurrence_str}pattern in {file_path} (change_id: {change_id})")
        return f"Successfully inserted block (change_id: {change_id}). Diff snippet:\n{diff_snippet}"

    except Exception as e:
        coder.io.tool_error(f"Error in InsertBlock: {str(e)}\n{traceback.format_exc()}")
        return f"Error: {str(e)}"
block. Diff snippet:\n{diff_snippet}" + + # --- Apply Change (Not dry run) --- + coder.io.write_text(abs_path, new_content) + + # Track the change + try: + metadata = { + 'insertion_line_idx': insertion_line_idx, + 'after_pattern': after_pattern, + 'before_pattern': before_pattern, + 'near_context': near_context, + 'occurrence': occurrence, + 'content': content + } + change_id = coder.change_tracker.track_change( + file_path=rel_path, + change_type='insertblock', + original_content=original_content, + new_content=new_content, + metadata=metadata, + change_id=change_id + ) + except Exception as track_e: + coder.io.tool_error(f"Error tracking change for InsertBlock: {track_e}") + change_id = "TRACKING_FAILED" + + coder.aider_edited_files.add(rel_path) + + # Improve feedback (Point 5 & 6) + occurrence_str = f"occurrence {occurrence} of " if num_occurrences > 1 else "" + coder.io.tool_output(f"✅ Inserted block {pattern_type} {occurrence_str}pattern in {file_path} (change_id: {change_id})") + return f"Successfully inserted block (change_id: {change_id}). Diff snippet:\n{diff_snippet}" + + except Exception as e: + coder.io.tool_error(f"Error in InsertBlock: {str(e)}\n{traceback.format_exc()}") # Add traceback + return f"Error: {str(e)}" \ No newline at end of file diff --git a/aider/tools/list_changes.py b/aider/tools/list_changes.py new file mode 100644 index 000000000..4dfa39721 --- /dev/null +++ b/aider/tools/list_changes.py @@ -0,0 +1,46 @@ +import traceback +from datetime import datetime + +def _execute_list_changes(coder, file_path=None, limit=10): + """ + List recent changes made to files. + + Parameters: + - coder: The Coder instance + - file_path: Optional path to filter changes by file + - limit: Maximum number of changes to list + + Returns a formatted list of changes. 
+ """ + try: + # If file_path is specified, get the absolute path + rel_file_path = None + if file_path: + abs_path = coder.abs_root_path(file_path) + rel_file_path = coder.get_rel_fname(abs_path) + + # Get the list of changes + changes = coder.change_tracker.list_changes(rel_file_path, limit) + + if not changes: + if file_path: + return f"No changes found for file '{file_path}'" + else: + return "No changes have been made yet" + + # Format the changes into a readable list + result = "Recent changes:\n" + for i, change in enumerate(changes): + change_time = datetime.fromtimestamp(change['timestamp']).strftime('%H:%M:%S') + change_type = change['type'] + file_path = change['file_path'] + change_id = change['id'] + + result += f"{i+1}. [{change_id}] {change_time} - {change_type.upper()} on {file_path}\n" + + coder.io.tool_output(result) # Also print to console for user + return result + + except Exception as e: + coder.io.tool_error(f"Error in ListChanges: {str(e)}\n{traceback.format_exc()}") # Add traceback + return f"Error: {str(e)}" diff --git a/aider/tools/ls.py b/aider/tools/ls.py new file mode 100644 index 000000000..42119a0a8 --- /dev/null +++ b/aider/tools/ls.py @@ -0,0 +1,48 @@ +import os + +def execute_ls(coder, dir_path): + """ + List files in directory and optionally add some to context. + + This provides information about the structure of the codebase, + similar to how a developer would explore directories. 
+ """ + try: + # Make the path relative to root if it's absolute + if dir_path.startswith('/'): + rel_dir = os.path.relpath(dir_path, coder.root) + else: + rel_dir = dir_path + + # Get absolute path + abs_dir = coder.abs_root_path(rel_dir) + + # Check if path exists + if not os.path.exists(abs_dir): + coder.io.tool_output(f"⚠️ Directory '{dir_path}' not found") + return f"Directory not found" + + # Get directory contents + contents = [] + try: + with os.scandir(abs_dir) as entries: + for entry in entries: + if entry.is_file() and not entry.name.startswith('.'): + rel_path = os.path.join(rel_dir, entry.name) + contents.append(rel_path) + except NotADirectoryError: + # If it's a file, just return the file + contents = [rel_dir] + + if contents: + coder.io.tool_output(f"📋 Listed {len(contents)} file(s) in '{dir_path}'") + if len(contents) > 10: + return f"Found {len(contents)} files: {', '.join(contents[:10])}..." + else: + return f"Found {len(contents)} files: {', '.join(contents)}" + else: + coder.io.tool_output(f"📋 No files found in '{dir_path}'") + return f"No files found in directory" + except Exception as e: + coder.io.tool_error(f"Error in ls: {str(e)}") + return f"Error: {str(e)}" diff --git a/aider/tools/make_editable.py b/aider/tools/make_editable.py new file mode 100644 index 000000000..03c7c0006 --- /dev/null +++ b/aider/tools/make_editable.py @@ -0,0 +1,46 @@ +import os + +# Keep the underscore prefix as this function is primarily for internal coder use +def _execute_make_editable(coder, file_path): + """ + Convert a read-only file to an editable file. + + This allows the LLM to upgrade a file from read-only to editable + when it determines it needs to make changes to that file. 
+ """ + try: + # Get absolute path + abs_path = coder.abs_root_path(file_path) + rel_path = coder.get_rel_fname(abs_path) + + # Check if file is already editable + if abs_path in coder.abs_fnames: + coder.io.tool_output(f"📝 File '{file_path}' is already editable") + return f"File is already editable" + + # Check if file exists on disk + if not os.path.isfile(abs_path): + coder.io.tool_output(f"⚠️ File '{file_path}' not found") + return f"Error: File not found" + + # File exists, is not editable, might be read-only or not in context yet + was_read_only = False + if abs_path in coder.abs_read_only_fnames: + coder.abs_read_only_fnames.remove(abs_path) + was_read_only = True + + # Add to editable files + coder.abs_fnames.add(abs_path) + + if was_read_only: + coder.io.tool_output(f"📝 Moved '{file_path}' from read-only to editable") + return f"File is now editable (moved from read-only)" + else: + # File was not previously in context at all + coder.io.tool_output(f"📝 Added '{file_path}' directly to editable context") + # Track if added during exploration? Maybe not needed for direct MakeEditable. + # coder.files_added_in_exploration.add(rel_path) # Consider if needed + return f"File is now editable (added directly)" + except Exception as e: + coder.io.tool_error(f"Error in MakeEditable for '{file_path}': {str(e)}") + return f"Error: {str(e)}" diff --git a/aider/tools/make_readonly.py b/aider/tools/make_readonly.py new file mode 100644 index 000000000..a4dac34dc --- /dev/null +++ b/aider/tools/make_readonly.py @@ -0,0 +1,30 @@ +def _execute_make_readonly(coder, file_path): + """ + Convert an editable file to a read-only file. + + This allows the LLM to downgrade a file from editable to read-only + when it determines it no longer needs to make changes to that file. 
+ """ + try: + # Get absolute path + abs_path = coder.abs_root_path(file_path) + rel_path = coder.get_rel_fname(abs_path) + + # Check if file is in editable context + if abs_path not in coder.abs_fnames: + if abs_path in coder.abs_read_only_fnames: + coder.io.tool_output(f"📚 File '{file_path}' is already read-only") + return f"File is already read-only" + else: + coder.io.tool_output(f"⚠️ File '{file_path}' not in context") + return f"File not in context" + + # Move from editable to read-only + coder.abs_fnames.remove(abs_path) + coder.abs_read_only_fnames.add(abs_path) + + coder.io.tool_output(f"📚 Made '{file_path}' read-only") + return f"File is now read-only" + except Exception as e: + coder.io.tool_error(f"Error making file read-only: {str(e)}") + return f"Error: {str(e)}" diff --git a/aider/tools/remove.py b/aider/tools/remove.py new file mode 100644 index 000000000..40530fe13 --- /dev/null +++ b/aider/tools/remove.py @@ -0,0 +1,45 @@ +import time + +def _execute_remove(coder, file_path): + """ + Explicitly remove a file from context. + + This allows the LLM to clean up its context when files are no + longer needed, keeping the context focused and efficient. 
+ """ + try: + # Get absolute path + abs_path = coder.abs_root_path(file_path) + rel_path = coder.get_rel_fname(abs_path) + + # Check if file is in context (either editable or read-only) + removed = False + if abs_path in coder.abs_fnames: + # Don't remove if it's the last editable file and there are no read-only files + if len(coder.abs_fnames) <= 1 and not coder.abs_read_only_fnames: + coder.io.tool_output(f"⚠️ Cannot remove '{file_path}' - it's the only file in context") + return f"Cannot remove - last file in context" + coder.abs_fnames.remove(abs_path) + removed = True + elif abs_path in coder.abs_read_only_fnames: + # Don't remove if it's the last read-only file and there are no editable files + if len(coder.abs_read_only_fnames) <= 1 and not coder.abs_fnames: + coder.io.tool_output(f"⚠️ Cannot remove '{file_path}' - it's the only file in context") + return f"Cannot remove - last file in context" + coder.abs_read_only_fnames.remove(abs_path) + removed = True + + if not removed: + coder.io.tool_output(f"⚠️ File '{file_path}' not in context") + return f"File not in context" + + # Track in recently removed + coder.recently_removed[rel_path] = { + 'removed_at': time.time() + } + + coder.io.tool_output(f"🗑️ Explicitly removed '{file_path}' from context") + return f"Removed file from context" + except Exception as e: + coder.io.tool_error(f"Error removing file: {str(e)}") + return f"Error: {str(e)}" diff --git a/aider/tools/replace_all.py b/aider/tools/replace_all.py new file mode 100644 index 000000000..468bb15b7 --- /dev/null +++ b/aider/tools/replace_all.py @@ -0,0 +1,95 @@ +import os +import traceback + +def _execute_replace_all(coder, file_path, find_text, replace_text, change_id=None, dry_run=False): + """ + Replace all occurrences of text in a file. 
+ + Parameters: + - coder: The Coder instance + - file_path: Path to the file to modify + - find_text: Text to find and replace + - replace_text: Text to replace it with + - change_id: Optional ID for tracking the change + - dry_run: If True, simulate the change without modifying the file + + Returns a result message. + """ + try: + # Get absolute file path + abs_path = coder.abs_root_path(file_path) + rel_path = coder.get_rel_fname(abs_path) + + # Check if file exists + if not os.path.isfile(abs_path): + coder.io.tool_error(f"File '{file_path}' not found") + return f"Error: File not found" + + # Check if file is in editable context + if abs_path not in coder.abs_fnames: + if abs_path in coder.abs_read_only_fnames: + coder.io.tool_error(f"File '{file_path}' is read-only. Use MakeEditable first.") + return f"Error: File is read-only. Use MakeEditable first." + else: + coder.io.tool_error(f"File '{file_path}' not in context") + return f"Error: File not in context" + + # Reread file content immediately before modification + content = coder.io.read_text(abs_path) + if content is None: + coder.io.tool_error(f"Could not read file '{file_path}' before ReplaceAll operation.") + return f"Error: Could not read file '{file_path}'" + + # Count occurrences + count = content.count(find_text) + if count == 0: + coder.io.tool_warning(f"Text '{find_text}' not found in file") + return f"Warning: Text not found in file" + + # Perform the replacement + original_content = content + new_content = content.replace(find_text, replace_text) + + if original_content == new_content: + coder.io.tool_warning(f"No changes made: replacement text is identical to original") + return f"Warning: No changes made (replacement identical to original)" + + # Generate diff for feedback (more comprehensive for ReplaceAll) + diff_examples = coder._generate_diff_chunks(original_content, find_text, replace_text) + + # Handle dry run + if dry_run: + coder.io.tool_output(f"Dry run: Would replace {count} 
import os
import traceback


def _execute_replace_line(coder, file_path, line_number, new_content, change_id=None, dry_run=False):
    """
    Replace a specific line identified by line number.
    Useful for fixing errors identified by error messages or linters.

    Parameters:
    - coder: The Coder instance
    - file_path: Path to the file to modify
    - line_number: The line number to replace (1-based)
    - new_content: New content for the line
    - change_id: Optional ID for tracking the change
    - dry_run: If True, simulate the change without modifying the file

    Returns a result message.
    """
    try:
        # Get absolute file path
        abs_path = coder.abs_root_path(file_path)
        rel_path = coder.get_rel_fname(abs_path)

        # Check if file exists
        if not os.path.isfile(abs_path):
            coder.io.tool_error(f"File '{file_path}' not found")
            return f"Error: File not found"

        # Check if file is in editable context
        if abs_path not in coder.abs_fnames:
            if abs_path in coder.abs_read_only_fnames:
                coder.io.tool_error(f"File '{file_path}' is read-only. Use MakeEditable first.")
                return f"Error: File is read-only. Use MakeEditable first."
            else:
                coder.io.tool_error(f"File '{file_path}' not in context")
                return f"Error: File not in context"

        # Reread file content immediately before modification (avoid stale reads)
        file_content = coder.io.read_text(abs_path)
        if file_content is None:
            coder.io.tool_error(f"Could not read file '{file_path}' before ReplaceLine operation.")
            return f"Error: Could not read file '{file_path}'"

        lines = file_content.splitlines()

        # Coerce line_number to int. FIX: report the failure once instead of
        # emitting two near-duplicate error messages for the same problem.
        if not isinstance(line_number, int):
            try:
                line_number = int(line_number)
            except ValueError:
                coder.io.tool_error(f"Invalid line_number value: '{line_number}'. Must be an integer.")
                return f"Error: Invalid line_number value '{line_number}'"

        # Convert 1-based line number to 0-based index
        idx = line_number - 1

        if idx < 0 or idx >= len(lines):
            coder.io.tool_error(f"Line number {line_number} is out of range for file '{file_path}' (has {len(lines)} lines).")
            return f"Error: Line number {line_number} out of range"

        # Store original content for change tracking
        original_content = file_content
        original_line = lines[idx]

        # Replace the line
        lines[idx] = new_content

        new_content_full = '\n'.join(lines)
        # BUGFIX: splitlines()/join() drops the trailing newline, so every
        # line replacement silently truncated the file's final newline.
        if file_content.endswith('\n') and not new_content_full.endswith('\n'):
            new_content_full += '\n'

        if original_content == new_content_full:
            coder.io.tool_warning("No changes made: new line content is identical to original")
            return f"Warning: No changes made (new content identical to original)"

        # Create a readable diff for the line replacement
        diff = f"Line {line_number}:\n- {original_line}\n+ {new_content}"

        # Handle dry run
        if dry_run:
            coder.io.tool_output(f"Dry run: Would replace line {line_number} in {file_path}")
            return f"Dry run: Would replace line {line_number}. Diff:\n{diff}"

        # --- Apply Change (Not dry run) ---
        coder.io.write_text(abs_path, new_content_full)

        # Track the change; tracking failures must not abort the edit itself
        try:
            metadata = {
                'line_number': line_number,
                'original_line': original_line,
                'new_line': new_content
            }
            change_id = coder.change_tracker.track_change(
                file_path=rel_path,
                change_type='replaceline',
                original_content=original_content,
                new_content=new_content_full,
                metadata=metadata,
                change_id=change_id
            )
        except Exception as track_e:
            coder.io.tool_error(f"Error tracking change for ReplaceLine: {track_e}")
            change_id = "TRACKING_FAILED"

        coder.aider_edited_files.add(rel_path)

        coder.io.tool_output(f"✅ Replaced line {line_number} in {file_path} (change_id: {change_id})")
        return f"Successfully replaced line {line_number} (change_id: {change_id}). Diff:\n{diff}"

    except Exception as e:
        coder.io.tool_error(f"Error in ReplaceLine: {str(e)}\n{traceback.format_exc()}")
        return f"Error: {str(e)}"
import os
import traceback


def _execute_replace_lines(coder, file_path, start_line, end_line, new_content, change_id=None, dry_run=False):
    """
    Replace a range of lines identified by line numbers.
    Useful for fixing errors identified by error messages or linters.

    Parameters:
    - coder: The Coder instance
    - file_path: Path to the file to modify
    - start_line: The first line number to replace (1-based)
    - end_line: The last line number to replace (1-based)
    - new_content: New content for the lines (can be multi-line)
    - change_id: Optional ID for tracking the change
    - dry_run: If True, simulate the change without modifying the file

    Returns a result message.
    """
    try:
        # Get absolute file path
        abs_path = coder.abs_root_path(file_path)
        rel_path = coder.get_rel_fname(abs_path)

        # Check if file exists
        if not os.path.isfile(abs_path):
            coder.io.tool_error(f"File '{file_path}' not found")
            return f"Error: File not found"

        # Check if file is in editable context
        if abs_path not in coder.abs_fnames:
            if abs_path in coder.abs_read_only_fnames:
                coder.io.tool_error(f"File '{file_path}' is read-only. Use MakeEditable first.")
                return f"Error: File is read-only. Use MakeEditable first."
            else:
                coder.io.tool_error(f"File '{file_path}' not in context")
                return f"Error: File not in context"

        # Reread file content immediately before modification (avoid stale reads)
        file_content = coder.io.read_text(abs_path)
        if file_content is None:
            coder.io.tool_error(f"Could not read file '{file_path}' before ReplaceLines operation.")
            return f"Error: Could not read file '{file_path}'"

        # Coerce line numbers to integers when passed as strings
        if not isinstance(start_line, int):
            try:
                start_line = int(start_line)
            except ValueError:
                coder.io.tool_error(f"Invalid start_line value: '{start_line}'. Must be an integer.")
                return f"Error: Invalid start_line value '{start_line}'"

        if not isinstance(end_line, int):
            try:
                end_line = int(end_line)
            except ValueError:
                coder.io.tool_error(f"Invalid end_line value: '{end_line}'. Must be an integer.")
                return f"Error: Invalid end_line value '{end_line}'"

        lines = file_content.splitlines()

        # Convert 1-based line numbers to 0-based indices
        start_idx = start_line - 1
        end_idx = end_line - 1

        # Validate line numbers
        if start_idx < 0 or start_idx >= len(lines):
            coder.io.tool_error(f"Start line {start_line} is out of range for file '{file_path}' (has {len(lines)} lines).")
            return f"Error: Start line {start_line} out of range"

        if end_idx < start_idx or end_idx >= len(lines):
            coder.io.tool_error(f"End line {end_line} is out of range for file '{file_path}' (must be >= start line {start_line} and <= {len(lines)}).")
            return f"Error: End line {end_line} out of range"

        # Store original content for change tracking
        original_content = file_content
        replaced_lines = lines[start_idx:end_idx+1]

        # Split the new content into lines
        new_lines = new_content.splitlines()

        # Perform the replacement
        new_full_lines = lines[:start_idx] + new_lines + lines[end_idx+1:]
        new_content_full = '\n'.join(new_full_lines)
        # BUGFIX: splitlines()/join() drops the trailing newline, so every
        # range replacement silently truncated the file's final newline.
        if file_content.endswith('\n') and not new_content_full.endswith('\n'):
            new_content_full += '\n'

        if original_content == new_content_full:
            coder.io.tool_warning("No changes made: new content is identical to original")
            return f"Warning: No changes made (new content identical to original)"

        # Create a readable diff: removed lines, separator, added lines
        diff = f"Lines {start_line}-{end_line}:\n"
        for line in replaced_lines:
            diff += f"- {line}\n"
        diff += "---\n"
        for line in new_lines:
            diff += f"+ {line}\n"

        # Handle dry run
        if dry_run:
            coder.io.tool_output(f"Dry run: Would replace lines {start_line}-{end_line} in {file_path}")
            return f"Dry run: Would replace lines {start_line}-{end_line}. Diff:\n{diff}"

        # --- Apply Change (Not dry run) ---
        coder.io.write_text(abs_path, new_content_full)

        # Track the change; tracking failures must not abort the edit itself
        try:
            metadata = {
                'start_line': start_line,
                'end_line': end_line,
                'replaced_lines': replaced_lines,
                'new_lines': new_lines
            }
            change_id = coder.change_tracker.track_change(
                file_path=rel_path,
                change_type='replacelines',
                original_content=original_content,
                new_content=new_content_full,
                metadata=metadata,
                change_id=change_id
            )
        except Exception as track_e:
            coder.io.tool_error(f"Error tracking change for ReplaceLines: {track_e}")
            change_id = "TRACKING_FAILED"

        coder.aider_edited_files.add(rel_path)
        replaced_count = end_line - start_line + 1
        new_count = len(new_lines)

        coder.io.tool_output(f"✅ Replaced lines {start_line}-{end_line} ({replaced_count} lines) with {new_count} new lines in {file_path} (change_id: {change_id})")
        return f"Successfully replaced lines {start_line}-{end_line} with {new_count} new lines (change_id: {change_id}). Diff:\n{diff}"

    except Exception as e:
        coder.io.tool_error(f"Error in ReplaceLines: {str(e)}\n{traceback.format_exc()}")
        return f"Error: {str(e)}"
import os
import traceback


def _execute_replace_text(coder, file_path, find_text, replace_text, near_context=None, occurrence=1, change_id=None, dry_run=False):
    """
    Replace one specific occurrence of ``find_text`` with ``replace_text``.

    Parameters:
    - coder: The Coder instance
    - file_path: Path to the file to modify (must already be editable in context)
    - find_text: Text to find and replace
    - replace_text: Text to replace it with
    - near_context: Optional nearby text used to disambiguate between matches
    - occurrence: Which match to edit (1-based, or -1 for the last one)
    - change_id: Optional ID for tracking the change
    - dry_run: If True, only report what would change; leave the file untouched

    Returns a result message.
    """
    try:
        abs_path = coder.abs_root_path(file_path)
        rel_path = coder.get_rel_fname(abs_path)

        # Guard clauses: the target must exist and be editable in context.
        if not os.path.isfile(abs_path):
            coder.io.tool_error(f"File '{file_path}' not found")
            return f"Error: File not found"
        if abs_path not in coder.abs_fnames:
            if abs_path in coder.abs_read_only_fnames:
                coder.io.tool_error(f"File '{file_path}' is read-only. Use MakeEditable first.")
                return f"Error: File is read-only. Use MakeEditable first."
            coder.io.tool_error(f"File '{file_path}' not in context")
            return f"Error: File not in context"

        # Fresh read right before the edit to avoid acting on stale content.
        content = coder.io.read_text(abs_path)
        if content is None:
            coder.io.tool_error(f"Could not read file '{file_path}' before ReplaceText operation.")
            return f"Error: Could not read file '{file_path}'"

        # Locate candidate matches (start offsets), context-filtered by helper.
        matches = coder._find_occurrences(content, find_text, near_context)
        if not matches:
            err_msg = f"Text '{find_text}' not found"
            if near_context:
                err_msg += f" near context '{near_context}'"
            err_msg += f" in file '{file_path}'."
            coder.io.tool_error(err_msg)
            return f"Error: {err_msg}"

        # Resolve which match to edit (1-based index; -1 means the last one).
        num_occurrences = len(matches)
        try:
            occurrence = int(occurrence)
        except ValueError:
            coder.io.tool_error(f"Invalid occurrence value: '{occurrence}'. Must be an integer.")
            return f"Error: Invalid occurrence value '{occurrence}'"
        if occurrence == -1:
            target_idx = num_occurrences - 1
        elif 0 < occurrence <= num_occurrences:
            target_idx = occurrence - 1
        else:
            err_msg = f"Occurrence number {occurrence} is out of range. Found {num_occurrences} occurrences of '{find_text}'"
            if near_context:
                err_msg += f" near '{near_context}'"
            err_msg += f" in '{file_path}'."
            coder.io.tool_error(err_msg)
            return f"Error: {err_msg}"

        start_index = matches[target_idx]

        # Splice in the replacement at the chosen offset.
        original_content = content
        new_content = content[:start_index] + replace_text + content[start_index + len(find_text):]

        if new_content == original_content:
            coder.io.tool_warning(f"No changes made: replacement text is identical to original")
            return f"Warning: No changes made (replacement identical to original)"

        diff_example = coder._generate_diff_snippet(original_content, start_index, len(find_text), replace_text)

        if dry_run:
            coder.io.tool_output(f"Dry run: Would replace occurrence {occurrence} of '{find_text}' in {file_path}")
            return f"Dry run: Would replace text (occurrence {occurrence}). Diff snippet:\n{diff_example}"

        # --- Apply Change (Not dry run) ---
        coder.io.write_text(abs_path, new_content)

        # Record the edit; tracking failures must not abort the edit itself.
        try:
            change_id = coder.change_tracker.track_change(
                file_path=rel_path,
                change_type='replacetext',
                original_content=original_content,
                new_content=new_content,
                metadata={
                    'start_index': start_index,
                    'find_text': find_text,
                    'replace_text': replace_text,
                    'near_context': near_context,
                    'occurrence': occurrence,
                },
                change_id=change_id,
            )
        except Exception as track_e:
            coder.io.tool_error(f"Error tracking change for ReplaceText: {track_e}")
            change_id = "TRACKING_FAILED"

        coder.aider_edited_files.add(rel_path)

        occurrence_str = f"occurrence {occurrence}" if num_occurrences > 1 else "text"
        coder.io.tool_output(f"✅ Replaced {occurrence_str} in {file_path} (change_id: {change_id})")
        return f"Successfully replaced {occurrence_str} (change_id: {change_id}). Diff snippet:\n{diff_example}"

    except Exception as e:
        coder.io.tool_error(f"Error in ReplaceText: {str(e)}\n{traceback.format_exc()}")
        return f"Error: {str(e)}"
Diff snippet:\n{diff_example}" + + except Exception as e: + coder.io.tool_error(f"Error in ReplaceText: {str(e)}\n{traceback.format_exc()}") + return f"Error: {str(e)}" diff --git a/aider/tools/undo_change.py b/aider/tools/undo_change.py new file mode 100644 index 000000000..9138628d1 --- /dev/null +++ b/aider/tools/undo_change.py @@ -0,0 +1,56 @@ +import traceback + +def _execute_undo_change(coder, change_id=None, file_path=None): + """ + Undo a specific change by ID, or the last change to a file. + + Parameters: + - coder: The Coder instance + - change_id: ID of the change to undo + - file_path: Path to file where the last change should be undone + + Returns a result message. + """ + # Note: Undo does not have a dry_run parameter as it's inherently about reverting a previous action. + try: + # Validate parameters + if change_id is None and file_path is None: + coder.io.tool_error("Must specify either change_id or file_path for UndoChange") + return "Error: Must specify either change_id or file_path" + + # If file_path is specified, get the most recent change for that file + if file_path: + abs_path = coder.abs_root_path(file_path) + rel_path = coder.get_rel_fname(abs_path) + + change_id = coder.change_tracker.get_last_change(rel_path) + if not change_id: + coder.io.tool_error(f"No tracked changes found for file '{file_path}' to undo.") + return f"Error: No changes found for file '{file_path}'" + + # Attempt to get undo information from the tracker + success, message, change_info = coder.change_tracker.undo_change(change_id) + + if not success: + coder.io.tool_error(f"Failed to undo change '{change_id}': {message}") + return f"Error: {message}" + + # Apply the undo by restoring the original content + if change_info: + file_path = change_info['file_path'] + abs_path = coder.abs_root_path(file_path) + # Write the original content back to the file + coder.io.write_text(abs_path, change_info['original']) + coder.aider_edited_files.add(file_path) # Track that the file 
was modified by the undo + + change_type = change_info['type'] + coder.io.tool_output(f"✅ Undid {change_type} change '{change_id}' in {file_path}") + return f"Successfully undid {change_type} change '{change_id}'." + else: + # This case should ideally not be reached if tracker returns success + coder.io.tool_error(f"Failed to undo change '{change_id}': Change info missing after successful tracker update.") + return f"Error: Failed to undo change '{change_id}' (missing change info)" + + except Exception as e: + coder.io.tool_error(f"Error in UndoChange: {str(e)}\n{traceback.format_exc()}") + return f"Error: {str(e)}" diff --git a/aider/tools/view.py b/aider/tools/view.py new file mode 100644 index 000000000..0c833ca30 --- /dev/null +++ b/aider/tools/view.py @@ -0,0 +1,13 @@ +def execute_view(coder, file_path): + """ + Explicitly add a file to context as read-only. + + This gives the LLM explicit control over what files to view, + rather than relying on indirect mentions. + """ + try: + # Use the coder's helper, marking it as an explicit view request + return coder._add_file_to_context(file_path, explicit=True) + except Exception as e: + coder.io.tool_error(f"Error viewing file: {str(e)}") + return f"Error: {str(e)}" diff --git a/aider/tools/view_files_at_glob.py b/aider/tools/view_files_at_glob.py new file mode 100644 index 000000000..9dda05d02 --- /dev/null +++ b/aider/tools/view_files_at_glob.py @@ -0,0 +1,55 @@ +import os +import fnmatch + +def execute_view_files_at_glob(coder, pattern): + """ + Execute a glob pattern and add matching files to context as read-only. + + This tool helps the LLM find files by pattern matching, similar to + how a developer would use glob patterns to find files. 
+ """ + try: + # Find files matching the pattern + matching_files = [] + + # Make the pattern relative to root if it's absolute + if pattern.startswith('/'): + pattern = os.path.relpath(pattern, coder.root) + + # Get all files in the repo + all_files = coder.get_all_relative_files() + + # Find matches with pattern matching + for file in all_files: + if fnmatch.fnmatch(file, pattern): + matching_files.append(file) + + # Limit the number of files added if there are too many matches + if len(matching_files) > coder.max_files_per_glob: + coder.io.tool_output( + f"⚠️ Found {len(matching_files)} files matching '{pattern}', " + f"limiting to {coder.max_files_per_glob} most relevant files." + ) + # Sort by modification time (most recent first) + matching_files.sort(key=lambda f: os.path.getmtime(coder.abs_root_path(f)), reverse=True) + matching_files = matching_files[:coder.max_files_per_glob] + + # Add files to context + for file in matching_files: + # Use the coder's internal method to add files + coder._add_file_to_context(file) + + # Return a user-friendly result + if matching_files: + if len(matching_files) > 10: + brief = ', '.join(matching_files[:5]) + f', and {len(matching_files)-5} more' + coder.io.tool_output(f"📂 Added {len(matching_files)} files matching '{pattern}': {brief}") + else: + coder.io.tool_output(f"📂 Added files matching '{pattern}': {', '.join(matching_files)}") + return f"Added {len(matching_files)} files: {', '.join(matching_files[:5])}{' and more' if len(matching_files) > 5 else ''}" + else: + coder.io.tool_output(f"⚠️ No files found matching '{pattern}'") + return f"No files found matching '{pattern}'" + except Exception as e: + coder.io.tool_error(f"Error in ViewFilesAtGlob: {str(e)}") + return f"Error: {str(e)}" diff --git a/aider/tools/view_files_matching.py b/aider/tools/view_files_matching.py new file mode 100644 index 000000000..cf0041fe0 --- /dev/null +++ b/aider/tools/view_files_matching.py @@ -0,0 +1,71 @@ +import os +import fnmatch + 
+def execute_view_files_matching(coder, search_pattern, file_pattern=None): + """ + Search for pattern in files and add matching files to context as read-only. + + This tool lets the LLM search for content within files, mimicking + how a developer would use grep to find relevant code. + """ + try: + # Get list of files to search + if file_pattern: + # Use glob pattern to filter files + all_files = coder.get_all_relative_files() + files_to_search = [] + for file in all_files: + if fnmatch.fnmatch(file, file_pattern): + files_to_search.append(file) + + if not files_to_search: + return f"No files matching '{file_pattern}' to search for pattern '{search_pattern}'" + else: + # Search all files if no pattern provided + files_to_search = coder.get_all_relative_files() + + # Search for pattern in files + matches = {} + for file in files_to_search: + abs_path = coder.abs_root_path(file) + try: + with open(abs_path, 'r', encoding='utf-8') as f: + content = f.read() + if search_pattern in content: + matches[file] = content.count(search_pattern) + except Exception: + # Skip files that can't be read (binary, etc.) + pass + + # Limit the number of files added if there are too many matches + if len(matches) > coder.max_files_per_glob: + coder.io.tool_output( + f"⚠️ Found '{search_pattern}' in {len(matches)} files, " + f"limiting to {coder.max_files_per_glob} files with most matches." 
+ ) + # Sort by number of matches (most matches first) + sorted_matches = sorted(matches.items(), key=lambda x: x[1], reverse=True) + matches = dict(sorted_matches[:coder.max_files_per_glob]) + + # Add matching files to context + for file in matches: + coder._add_file_to_context(file) + + # Return a user-friendly result + if matches: + # Sort by number of matches (most matches first) + sorted_matches = sorted(matches.items(), key=lambda x: x[1], reverse=True) + match_list = [f"{file} ({count} matches)" for file, count in sorted_matches[:5]] + + if len(sorted_matches) > 5: + coder.io.tool_output(f"🔍 Found '{search_pattern}' in {len(matches)} files: {', '.join(match_list)} and {len(matches)-5} more") + return f"Found in {len(matches)} files: {', '.join(match_list)} and {len(matches)-5} more" + else: + coder.io.tool_output(f"🔍 Found '{search_pattern}' in: {', '.join(match_list)}") + return f"Found in {len(matches)} files: {', '.join(match_list)}" + else: + coder.io.tool_output(f"⚠️ Pattern '{search_pattern}' not found in any files") + return f"Pattern not found in any files" + except Exception as e: + coder.io.tool_error(f"Error in ViewFilesMatching: {str(e)}") + return f"Error: {str(e)}" diff --git a/aider/tools/view_files_with_symbol.py b/aider/tools/view_files_with_symbol.py new file mode 100644 index 000000000..6a847eb70 --- /dev/null +++ b/aider/tools/view_files_with_symbol.py @@ -0,0 +1,75 @@ +import os + +def _execute_view_files_with_symbol(coder, symbol): + """ + Find files containing a specific symbol and add them to context as read-only. 
+ """ + try: + if not coder.repo_map: + coder.io.tool_output("⚠️ Repo map not available, cannot use ViewFilesWithSymbol tool.") + return "Repo map not available" + + if not symbol: + return "Error: Missing 'symbol' parameter for ViewFilesWithSymbol" + + coder.io.tool_output(f"🔎 Searching for symbol '{symbol}'...") + found_files = set() + current_context_files = coder.abs_fnames | coder.abs_read_only_fnames + files_to_search = set(coder.get_all_abs_files()) - current_context_files + + rel_fname_to_abs = {} + all_tags = [] + + for fname in files_to_search: + rel_fname = coder.get_rel_fname(fname) + rel_fname_to_abs[rel_fname] = fname + try: + tags = coder.repo_map.get_tags(fname, rel_fname) + all_tags.extend(tags) + except Exception as e: + coder.io.tool_warning(f"Could not get tags for {rel_fname}: {e}") + + # Find matching symbols + for tag in all_tags: + if tag.name == symbol: + # Use absolute path directly if available, otherwise resolve from relative path + abs_fname = rel_fname_to_abs.get(tag.rel_fname) or coder.abs_root_path(tag.fname) + if abs_fname in files_to_search: # Ensure we only add files we intended to search + found_files.add(abs_fname) + + # Limit the number of files added + if len(found_files) > coder.max_files_per_glob: + coder.io.tool_output( + f"⚠️ Found symbol '{symbol}' in {len(found_files)} files, " + f"limiting to {coder.max_files_per_glob} most relevant files." 
+ ) + # Sort by modification time (most recent first) - approximate relevance + sorted_found_files = sorted(list(found_files), key=lambda f: os.path.getmtime(f), reverse=True) + found_files = set(sorted_found_files[:coder.max_files_per_glob]) + + # Add files to context (as read-only) + added_count = 0 + added_files_rel = [] + for abs_file_path in found_files: + rel_path = coder.get_rel_fname(abs_file_path) + # Double check it's not already added somehow + if abs_file_path not in coder.abs_fnames and abs_file_path not in coder.abs_read_only_fnames: + add_result = coder._add_file_to_context(rel_path, explicit=True) # Use explicit=True for clear output + if "Added" in add_result: + added_count += 1 + added_files_rel.append(rel_path) + + if added_count > 0: + if added_count > 5: + brief = ', '.join(added_files_rel[:5]) + f', and {added_count-5} more' + coder.io.tool_output(f"🔎 Found '{symbol}' and added {added_count} files: {brief}") + else: + coder.io.tool_output(f"🔎 Found '{symbol}' and added files: {', '.join(added_files_rel)}") + return f"Found symbol '{symbol}' and added {added_count} files as read-only." + else: + coder.io.tool_output(f"⚠️ Symbol '{symbol}' not found in searchable files.") + return f"Symbol '{symbol}' not found in searchable files." + + except Exception as e: + coder.io.tool_error(f"Error in ViewFilesWithSymbol: {str(e)}") + return f"Error: {str(e)}" From 9e7ab846e5da161a05091d57cdba3ce77864f496 Mon Sep 17 00:00:00 2001 From: "Amar Sood (tekacs)" Date: Sat, 12 Apr 2025 07:10:15 -0400 Subject: [PATCH 20/63] Add the ability to view numbered context and guidance around line-editing. 
--- aider/coders/navigator_coder.py | 14 +++++ aider/coders/navigator_prompts.py | 30 ++++++++- aider/tools/view_numbered_context.py | 91 ++++++++++++++++++++++++++++ 3 files changed, 133 insertions(+), 2 deletions(-) create mode 100644 aider/tools/view_numbered_context.py diff --git a/aider/coders/navigator_coder.py b/aider/coders/navigator_coder.py index a2e28fe8e..7912aac57 100644 --- a/aider/coders/navigator_coder.py +++ b/aider/coders/navigator_coder.py @@ -54,6 +54,7 @@ from aider.tools.indent_lines import _execute_indent_lines from aider.tools.undo_change import _execute_undo_change from aider.tools.list_changes import _execute_list_changes from aider.tools.extract_lines import _execute_extract_lines +from aider.tools.view_numbered_context import execute_view_numbered_context class NavigatorCoder(Coder): """Mode where the LLM autonomously manages which files are in context.""" @@ -876,6 +877,19 @@ class NavigatorCoder(Coder): else: result_message = "Error: Missing required parameters for ExtractLines (source_file_path, target_file_path, start_pattern)" + elif norm_tool_name == 'viewnumberedcontext': + file_path = params.get('file_path') + pattern = params.get('pattern') + line_number = params.get('line_number') + context_lines = params.get('context_lines', 3) # Default context + + if file_path is not None and (pattern is not None or line_number is not None): + result_message = execute_view_numbered_context( + self, file_path, pattern, line_number, context_lines + ) + else: + result_message = "Error: Missing required parameters for ViewNumberedContext (file_path and either pattern or line_number)" + else: result_message = f"Error: Unknown tool name '{tool_name}'" diff --git a/aider/coders/navigator_prompts.py b/aider/coders/navigator_prompts.py index 044461419..dec41ac19 100644 --- a/aider/coders/navigator_prompts.py +++ b/aider/coders/navigator_prompts.py @@ -99,6 +99,9 @@ Act as an expert software engineer with the ability to autonomously navigate and 
Extract lines from `start_pattern` to `end_pattern` (or use `line_count`) in `source_file_path` and move them to `target_file_path`. Creates `target_file_path` if it doesn't exist. Use `near_context` and `occurrence` (optional, default 1, -1 for last) for `start_pattern`. `dry_run=True` simulates. *Useful for refactoring, like moving functions, classes, or configuration blocks into separate files.* +- **ViewNumberedContext**: `[tool_call(ViewNumberedContext, file_path="path/to/file.py", pattern="optional_text", line_number=optional_int, context_lines=3)]` + Displays numbered lines from `file_path` centered around a target location, without adding the file to context. Provide *either* `pattern` (to find the first occurrence) *or* `line_number` (1-based) to specify the center point. Returns the target line(s) plus `context_lines` (default 3) of surrounding context directly in the result message. Crucial for verifying exact line numbers and content before using `ReplaceLine` or `ReplaceLines`. + ### Other Tools - **Command**: `[tool_call(Command, command_string="git diff HEAD~1")]` Execute a *non-interactive* shell command. Requires user confirmation. Use for commands that don't need user input (e.g., `ls`, `git status`, `cat file`). @@ -158,6 +161,29 @@ SEARCH/REPLACE blocks can appear anywhere in your response if needed. * Use `ListChanges` to see a history of applied changes. * If you review a result diff (from a direct edit) and find the change was incorrect or applied in the wrong place, use `[tool_call(UndoChange, change_id="...")]` in your *next* message, using the `change_id` provided in the result message. Then, attempt the corrected edit. +**Using Line Number Based Tools (`ReplaceLine`, `ReplaceLines`):** +* **High Risk:** Line numbers are fragile and can become outdated due to preceding edits, even within the same multi-tool message. Using these tools without recent verification can lead to incorrect changes. +* **Mandatory Verification Workflow:** + 1. 
**Identify Target Location:** Determine the approximate location using line numbers (e.g., from linter output) or nearby text. + 2. **View Numbered Context (Separate Turn):** In one message, use `ViewNumberedContext` specifying *either* the `line_number` or a nearby `pattern` to display numbered lines for the target area. + ``` + # Example using line number + --- + [tool_call(ViewNumberedContext, file_path="path/to/file.py", line_number=APPROX_LINE, context_lines=5)] + ``` + ``` + # Example using pattern + --- + [tool_call(ViewNumberedContext, file_path="path/to/file.py", pattern="text_near_target", context_lines=5)] + ``` + 3. **Verify:** Carefully examine the numbered output in the result message to confirm the *exact* line numbers and content you intend to modify. + 4. **Edit (Next Turn):** Only in the *next* message, issue the `ReplaceLine` or `ReplaceLines` command using the verified line numbers. + ``` + --- + [tool_call(ReplaceLine, file_path="path/to/file.py", line_number=VERIFIED_LINE, new_content="...")] + ``` +* **Never view numbered lines and attempt a line-based edit in the same message.** + ### Context Management Strategy - Keep your context focused by removing files that are no longer relevant - For large codebases, maintain only 5-15 files in context at once for best performance @@ -177,13 +203,13 @@ For precise, targeted edits to code, use the granular editing tools: - **ReplaceAll**: Replace all occurrences of text in a file (e.g., rename variables) - **InsertBlock**: Insert multi-line blocks of code at specific locations - **DeleteBlock**: Remove specific sections of code -- **ReplaceLine/ReplaceLines**: Fix specific line numbers from error messages or linters +- **ReplaceLine/ReplaceLines**: Fix specific line numbers from error messages or linters (use with caution, see workflow below) - **IndentLines**: Adjust indentation of code blocks - **UndoChange**: Reverse specific changes by ID if you make a mistake #### When to Use Line Number Based 
Tools -When dealing with errors or warnings that include line numbers, prefer the line-based editing tools: +When dealing with errors or warnings that include line numbers, you *can* use the line-based editing tools, but **you MUST follow the mandatory verification workflow described in the `## Granular Editing Workflow` section above.** This involves using `ViewNumberedContext` in one turn to verify the lines, and then using `ReplaceLine`/`ReplaceLines` in the *next* turn. ``` Error in /path/to/file.py line 42: Syntax error: unexpected token diff --git a/aider/tools/view_numbered_context.py b/aider/tools/view_numbered_context.py new file mode 100644 index 000000000..775e62b5e --- /dev/null +++ b/aider/tools/view_numbered_context.py @@ -0,0 +1,91 @@ +import os + +def execute_view_numbered_context(coder, file_path, pattern=None, line_number=None, context_lines=3): + """ + Displays numbered lines from file_path centered around a target location + (pattern or line_number), without adding the file to context. + """ + error_message = None + if not (pattern is None) ^ (line_number is None): + error_message = "Provide exactly one of 'pattern' or 'line_number'." 
+ coder.io.tool_error(error_message) + return f"Error: {error_message}" + + abs_path = coder.abs_root_path(file_path) + if not os.path.exists(abs_path): + error_message = f"File not found: {file_path}" + coder.io.tool_error(error_message) + return f"Error: {error_message}" + + try: + content = coder.io.read_text(abs_path) + if content is None: + error_message = f"Could not read file: {file_path}" + coder.io.tool_error(error_message) + return f"Error: {error_message}" + lines = content.splitlines() + num_lines = len(lines) + + center_line_idx = -1 + found_by = "" + + if line_number is not None: + try: + line_number_int = int(line_number) + if 1 <= line_number_int <= num_lines: + center_line_idx = line_number_int - 1 # Convert to 0-based index + found_by = f"line {line_number_int}" + else: + error_message = f"Line number {line_number_int} is out of range (1-{num_lines}) for {file_path}." + coder.io.tool_error(error_message) + return f"Error: {error_message}" + except ValueError: + error_message = f"Invalid line number '{line_number}'. Must be an integer." + coder.io.tool_error(error_message) + return f"Error: {error_message}" + + elif pattern is not None: + first_match_line_idx = -1 + for i, line in enumerate(lines): + if pattern in line: + first_match_line_idx = i + break + + if first_match_line_idx != -1: + center_line_idx = first_match_line_idx + found_by = f"pattern '{pattern}' on line {center_line_idx + 1}" + else: + error_message = f"Pattern '{pattern}' not found in {file_path}." + coder.io.tool_error(error_message) + return f"Error: {error_message}" + + if center_line_idx == -1: + # Should not happen if logic above is correct, but as a safeguard + error_message = "Could not determine center line." 
+ coder.io.tool_error(error_message) + return f"Error: {error_message}" + + # Calculate context window + try: + context_lines_int = int(context_lines) + except ValueError: + coder.io.tool_warning(f"Invalid context_lines value '{context_lines}', using default 3.") + context_lines_int = 3 + + start_line_idx = max(0, center_line_idx - context_lines_int) + end_line_idx = min(num_lines - 1, center_line_idx + context_lines_int) + + # Format output + output_lines = [f"Displaying context around {found_by} in {file_path}:"] + max_line_num_width = len(str(end_line_idx + 1)) # Width for padding + + for i in range(start_line_idx, end_line_idx + 1): + line_num_str = str(i + 1).rjust(max_line_num_width) + output_lines.append(f"{line_num_str} | {lines[i]}") + + return "\n".join(output_lines) + + except Exception as e: + error_message = f"Error processing {file_path}: {e}" + coder.io.tool_error(error_message) + return f"Error: {error_message}" From 2b13763c9a2756f7ffb30b7d1bb48a05b830d9be Mon Sep 17 00:00:00 2001 From: "Amar Sood (tekacs)" Date: Sat, 12 Apr 2025 07:28:17 -0400 Subject: [PATCH 21/63] Update guidance around granular tool edits. --- aider/coders/navigator_prompts.py | 16 +++++++++------- 1 file changed, 9 insertions(+), 7 deletions(-) diff --git a/aider/coders/navigator_prompts.py b/aider/coders/navigator_prompts.py index dec41ac19..5304fdf37 100644 --- a/aider/coders/navigator_prompts.py +++ b/aider/coders/navigator_prompts.py @@ -152,12 +152,13 @@ SEARCH/REPLACE blocks can appear anywhere in your response if needed. 3. **Apply Edits (Default: Direct Edit)**: * For most edits where you are confident in the parameters (file path, patterns, line numbers), apply the change directly using the tool with `dry_run=False` (or omitting the parameter). * **Crucially, always review the diff snippet provided in the `[Result (ToolName): ...]` message** to confirm the change was applied correctly and in the intended location. -4. 
**(Optional) Use `dry_run=True` for Higher Risk:** Consider using `dry_run=True` *before* applying the actual edit if the situation involves higher risk, such as: +4. **Verify Pattern Matches Before Editing:** For pattern-based tools (`InsertBlock`, `DeleteBlock`, `IndentLines`, `ExtractLines`, `ReplaceText`), **you MUST first carefully examine the complete file content already provided in the chat context** to confirm your `start_pattern`, `end_pattern`, and `near_context` parameters uniquely identify the *exact* target location. Do *not* rely on memory or previous views; always check the current context. This verification does *not* require `ViewNumberedContext`. +5. **(Optional) Use `dry_run=True` for Higher Risk:** Consider using `dry_run=True` *before* applying the actual edit if the situation involves higher risk, such as: * Using `ReplaceAll`, especially with potentially common search text. - * Using pattern-based tools (`InsertBlock`, `DeleteBlock`, `IndentLines`, `ReplaceText`) where the pattern might occur multiple times and `near_context`/`occurrence` might not guarantee targeting the correct instance. + * Using pattern-based tools (`InsertBlock`, `DeleteBlock`, `IndentLines`, `ReplaceText`) where the pattern might occur multiple times and `near_context`/`occurrence` might not guarantee targeting the correct instance, *even after performing the verification step above*. * Using line-number based tools (`ReplaceLine`, `ReplaceLines`) *after* other edits have already been made to the *same file* within the *same message*, as line numbers might have shifted unexpectedly. * If using `dry_run=True`, review the simulated diff in the result. If it looks correct, issue the *exact same tool call* again with `dry_run=False` (or omitted). -5. **Review and Recover:** +6. **Review and Recover:** * Use `ListChanges` to see a history of applied changes. 
* If you review a result diff (from a direct edit) and find the change was incorrect or applied in the wrong place, use `[tool_call(UndoChange, change_id="...")]` in your *next* message, using the `change_id` provided in the result message. Then, attempt the corrected edit. @@ -185,10 +186,11 @@ SEARCH/REPLACE blocks can appear anywhere in your response if needed. * **Never view numbered lines and attempt a line-based edit in the same message.** ### Context Management Strategy -- Keep your context focused by removing files that are no longer relevant -- For large codebases, maintain only 5-15 files in context at once for best performance -- Files are added as read-only by default; only make files editable when you need to modify them -- Toggle context management with `/context-management` if you need complete content of large files +- **Remember: Files added with `View` or `MakeEditable` remain fully visible in the context for subsequent messages until you explicitly `Remove` them.** +- Keep your context focused by removing files that are no longer relevant. +- For large codebases, maintain only 5-15 files in context at once for best performance. +- Files are added as read-only by default; only make files editable when you need to modify them. +- Toggle context management with `/context-management` if you need complete content of large files. 
From 6691f626fa67657cd63854d1cb9320cc89e8bf2e Mon Sep 17 00:00:00 2001 From: "Amar Sood (tekacs)" Date: Sat, 12 Apr 2025 07:31:10 -0400 Subject: [PATCH 22/63] Prompt refinements, to try to guide the LLM's style more --- aider/coders/navigator_prompts.py | 113 +++++++++++++++++------------- 1 file changed, 66 insertions(+), 47 deletions(-) diff --git a/aider/coders/navigator_prompts.py b/aider/coders/navigator_prompts.py index 5304fdf37..03cb7b760 100644 --- a/aider/coders/navigator_prompts.py +++ b/aider/coders/navigator_prompts.py @@ -16,15 +16,21 @@ class NavigatorPrompts(CoderPrompts): ## Role and Purpose Act as an expert software engineer with the ability to autonomously navigate and modify a codebase. +### Proactiveness and Confirmation +- **Explore proactively:** You are encouraged to use file discovery tools (`ViewFilesAtGlob`, `ViewFilesMatching`, `Ls`, `ViewFilesWithSymbol`) and context management tools (`View`, `Remove`) autonomously to gather information needed to fulfill the user's request. Use tool calls to continue exploration across multiple turns. +- **Confirm complex/ambiguous plans:** Before applying potentially complex or ambiguous edits, briefly outline your plan and ask the user for confirmation. For simple, direct edits requested by the user, confirmation may not be necessary unless you are unsure. + ## Response Style Guidelines -- Be concise and direct in your responses -- Focus on answering the specific question asked -- For complex tasks, provide structured step-by-step explanations -- When exploring, clearly indicate your search strategy -- When editing, explain your changes briefly before presenting edit blocks -- For ambiguous references to lists or items, prioritize user-mentioned content over system-defined items -- Use markdown for formatting when appropriate -- End with a clear call-to-action or conclusion +- **Be extremely concise and direct.** Prioritize brevity in all responses. 
+- **Minimize output tokens.** Only provide essential information. +- **Answer the specific question asked.** Avoid tangential information or elaboration unless requested. +- **Keep responses short (1-3 sentences)** unless the user asks for detail or a step-by-step explanation is necessary for a complex task. +- **Avoid unnecessary preamble or postamble.** Do not start with "Okay, I will..." or end with summaries unless crucial. +- When exploring, *briefly* indicate your search strategy. +- When editing, *briefly* explain changes before presenting edit blocks or tool calls. +- For ambiguous references, prioritize user-mentioned items. +- Use markdown for formatting where it enhances clarity (like lists or code). +- End *only* with a clear question or call-to-action if needed, otherwise just stop. @@ -113,15 +119,23 @@ When you include any tool call, the system will automatically continue to the ne -## Navigation Workflow +## Navigation and Task Workflow + +### General Task Flow +1. **Understand Request:** Ensure you fully understand the user's goal. Ask clarifying questions if needed. +2. **Explore & Search:** Use discovery tools (`ViewFilesAtGlob`, `ViewFilesMatching`, `Ls`, `ViewFilesWithSymbol`) and context tools (`View`) proactively to locate relevant files and understand the existing code. Use `Remove` to keep context focused. +3. **Plan Changes (If Editing):** Determine the necessary edits. For complex changes, outline your plan briefly for the user. +4. **Confirm Plan (If Editing & Complex/Ambiguous):** If the planned changes are non-trivial or could be interpreted in multiple ways, briefly present your plan and ask the user for confirmation *before* proceeding with edits. +5. **Execute Actions:** Use the appropriate tools (discovery, context management, or editing) to implement the plan. Remember to use `MakeEditable` before attempting edits. +6. 
**Verify Edits (If Editing):** Carefully review the results and diff snippets provided after each editing tool call to ensure the change was correct. +7. **Final Response:** Provide the final answer or result. Omit tool calls unless further exploration is needed. ### Exploration Strategy -1. **Initial Discovery**: Use `ViewFilesAtGlob`, `ViewFilesMatching`, `Ls`, or `ViewFilesWithSymbol` to identify relevant files -2. **Focused Investigation**: Add promising files to context with `View` -3. **Context Management**: Remove irrelevant files with `Remove` to maintain focus -4. **Preparation for Editing**: Convert files to editable with `MakeEditable` when needed -5. **Continued Exploration**: Include any tool call to automatically continue to the next round -6. **Final Response**: Omit all tool calls when you have sufficient information to provide a final answer +- Use discovery tools (`ViewFilesAtGlob`, `ViewFilesMatching`, `Ls`, `ViewFilesWithSymbol`) to identify relevant files initially. +- Add promising files to context with `View` for focused investigation. +- Remove irrelevant files with `Remove` to maintain focus. +- Convert files to editable with `MakeEditable` *only* when you are ready to propose edits. +- Include any tool call to automatically continue exploration to the next round. ### Tool Usage Best Practices - All tool calls MUST be placed after a '---' line separator at the end of your message @@ -145,22 +159,25 @@ SEARCH/REPLACE blocks can appear anywhere in your response if needed. ## Granular Editing Workflow -**Note on Sequential Edits:** Tool calls within a single message execute sequentially. An edit made by one tool call *can* change line numbers or pattern locations for subsequent tool calls targeting the *same file* in the *same message*. Always check the result message and diff snippet after each edit. +**Sequential Edits Warning:** Tool calls within a single message execute sequentially. 
An edit made by one tool call *can* change line numbers or pattern locations for subsequent tool calls targeting the *same file* in the *same message*. **Always check the result message and diff snippet after each edit.** -1. **Discover and View Files**: Use `ViewFilesAtGlob`, `ViewFilesMatching`, `ViewFilesWithSymbol` to locate relevant files. Use `View` to add specific files. -2. **Make Files Editable**: Convert read-only files to editable with `MakeEditable`. For efficiency, you may include this tool call in the *same message* as the edit tool calls or SEARCH/REPLACE blocks that follow for the same file. -3. **Apply Edits (Default: Direct Edit)**: - * For most edits where you are confident in the parameters (file path, patterns, line numbers), apply the change directly using the tool with `dry_run=False` (or omitting the parameter). - * **Crucially, always review the diff snippet provided in the `[Result (ToolName): ...]` message** to confirm the change was applied correctly and in the intended location. -4. **Verify Pattern Matches Before Editing:** For pattern-based tools (`InsertBlock`, `DeleteBlock`, `IndentLines`, `ExtractLines`, `ReplaceText`), **you MUST first carefully examine the complete file content already provided in the chat context** to confirm your `start_pattern`, `end_pattern`, and `near_context` parameters uniquely identify the *exact* target location. Do *not* rely on memory or previous views; always check the current context. This verification does *not* require `ViewNumberedContext`. -5. **(Optional) Use `dry_run=True` for Higher Risk:** Consider using `dry_run=True` *before* applying the actual edit if the situation involves higher risk, such as: - * Using `ReplaceAll`, especially with potentially common search text. 
- * Using pattern-based tools (`InsertBlock`, `DeleteBlock`, `IndentLines`, `ReplaceText`) where the pattern might occur multiple times and `near_context`/`occurrence` might not guarantee targeting the correct instance, *even after performing the verification step above*. - * Using line-number based tools (`ReplaceLine`, `ReplaceLines`) *after* other edits have already been made to the *same file* within the *same message*, as line numbers might have shifted unexpectedly. - * If using `dry_run=True`, review the simulated diff in the result. If it looks correct, issue the *exact same tool call* again with `dry_run=False` (or omitted). -6. **Review and Recover:** - * Use `ListChanges` to see a history of applied changes. - * If you review a result diff (from a direct edit) and find the change was incorrect or applied in the wrong place, use `[tool_call(UndoChange, change_id="...")]` in your *next* message, using the `change_id` provided in the result message. Then, attempt the corrected edit. +1. **Discover and View Files**: Use discovery tools and `View` as needed. +2. **Make Files Editable**: Use `MakeEditable` for files you intend to change. Can be combined in the same message as subsequent edits to that file. +3. **Plan & Confirm Edits (If Needed)**: Determine necessary edits. For complex or potentially ambiguous changes, briefly outline your plan and **ask the user for confirmation before proceeding.** For simple, direct changes, proceed to verification. +4. **Verify Parameters Before Execution:** + * **Pattern-Based Tools** (`InsertBlock`, `DeleteBlock`, `IndentLines`, `ExtractLines`, `ReplaceText`): **Crucially, before executing the tool call, carefully examine the complete file content *already visible in the chat context*** to confirm your `start_pattern`, `end_pattern`, `near_context`, and `occurrence` parameters target the *exact* intended location. Do *not* rely on memory. This verification uses the existing context, *not* `ViewNumberedContext`. 
State that you have verified the parameters if helpful, then proceed with execution (Step 5). + * **Line-Number Based Tools** (`ReplaceLine`, `ReplaceLines`): **Mandatory Verification Workflow:** Follow the strict two-turn process using `ViewNumberedContext` as detailed below. Never view and edit lines in the same turn. +5. **Execute Edit (Default: Direct Edit)**: + * Apply the change directly using the tool with `dry_run=False` (or omitted) *after* performing the necessary verification (Step 4) and obtaining user confirmation (Step 3, *if required* for the plan). + * **Immediately review the diff snippet in the `[Result (ToolName): ...]` message** to confirm the change was correct. +6. **(Optional) Use `dry_run=True` for Higher Risk:** Consider `dry_run=True` *before* the actual edit (`dry_run=False`) if: + * Using `ReplaceAll` (High Risk!). + * Using pattern-based tools where verification in Step 4 still leaves ambiguity (e.g., multiple similar patterns). + * Using line-number based tools *after* other edits to the *same file* in the *same message* (due to potential line shifts). + * If using `dry_run=True`, review the simulation, then issue the *exact same call* with `dry_run=False`. +7. **Review and Recover:** + * Use `ListChanges` to review history. + * If a direct edit's result diff shows an error, **immediately use `[tool_call(UndoChange, change_id="...")]` in your *next* message** before attempting a corrected edit. **Using Line Number Based Tools (`ReplaceLine`, `ReplaceLines`):** * **High Risk:** Line numbers are fragile and can become outdated due to preceding edits, even within the same multi-tool message. Using these tools without recent verification can lead to incorrect changes. @@ -196,18 +213,19 @@ SEARCH/REPLACE blocks can appear anywhere in your response if needed. 
## Code Editing Process -### Granular Editing with Tool Calls (Preferred Method) -**Strongly prefer using the granular editing tools below for all code modifications.** They offer precision and reduce the risk of errors compared to SEARCH/REPLACE blocks. Only resort to SEARCH/REPLACE for complex, multi-location refactoring where granular tools would be exceptionally cumbersome. +### Granular Editing with Tool Calls (Strongly Preferred Method) +**Use the granular editing tools whenever possible.** They offer the most precision and safety. Only use SEARCH/REPLACE as a fallback for complex refactoring where tools are impractical. -For precise, targeted edits to code, use the granular editing tools: - -- **ReplaceText**: Replace specific instances of text in a file -- **ReplaceAll**: Replace all occurrences of text in a file (e.g., rename variables) -- **InsertBlock**: Insert multi-line blocks of code at specific locations -- **DeleteBlock**: Remove specific sections of code -- **ReplaceLine/ReplaceLines**: Fix specific line numbers from error messages or linters (use with caution, see workflow below) -- **IndentLines**: Adjust indentation of code blocks -- **UndoChange**: Reverse specific changes by ID if you make a mistake +**Available Granular Tools:** +- `ReplaceText`: For specific text instances. +- `ReplaceAll`: **Use with extreme caution!** Best suited for targeted renaming across a file. Consider `dry_run=True` first. Can easily cause unintended changes if `find_text` is common. +- `InsertBlock`: For adding code blocks. +- `DeleteBlock`: For removing code sections. +- `ReplaceLine`/`ReplaceLines`: For line-specific fixes (requires strict `ViewNumberedContext` verification). +- `IndentLines`: For adjusting indentation. +- `ExtractLines`: For moving code between files. +- `UndoChange`: For reverting specific edits. +- `ListChanges`: For reviewing edit history. 
#### When to Use Line Number Based Tools @@ -284,12 +302,13 @@ NOTE that this uses four backticks as the fence and not three! - To move code within a file, use two separate SEARCH/REPLACE blocks - Respect the file paths exactly as they appear -### Error Handling -- If a tool call returns an error message, analyze the error and try correcting the tool call parameters. -- If a tool call succeeds but the **result message and diff snippet show the change was applied incorrectly** (e.g., wrong location, unintended side effects), use `[tool_call(UndoChange, change_id="...")]` in your next message to revert it before attempting a corrected version. -- Refine search patterns or use `near_context`/`occurrence` if edits affect the wrong location. -- Use the enhanced context blocks (directory structure and git status) to re-orient yourself if needed. -- Use `ListChanges` to review the sequence of successful changes. +### Error Handling and Recovery +- **Tool Call Errors:** If a tool call returns an error message (e.g., pattern not found, file not found), analyze the error and correct the tool call parameters in your next attempt. +- **Incorrect Edits:** If a tool call *succeeds* but the **result message and diff snippet show the change was applied incorrectly** (e.g., wrong location, unintended side effects): + 1. **Immediately use `[tool_call(UndoChange, change_id="...")]` in your *very next* message**, using the `change_id` provided in the result. Do not attempt other actions first. + 2. After undoing, analyze why the edit was incorrect (e.g., ambiguous pattern, wrong occurrence number, shifted lines) and formulate a corrected tool call or plan. +- **Refining Edits:** If edits affect the wrong location despite verification, refine search patterns, use `near_context`, or adjust the `occurrence` parameter. +- **Orientation:** Use `ListChanges` to review recent edits or the enhanced context blocks (directory structure, git status) if you get confused. 
Always reply to the user in {language}. From 646ed365274050fb1593198b80c44cd891e16a4c Mon Sep 17 00:00:00 2001 From: "Amar Sood (tekacs)" Date: Sat, 12 Apr 2025 07:42:08 -0400 Subject: [PATCH 23/63] Remind the LLM that it doesn't need to use View on files found by Discovery --- aider/coders/navigator_prompts.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/aider/coders/navigator_prompts.py b/aider/coders/navigator_prompts.py index 03cb7b760..96904d21b 100644 --- a/aider/coders/navigator_prompts.py +++ b/aider/coders/navigator_prompts.py @@ -38,18 +38,18 @@ Act as an expert software engineer with the ability to autonomously navigate and ### File Discovery Tools - **ViewFilesAtGlob**: `[tool_call(ViewFilesAtGlob, pattern="**/*.py")]` - Find files matching a glob pattern and add them to context as read-only. + Find files matching a glob pattern. **Found files are automatically added to context as read-only.** Supports patterns like "src/**/*.ts" or "*.json". - **ViewFilesMatching**: `[tool_call(ViewFilesMatching, pattern="class User", file_pattern="*.py")]` - Search for text in files and add matching files to context as read-only. + Search for text in files. **Matching files are automatically added to context as read-only.** Files with more matches are prioritized. `file_pattern` is optional. - **Ls**: `[tool_call(Ls, directory="src/components")]` List files in a directory. Useful for exploring the project structure. - **ViewFilesWithSymbol**: `[tool_call(ViewFilesWithSymbol, symbol="my_function")]` - Find files containing a specific symbol (function, class, variable) and add them to context as read-only. + Find files containing a specific symbol (function, class, variable). **Found files are automatically added to context as read-only.** Leverages the repo map for accurate symbol lookup. ### Context Management Tools @@ -131,8 +131,8 @@ When you include any tool call, the system will automatically continue to the ne 7. 
**Final Response:** Provide the final answer or result. Omit tool calls unless further exploration is needed. ### Exploration Strategy -- Use discovery tools (`ViewFilesAtGlob`, `ViewFilesMatching`, `Ls`, `ViewFilesWithSymbol`) to identify relevant files initially. -- Add promising files to context with `View` for focused investigation. +- Use discovery tools (`ViewFilesAtGlob`, `ViewFilesMatching`, `Ls`, `ViewFilesWithSymbol`) to identify relevant files initially. **These tools automatically add found files to context as read-only.** +- Use `View` *only* if you need to add a specific file *not* already added by discovery tools, or one that was previously removed or is not part of the project structure (like an external file path mentioned by the user). - Remove irrelevant files with `Remove` to maintain focus. - Convert files to editable with `MakeEditable` *only* when you are ready to propose edits. - Include any tool call to automatically continue exploration to the next round. @@ -141,7 +141,7 @@ When you include any tool call, the system will automatically continue to the ne - All tool calls MUST be placed after a '---' line separator at the end of your message - Use the exact syntax `[tool_call(ToolName, param1=value1, param2="value2")]` for execution - Tool names are case-insensitive; parameters can be unquoted or quoted -- Verify files aren't already in context before adding them with `View` +- **Remember:** Discovery tools (`ViewFilesAtGlob`, `ViewFilesMatching`, `ViewFilesWithSymbol`) automatically add found files to context. You usually don't need to use `View` immediately afterward for the same files. Verify files aren't already in context *before* using `View`. 
- Use precise search patterns with `ViewFilesMatching` and `file_pattern` to narrow scope - Target specific patterns rather than overly broad searches - Remember the `ViewFilesWithSymbol` tool is optimized for locating symbols across the codebase From 032a0e5d35ed63116f9156fd19e4a9d94af3f384 Mon Sep 17 00:00:00 2001 From: "Amar Sood (tekacs)" Date: Sat, 12 Apr 2025 07:42:08 -0400 Subject: [PATCH 24/63] Fix find files with symbol --- aider/coders/navigator_coder.py | 7 +++- aider/tools/view_files_with_symbol.py | 55 ++++++++++++++++++++------- 2 files changed, 47 insertions(+), 15 deletions(-) diff --git a/aider/coders/navigator_coder.py b/aider/coders/navigator_coder.py index 7912aac57..551bbd7b4 100644 --- a/aider/coders/navigator_coder.py +++ b/aider/coders/navigator_coder.py @@ -55,10 +55,12 @@ from aider.tools.undo_change import _execute_undo_change from aider.tools.list_changes import _execute_list_changes from aider.tools.extract_lines import _execute_extract_lines from aider.tools.view_numbered_context import execute_view_numbered_context +from aider.tools.view_files_with_symbol import _execute_view_files_with_symbol # Import the function + class NavigatorCoder(Coder): """Mode where the LLM autonomously manages which files are in context.""" - + edit_format = "navigator" gpt_prompts = NavigatorPrompts() @@ -719,9 +721,12 @@ class NavigatorCoder(Coder): elif norm_tool_name == 'viewfileswithsymbol': symbol = params.get('symbol') if symbol is not None: + # Call the imported function from the tools directory result_message = _execute_view_files_with_symbol(self, symbol) else: result_message = "Error: Missing 'symbol' parameter for ViewFilesWithSymbol" + + # Command tools elif norm_tool_name == 'command': command_string = params.get('command_string') if command_string is not None: diff --git a/aider/tools/view_files_with_symbol.py b/aider/tools/view_files_with_symbol.py index 6a847eb70..ebf849cd2 100644 --- a/aider/tools/view_files_with_symbol.py +++ 
b/aider/tools/view_files_with_symbol.py @@ -2,17 +2,43 @@ import os def _execute_view_files_with_symbol(coder, symbol): """ - Find files containing a specific symbol and add them to context as read-only. + Find files containing a symbol using RepoMap and add them to context. + Checks files already in context first. """ + if not coder.repo_map: + coder.io.tool_output("⚠️ Repo map not available, cannot use ViewFilesWithSymbol tool.") + return "Repo map not available" + + if not symbol: + return "Error: Missing 'symbol' parameter for ViewFilesWithSymbol" + + # --- Start Modification --- + # 1. Check files already in context + files_in_context = list(coder.abs_fnames) + list(coder.abs_read_only_fnames) + found_in_context = [] + for abs_fname in files_in_context: + rel_fname = coder.get_rel_fname(abs_fname) + try: + # Use get_tags for consistency with RepoMap usage elsewhere for now. + tags = coder.repo_map.get_tags(abs_fname, rel_fname) + for tag in tags: + if tag.name == symbol: + found_in_context.append(rel_fname) + break # Found in this file, move to next + except Exception as e: + coder.io.tool_warning(f"Could not get symbols for {rel_fname} while checking context: {e}") + + if found_in_context: + # Symbol found in already loaded files. Report this and stop. + file_list = ", ".join(sorted(list(set(found_in_context)))) + coder.io.tool_output(f"Symbol '{symbol}' found in already loaded file(s): {file_list}. No external search performed.") + return f"Symbol '{symbol}' found in already loaded file(s): {file_list}. No external search performed." + # --- End Modification --- + + + # 2. 
If not found in context, search the repository using RepoMap + coder.io.tool_output(f"🔎 Searching for symbol '{symbol}' in repository (excluding current context)...") try: - if not coder.repo_map: - coder.io.tool_output("⚠️ Repo map not available, cannot use ViewFilesWithSymbol tool.") - return "Repo map not available" - - if not symbol: - return "Error: Missing 'symbol' parameter for ViewFilesWithSymbol" - - coder.io.tool_output(f"🔎 Searching for symbol '{symbol}'...") found_files = set() current_context_files = coder.abs_fnames | coder.abs_read_only_fnames files_to_search = set(coder.get_all_abs_files()) - current_context_files @@ -54,8 +80,9 @@ def _execute_view_files_with_symbol(coder, symbol): rel_path = coder.get_rel_fname(abs_file_path) # Double check it's not already added somehow if abs_file_path not in coder.abs_fnames and abs_file_path not in coder.abs_read_only_fnames: - add_result = coder._add_file_to_context(rel_path, explicit=True) # Use explicit=True for clear output - if "Added" in add_result: + # Use explicit=True for clear output, even though it's an external search result + add_result = coder._add_file_to_context(rel_path, explicit=True) + if "Added" in add_result or "Viewed" in add_result: # Count successful adds/views added_count += 1 added_files_rel.append(rel_path) @@ -67,9 +94,9 @@ def _execute_view_files_with_symbol(coder, symbol): coder.io.tool_output(f"🔎 Found '{symbol}' and added files: {', '.join(added_files_rel)}") return f"Found symbol '{symbol}' and added {added_count} files as read-only." else: - coder.io.tool_output(f"⚠️ Symbol '{symbol}' not found in searchable files.") - return f"Symbol '{symbol}' not found in searchable files." + coder.io.tool_output(f"⚠️ Symbol '{symbol}' not found in searchable files (outside current context).") + return f"Symbol '{symbol}' not found in searchable files (outside current context)." 
except Exception as e: coder.io.tool_error(f"Error in ViewFilesWithSymbol: {str(e)}") - return f"Error: {str(e)}" + return f"Error: {str(e)}" \ No newline at end of file From 17964f476c53046b4cfbc96e4e2b1a618cb9f451 Mon Sep 17 00:00:00 2001 From: "Amar Sood (tekacs)" Date: Sat, 12 Apr 2025 07:51:08 -0400 Subject: [PATCH 25/63] Rename ViewNumberedContext to ShowNumberedContext --- aider/coders/navigator_coder.py | 6 +++--- aider/coders/navigator_prompts.py | 16 ++++++++-------- ...bered_context.py => show_numbered_context.py} | 2 +- 3 files changed, 12 insertions(+), 12 deletions(-) rename aider/tools/{view_numbered_context.py => show_numbered_context.py} (98%) diff --git a/aider/coders/navigator_coder.py b/aider/coders/navigator_coder.py index 551bbd7b4..b7763a691 100644 --- a/aider/coders/navigator_coder.py +++ b/aider/coders/navigator_coder.py @@ -54,7 +54,7 @@ from aider.tools.indent_lines import _execute_indent_lines from aider.tools.undo_change import _execute_undo_change from aider.tools.list_changes import _execute_list_changes from aider.tools.extract_lines import _execute_extract_lines -from aider.tools.view_numbered_context import execute_view_numbered_context +from aider.tools.show_numbered_context import execute_show_numbered_context from aider.tools.view_files_with_symbol import _execute_view_files_with_symbol # Import the function @@ -882,14 +882,14 @@ class NavigatorCoder(Coder): else: result_message = "Error: Missing required parameters for ExtractLines (source_file_path, target_file_path, start_pattern)" - elif norm_tool_name == 'viewnumberedcontext': + elif norm_tool_name == 'shownumberedcontext': file_path = params.get('file_path') pattern = params.get('pattern') line_number = params.get('line_number') context_lines = params.get('context_lines', 3) # Default context if file_path is not None and (pattern is not None or line_number is not None): - result_message = execute_view_numbered_context( + result_message = execute_show_numbered_context( 
self, file_path, pattern, line_number, context_lines ) else: diff --git a/aider/coders/navigator_prompts.py b/aider/coders/navigator_prompts.py index 96904d21b..dce1ceffd 100644 --- a/aider/coders/navigator_prompts.py +++ b/aider/coders/navigator_prompts.py @@ -105,7 +105,7 @@ Act as an expert software engineer with the ability to autonomously navigate and Extract lines from `start_pattern` to `end_pattern` (or use `line_count`) in `source_file_path` and move them to `target_file_path`. Creates `target_file_path` if it doesn't exist. Use `near_context` and `occurrence` (optional, default 1, -1 for last) for `start_pattern`. `dry_run=True` simulates. *Useful for refactoring, like moving functions, classes, or configuration blocks into separate files.* -- **ViewNumberedContext**: `[tool_call(ViewNumberedContext, file_path="path/to/file.py", pattern="optional_text", line_number=optional_int, context_lines=3)]` +- **ShowNumberedContext**: `[tool_call(ShowNumberedContext, file_path="path/to/file.py", pattern="optional_text", line_number=optional_int, context_lines=3)]` Displays numbered lines from `file_path` centered around a target location, without adding the file to context. Provide *either* `pattern` (to find the first occurrence) *or* `line_number` (1-based) to specify the center point. Returns the target line(s) plus `context_lines` (default 3) of surrounding context directly in the result message. Crucial for verifying exact line numbers and content before using `ReplaceLine` or `ReplaceLines`. ### Other Tools @@ -165,8 +165,8 @@ SEARCH/REPLACE blocks can appear anywhere in your response if needed. 2. **Make Files Editable**: Use `MakeEditable` for files you intend to change. Can be combined in the same message as subsequent edits to that file. 3. **Plan & Confirm Edits (If Needed)**: Determine necessary edits. 
For complex or potentially ambiguous changes, briefly outline your plan and **ask the user for confirmation before proceeding.** For simple, direct changes, proceed to verification. 4. **Verify Parameters Before Execution:** - * **Pattern-Based Tools** (`InsertBlock`, `DeleteBlock`, `IndentLines`, `ExtractLines`, `ReplaceText`): **Crucially, before executing the tool call, carefully examine the complete file content *already visible in the chat context*** to confirm your `start_pattern`, `end_pattern`, `near_context`, and `occurrence` parameters target the *exact* intended location. Do *not* rely on memory. This verification uses the existing context, *not* `ViewNumberedContext`. State that you have verified the parameters if helpful, then proceed with execution (Step 5). - * **Line-Number Based Tools** (`ReplaceLine`, `ReplaceLines`): **Mandatory Verification Workflow:** Follow the strict two-turn process using `ViewNumberedContext` as detailed below. Never view and edit lines in the same turn. + * **Pattern-Based Tools** (`InsertBlock`, `DeleteBlock`, `IndentLines`, `ExtractLines`, `ReplaceText`): **Crucially, before executing the tool call, carefully examine the complete file content *already visible in the chat context*** to confirm your `start_pattern`, `end_pattern`, `near_context`, and `occurrence` parameters target the *exact* intended location. Do *not* rely on memory. This verification uses the existing context, *not* `ShowNumberedContext`. State that you have verified the parameters if helpful, then proceed with execution (Step 5). + * **Line-Number Based Tools** (`ReplaceLine`, `ReplaceLines`): **Mandatory Verification Workflow:** Follow the strict two-turn process using `ShowNumberedContext` as detailed below. Never view and edit lines in the same turn. 5. 
**Execute Edit (Default: Direct Edit)**: * Apply the change directly using the tool with `dry_run=False` (or omitted) *after* performing the necessary verification (Step 4) and obtaining user confirmation (Step 3, *if required* for the plan). * **Immediately review the diff snippet in the `[Result (ToolName): ...]` message** to confirm the change was correct. @@ -183,16 +183,16 @@ SEARCH/REPLACE blocks can appear anywhere in your response if needed. * **High Risk:** Line numbers are fragile and can become outdated due to preceding edits, even within the same multi-tool message. Using these tools without recent verification can lead to incorrect changes. * **Mandatory Verification Workflow:** 1. **Identify Target Location:** Determine the approximate location using line numbers (e.g., from linter output) or nearby text. - 2. **View Numbered Context (Separate Turn):** In one message, use `ViewNumberedContext` specifying *either* the `line_number` or a nearby `pattern` to display numbered lines for the target area. + 2. **View Numbered Context (Separate Turn):** In one message, use `ShowNumberedContext` specifying *either* the `line_number` or a nearby `pattern` to display numbered lines for the target area. ``` # Example using line number --- - [tool_call(ViewNumberedContext, file_path="path/to/file.py", line_number=APPROX_LINE, context_lines=5)] + [tool_call(ShowNumberedContext, file_path="path/to/file.py", line_number=APPROX_LINE, context_lines=5)] ``` ``` # Example using pattern --- - [tool_call(ViewNumberedContext, file_path="path/to/file.py", pattern="text_near_target", context_lines=5)] + [tool_call(ShowNumberedContext, file_path="path/to/file.py", pattern="text_near_target", context_lines=5)] ``` 3. **Verify:** Carefully examine the numbered output in the result message to confirm the *exact* line numbers and content you intend to modify. 4. 
**Edit (Next Turn):** Only in the *next* message, issue the `ReplaceLine` or `ReplaceLines` command using the verified line numbers. @@ -221,7 +221,7 @@ SEARCH/REPLACE blocks can appear anywhere in your response if needed. - `ReplaceAll`: **Use with extreme caution!** Best suited for targeted renaming across a file. Consider `dry_run=True` first. Can easily cause unintended changes if `find_text` is common. - `InsertBlock`: For adding code blocks. - `DeleteBlock`: For removing code sections. -- `ReplaceLine`/`ReplaceLines`: For line-specific fixes (requires strict `ViewNumberedContext` verification). +- `ReplaceLine`/`ReplaceLines`: For line-specific fixes (requires strict `ShowNumberedContext` verification). - `IndentLines`: For adjusting indentation. - `ExtractLines`: For moving code between files. - `UndoChange`: For reverting specific edits. @@ -229,7 +229,7 @@ SEARCH/REPLACE blocks can appear anywhere in your response if needed. #### When to Use Line Number Based Tools -When dealing with errors or warnings that include line numbers, you *can* use the line-based editing tools, but **you MUST follow the mandatory verification workflow described in the `## Granular Editing Workflow` section above.** This involves using `ViewNumberedContext` in one turn to verify the lines, and then using `ReplaceLine`/`ReplaceLines` in the *next* turn. +When dealing with errors or warnings that include line numbers, you *can* use the line-based editing tools, but **you MUST follow the mandatory verification workflow described in the `## Granular Editing Workflow` section above.** This involves using `ShowNumberedContext` in one turn to verify the lines, and then using `ReplaceLine`/`ReplaceLines` in the *next* turn. 
``` Error in /path/to/file.py line 42: Syntax error: unexpected token diff --git a/aider/tools/view_numbered_context.py b/aider/tools/show_numbered_context.py similarity index 98% rename from aider/tools/view_numbered_context.py rename to aider/tools/show_numbered_context.py index 775e62b5e..ff855a712 100644 --- a/aider/tools/view_numbered_context.py +++ b/aider/tools/show_numbered_context.py @@ -1,6 +1,6 @@ import os -def execute_view_numbered_context(coder, file_path, pattern=None, line_number=None, context_lines=3): +def execute_show_numbered_context(coder, file_path, pattern=None, line_number=None, context_lines=3): """ Displays numbered lines from file_path centered around a target location (pattern or line_number), without adding the file to context. From ee94452ffeb3a8a9ab6017d5b2f569dd74ec370e Mon Sep 17 00:00:00 2001 From: "Amar Sood (tekacs)" Date: Sat, 12 Apr 2025 08:26:41 -0400 Subject: [PATCH 26/63] Add DeleteLine{,s} tools --- aider/coders/navigator_coder.py | 31 ++++++++- aider/coders/navigator_prompts.py | 15 +++- aider/tools/__init__.py | 35 ++++++++++ aider/tools/delete_line.py | 100 +++++++++++++++++++++++++++ aider/tools/delete_lines.py | 111 ++++++++++++++++++++++++++++++ 5 files changed, 289 insertions(+), 3 deletions(-) create mode 100644 aider/tools/delete_line.py create mode 100644 aider/tools/delete_lines.py diff --git a/aider/coders/navigator_coder.py b/aider/coders/navigator_coder.py index b7763a691..b2381dce9 100644 --- a/aider/coders/navigator_coder.py +++ b/aider/coders/navigator_coder.py @@ -51,6 +51,8 @@ from aider.tools.delete_block import _execute_delete_block from aider.tools.replace_line import _execute_replace_line from aider.tools.replace_lines import _execute_replace_lines from aider.tools.indent_lines import _execute_indent_lines +from aider.tools.delete_line import _execute_delete_line # New +from aider.tools.delete_lines import _execute_delete_lines # New from aider.tools.undo_change import _execute_undo_change from 
aider.tools.list_changes import _execute_list_changes from aider.tools.extract_lines import _execute_extract_lines @@ -851,7 +853,34 @@ class NavigatorCoder(Coder): ) else: result_message = "Error: Missing required parameters for IndentLines (file_path, start_pattern)" - + + elif norm_tool_name == 'deleteline': + file_path = params.get('file_path') + line_number = params.get('line_number') + change_id = params.get('change_id') + dry_run = params.get('dry_run', False) + + if file_path is not None and line_number is not None: + result_message = _execute_delete_line( + self, file_path, line_number, change_id, dry_run + ) + else: + result_message = "Error: Missing required parameters for DeleteLine (file_path, line_number)" + + elif norm_tool_name == 'deletelines': + file_path = params.get('file_path') + start_line = params.get('start_line') + end_line = params.get('end_line') + change_id = params.get('change_id') + dry_run = params.get('dry_run', False) + + if file_path is not None and start_line is not None and end_line is not None: + result_message = _execute_delete_lines( + self, file_path, start_line, end_line, change_id, dry_run + ) + else: + result_message = "Error: Missing required parameters for DeleteLines (file_path, start_line, end_line)" + elif norm_tool_name == 'undochange': change_id = params.get('change_id') file_path = params.get('file_path') diff --git a/aider/coders/navigator_prompts.py b/aider/coders/navigator_prompts.py index dce1ceffd..4460292aa 100644 --- a/aider/coders/navigator_prompts.py +++ b/aider/coders/navigator_prompts.py @@ -94,10 +94,18 @@ Act as an expert software engineer with the ability to autonomously navigate and - **IndentLines**: `[tool_call(IndentLines, file_path="...", start_pattern="...", end_pattern="...", indent_levels=1, near_context="...", occurrence=1, dry_run=False)]` Indent (`indent_levels` > 0) or unindent (`indent_levels` < 0) a block. Use `end_pattern` or `line_count` for range. 
Use `near_context` and `occurrence` (optional, default 1, -1 for last) for `start_pattern`. `dry_run=True` simulates. *Useful for fixing indentation errors reported by linters or reformatting code blocks. Also helpful for adjusting indentation after moving code with `ExtractLines`.* - + +- **DeleteLine**: `[tool_call(DeleteLine, file_path="...", line_number=42, dry_run=False)]` + Delete a specific line number (1-based). `dry_run=True` simulates. + *Useful for removing single erroneous lines identified by linters or exact line number.* + +- **DeleteLines**: `[tool_call(DeleteLines, file_path="...", start_line=42, end_line=45, dry_run=False)]` + Delete a range of lines (1-based, inclusive). `dry_run=True` simulates. + *Useful for removing multi-line blocks when exact line numbers are known.* + - **UndoChange**: `[tool_call(UndoChange, change_id="a1b2c3d4")]` or `[tool_call(UndoChange, file_path="...")]` Undo a specific change by ID, or the last change made to the specified `file_path`. - + - **ListChanges**: `[tool_call(ListChanges, file_path="...", limit=5)]` List recent changes, optionally filtered by `file_path` and limited. @@ -222,6 +230,7 @@ SEARCH/REPLACE blocks can appear anywhere in your response if needed. - `InsertBlock`: For adding code blocks. - `DeleteBlock`: For removing code sections. - `ReplaceLine`/`ReplaceLines`: For line-specific fixes (requires strict `ShowNumberedContext` verification). +- `DeleteLine`/`DeleteLines`: For removing lines by number (requires strict `ShowNumberedContext` verification). - `IndentLines`: For adjusting indentation. - `ExtractLines`: For moving code between files. - `UndoChange`: For reverting specific edits. 
@@ -239,6 +248,8 @@ Warning in /path/to/file.py lines 105-107: This block should be indented For these cases, use: - `ReplaceLine` for single line fixes (e.g., syntax errors) - `ReplaceLines` for multi-line issues +- `DeleteLine` for removing single erroneous lines +- `DeleteLines` for removing multi-line blocks by number - `IndentLines` for indentation problems #### Multiline Tool Call Content Format diff --git a/aider/tools/__init__.py b/aider/tools/__init__.py index e69de29bb..f173e68cc 100644 --- a/aider/tools/__init__.py +++ b/aider/tools/__init__.py @@ -0,0 +1,35 @@ +# flake8: noqa: F401 +# Import tool functions into the aider.tools namespace + +# Discovery +from .ls import execute_ls +from .view_files_at_glob import execute_view_files_at_glob +from .view_files_matching import execute_view_files_matching +from .view_files_with_symbol import _execute_view_files_with_symbol + +# Context Management +from .view import execute_view +from .remove import _execute_remove +from .make_editable import _execute_make_editable +from .make_readonly import _execute_make_readonly +from .show_numbered_context import execute_show_numbered_context + +# Granular Editing +from .replace_text import _execute_replace_text +from .replace_all import _execute_replace_all +from .insert_block import _execute_insert_block +from .delete_block import _execute_delete_block +from .replace_line import _execute_replace_line +from .replace_lines import _execute_replace_lines +from .indent_lines import _execute_indent_lines +from .extract_lines import _execute_extract_lines +from .delete_line import _execute_delete_line +from .delete_lines import _execute_delete_lines + +# Change Tracking +from .undo_change import _execute_undo_change +from .list_changes import _execute_list_changes + +# Other +from .command import _execute_command +from .command_interactive import _execute_command_interactive diff --git a/aider/tools/delete_line.py b/aider/tools/delete_line.py new file mode 100644 index 
000000000..e3b470ed2 --- /dev/null +++ b/aider/tools/delete_line.py @@ -0,0 +1,100 @@ +import os +import traceback + +def _execute_delete_line(coder, file_path, line_number, change_id=None, dry_run=False): + """ + Delete a specific line number (1-based). + + Parameters: + - coder: The Coder instance + - file_path: Path to the file to modify + - line_number: The 1-based line number to delete + - change_id: Optional ID for tracking the change + - dry_run: If True, simulate the change without modifying the file + + Returns a result message. + """ + try: + # Get absolute file path + abs_path = coder.abs_root_path(file_path) + rel_path = coder.get_rel_fname(abs_path) + + # Check if file exists + if not os.path.isfile(abs_path): + coder.io.tool_error(f"File '{file_path}' not found") + return f"Error: File not found" + + # Check if file is in editable context + if abs_path not in coder.abs_fnames: + if abs_path in coder.abs_read_only_fnames: + coder.io.tool_error(f"File '{file_path}' is read-only. Use MakeEditable first.") + return f"Error: File is read-only. Use MakeEditable first." 
+ else: + coder.io.tool_error(f"File '{file_path}' not in context") + return f"Error: File not in context" + + # Reread file content immediately before modification + file_content = coder.io.read_text(abs_path) + if file_content is None: + coder.io.tool_error(f"Could not read file '{file_path}' before DeleteLine operation.") + return f"Error: Could not read file '{file_path}'" + + lines = file_content.splitlines() + original_content = file_content + + # Validate line number + try: + line_num_int = int(line_number) + if line_num_int < 1 or line_num_int > len(lines): + raise ValueError(f"Line number {line_num_int} is out of range (1-{len(lines)})") + line_idx = line_num_int - 1 # Convert to 0-based index + except ValueError as e: + coder.io.tool_error(f"Invalid line_number: {e}") + return f"Error: Invalid line_number '{line_number}'" + + # Prepare the deletion + deleted_line = lines[line_idx] + new_lines = lines[:line_idx] + lines[line_idx+1:] + new_content = '\n'.join(new_lines) + + if original_content == new_content: + coder.io.tool_warning(f"No changes made: deleting line {line_num_int} would not change file") + return f"Warning: No changes made (deleting line {line_num_int} would not change file)" + + # Generate diff snippet (using the existing delete block helper for simplicity) + diff_snippet = coder._generate_diff_snippet_delete(original_content, line_idx, line_idx) + + # Handle dry run + if dry_run: + coder.io.tool_output(f"Dry run: Would delete line {line_num_int} in {file_path}") + return f"Dry run: Would delete line {line_num_int}. 
Diff snippet:\n{diff_snippet}" + + # --- Apply Change (Not dry run) --- + coder.io.write_text(abs_path, new_content) + + # Track the change + try: + metadata = { + 'line_number': line_num_int, + 'deleted_content': deleted_line + } + change_id = coder.change_tracker.track_change( + file_path=rel_path, + change_type='deleteline', + original_content=original_content, + new_content=new_content, + metadata=metadata, + change_id=change_id + ) + except Exception as track_e: + coder.io.tool_error(f"Error tracking change for DeleteLine: {track_e}") + change_id = "TRACKING_FAILED" + + coder.aider_edited_files.add(rel_path) + + coder.io.tool_output(f"✅ Deleted line {line_num_int} in {file_path} (change_id: {change_id})") + return f"Successfully deleted line {line_num_int} (change_id: {change_id}). Diff snippet:\n{diff_snippet}" + + except Exception as e: + coder.io.tool_error(f"Error in DeleteLine: {str(e)}\n{traceback.format_exc()}") + return f"Error: {str(e)}" diff --git a/aider/tools/delete_lines.py b/aider/tools/delete_lines.py new file mode 100644 index 000000000..c24e123ad --- /dev/null +++ b/aider/tools/delete_lines.py @@ -0,0 +1,111 @@ +import os +import traceback + +def _execute_delete_lines(coder, file_path, start_line, end_line, change_id=None, dry_run=False): + """ + Delete a range of lines (1-based, inclusive). + + Parameters: + - coder: The Coder instance + - file_path: Path to the file to modify + - start_line: The 1-based starting line number to delete + - end_line: The 1-based ending line number to delete + - change_id: Optional ID for tracking the change + - dry_run: If True, simulate the change without modifying the file + + Returns a result message. 
+ """ + try: + # Get absolute file path + abs_path = coder.abs_root_path(file_path) + rel_path = coder.get_rel_fname(abs_path) + + # Check if file exists + if not os.path.isfile(abs_path): + coder.io.tool_error(f"File '{file_path}' not found") + return f"Error: File not found" + + # Check if file is in editable context + if abs_path not in coder.abs_fnames: + if abs_path in coder.abs_read_only_fnames: + coder.io.tool_error(f"File '{file_path}' is read-only. Use MakeEditable first.") + return f"Error: File is read-only. Use MakeEditable first." + else: + coder.io.tool_error(f"File '{file_path}' not in context") + return f"Error: File not in context" + + # Reread file content immediately before modification + file_content = coder.io.read_text(abs_path) + if file_content is None: + coder.io.tool_error(f"Could not read file '{file_path}' before DeleteLines operation.") + return f"Error: Could not read file '{file_path}'" + + lines = file_content.splitlines() + original_content = file_content + + # Validate line numbers + try: + start_line_int = int(start_line) + end_line_int = int(end_line) + + if start_line_int < 1 or start_line_int > len(lines): + raise ValueError(f"Start line {start_line_int} is out of range (1-{len(lines)})") + if end_line_int < 1 or end_line_int > len(lines): + raise ValueError(f"End line {end_line_int} is out of range (1-{len(lines)})") + if start_line_int > end_line_int: + raise ValueError(f"Start line {start_line_int} cannot be after end line {end_line_int}") + + start_idx = start_line_int - 1 # Convert to 0-based index + end_idx = end_line_int - 1 # Convert to 0-based index + except ValueError as e: + coder.io.tool_error(f"Invalid line numbers: {e}") + return f"Error: Invalid line numbers '{start_line}', '{end_line}'" + + # Prepare the deletion + deleted_lines = lines[start_idx:end_idx+1] + new_lines = lines[:start_idx] + lines[end_idx+1:] + new_content = '\n'.join(new_lines) + + if original_content == new_content: + coder.io.tool_warning(f"No 
changes made: deleting lines {start_line_int}-{end_line_int} would not change file") + return f"Warning: No changes made (deleting lines {start_line_int}-{end_line_int} would not change file)" + + # Generate diff snippet + diff_snippet = coder._generate_diff_snippet_delete(original_content, start_idx, end_idx) + + # Handle dry run + if dry_run: + coder.io.tool_output(f"Dry run: Would delete lines {start_line_int}-{end_line_int} in {file_path}") + return f"Dry run: Would delete lines {start_line_int}-{end_line_int}. Diff snippet:\n{diff_snippet}" + + # --- Apply Change (Not dry run) --- + coder.io.write_text(abs_path, new_content) + + # Track the change + try: + metadata = { + 'start_line': start_line_int, + 'end_line': end_line_int, + 'deleted_content': '\n'.join(deleted_lines) + } + change_id = coder.change_tracker.track_change( + file_path=rel_path, + change_type='deletelines', + original_content=original_content, + new_content=new_content, + metadata=metadata, + change_id=change_id + ) + except Exception as track_e: + coder.io.tool_error(f"Error tracking change for DeleteLines: {track_e}") + change_id = "TRACKING_FAILED" + + coder.aider_edited_files.add(rel_path) + + num_deleted = end_idx - start_idx + 1 + coder.io.tool_output(f"✅ Deleted {num_deleted} lines ({start_line_int}-{end_line_int}) in {file_path} (change_id: {change_id})") + return f"Successfully deleted {num_deleted} lines ({start_line_int}-{end_line_int}) (change_id: {change_id}). 
Diff snippet:\n{diff_snippet}" + + except Exception as e: + coder.io.tool_error(f"Error in DeleteLines: {str(e)}\n{traceback.format_exc()}") + return f"Error: {str(e)}" From 551b357559a1668980eae33f250fee7516a0a83f Mon Sep 17 00:00:00 2001 From: "Amar Sood (tekacs)" Date: Sat, 12 Apr 2025 08:39:16 -0400 Subject: [PATCH 27/63] Be even more strident about line-number-based edits --- aider/coders/navigator_prompts.py | 24 ++++++++++++------------ 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/aider/coders/navigator_prompts.py b/aider/coders/navigator_prompts.py index 4460292aa..10a9b17e3 100644 --- a/aider/coders/navigator_prompts.py +++ b/aider/coders/navigator_prompts.py @@ -187,28 +187,28 @@ SEARCH/REPLACE blocks can appear anywhere in your response if needed. * Use `ListChanges` to review history. * If a direct edit's result diff shows an error, **immediately use `[tool_call(UndoChange, change_id="...")]` in your *next* message** before attempting a corrected edit. -**Using Line Number Based Tools (`ReplaceLine`, `ReplaceLines`):** -* **High Risk:** Line numbers are fragile and can become outdated due to preceding edits, even within the same multi-tool message. Using these tools without recent verification can lead to incorrect changes. -* **Mandatory Verification Workflow:** - 1. **Identify Target Location:** Determine the approximate location using line numbers (e.g., from linter output) or nearby text. - 2. **View Numbered Context (Separate Turn):** In one message, use `ShowNumberedContext` specifying *either* the `line_number` or a nearby `pattern` to display numbered lines for the target area. +**Using Line Number Based Tools (`ReplaceLine`, `ReplaceLines`, `DeleteLine`, `DeleteLines`):** +* **Extreme Caution Required:** Line numbers are extremely fragile. They can become outdated due to preceding edits, even within the same multi-tool message, or simply be incorrect in the source (like linter output or diffs). 
Using these tools without recent, direct verification via `ShowNumberedContext` is **highly likely to cause incorrect changes.** +* **Mandatory Verification Workflow (No Exceptions):** + 1. **Identify Target Location:** Determine the *approximate* location. **Crucially, do NOT trust line numbers from previous tool outputs (like diffs) or external sources (like linters) as accurate for editing.** They are only starting points for verification. + 2. **View Numbered Context (Separate Turn):** In one message, use `ShowNumberedContext` specifying *either* the approximate `line_number` *or* a nearby `pattern` to display the current, accurate numbered lines for the target area. ``` - # Example using line number + # Example using potentially outdated line number for verification target --- - [tool_call(ShowNumberedContext, file_path="path/to/file.py", line_number=APPROX_LINE, context_lines=5)] + [tool_call(ShowNumberedContext, file_path="path/to/file.py", line_number=APPROX_LINE_FROM_LINTER, context_lines=5)] ``` ``` - # Example using pattern + # Example using pattern near the target --- [tool_call(ShowNumberedContext, file_path="path/to/file.py", pattern="text_near_target", context_lines=5)] ``` - 3. **Verify:** Carefully examine the numbered output in the result message to confirm the *exact* line numbers and content you intend to modify. - 4. **Edit (Next Turn):** Only in the *next* message, issue the `ReplaceLine` or `ReplaceLines` command using the verified line numbers. + 3. **Verify:** Carefully examine the numbered output in the result message. This is the **only** reliable source for the line numbers you will use. Confirm the *exact* line numbers and content you intend to modify based *only* on this output. + 4. 
**Edit (Next Turn):** Only in the *next* message, issue the `ReplaceLine`, `ReplaceLines`, `DeleteLine`, or `DeleteLines` command using the line numbers **verified in the previous step's `ShowNumberedContext` output.** ``` --- - [tool_call(ReplaceLine, file_path="path/to/file.py", line_number=VERIFIED_LINE, new_content="...")] + [tool_call(ReplaceLine, file_path="path/to/file.py", line_number=VERIFIED_LINE_FROM_SHOW_NUMBERED_CONTEXT, new_content="...")] ``` -* **Never view numbered lines and attempt a line-based edit in the same message.** +* **Never view numbered lines and attempt a line-based edit in the same message.** This workflow *must* span two separate turns. ### Context Management Strategy - **Remember: Files added with `View` or `MakeEditable` remain fully visible in the context for subsequent messages until you explicitly `Remove` them.** From c55d789c25859f594c7eb2b3d1eaa328610b39bc Mon Sep 17 00:00:00 2001 From: "Amar Sood (tekacs)" Date: Sat, 12 Apr 2025 08:39:16 -0400 Subject: [PATCH 28/63] Try factoring out some common functionality. Incremental commit. 
--- aider/tools/delete_block.py | 213 +++++++++++------------------------- aider/tools/insert_block.py | 192 ++++++++++---------------------- aider/tools/replace_all.py | 126 ++++++++------------- aider/tools/replace_text.py | 154 ++++++++++---------------- aider/tools/tool_utils.py | 192 ++++++++++++++++++++++++++++++++ 5 files changed, 422 insertions(+), 455 deletions(-) create mode 100644 aider/tools/tool_utils.py diff --git a/aider/tools/delete_block.py b/aider/tools/delete_block.py index 2f3391ee0..e2cf44d8c 100644 --- a/aider/tools/delete_block.py +++ b/aider/tools/delete_block.py @@ -1,171 +1,82 @@ -import os import traceback +from .tool_utils import ( + ToolError, + validate_file_for_edit, + find_pattern_indices, + select_occurrence_index, + determine_line_range, + apply_change, + handle_tool_error, + format_tool_result, +) def _execute_delete_block(coder, file_path, start_pattern, end_pattern=None, line_count=None, near_context=None, occurrence=1, change_id=None, dry_run=False): """ Delete a block of text between start_pattern and end_pattern (inclusive). - - Parameters: - - coder: The Coder instance - - file_path: Path to the file to modify - - start_pattern: Pattern marking the start of the block to delete (line containing this pattern) - - end_pattern: Optional pattern marking the end of the block (line containing this pattern) - - line_count: Optional number of lines to delete (alternative to end_pattern) - - near_context: Optional text nearby to help locate the correct instance of the start_pattern - - occurrence: Which occurrence of the start_pattern to use (1-based index, or -1 for last) - - change_id: Optional ID for tracking the change - - dry_run: If True, simulate the change without modifying the file - - Returns a result message. + Uses utility functions for validation, finding lines, and applying changes. 
""" + tool_name = "DeleteBlock" try: - # Get absolute file path - abs_path = coder.abs_root_path(file_path) - rel_path = coder.get_rel_fname(abs_path) - - # Check if file exists - if not os.path.isfile(abs_path): - coder.io.tool_error(f"File '{file_path}' not found") - return f"Error: File not found" - - # Check if file is in editable context - if abs_path not in coder.abs_fnames: - if abs_path in coder.abs_read_only_fnames: - coder.io.tool_error(f"File '{file_path}' is read-only. Use MakeEditable first.") - return f"Error: File is read-only. Use MakeEditable first." - else: - coder.io.tool_error(f"File '{file_path}' not in context") - return f"Error: File not in context" - - # Reread file content immediately before modification - file_content = coder.io.read_text(abs_path) - if file_content is None: - coder.io.tool_error(f"Could not read file '{file_path}' before DeleteBlock operation.") - return f"Error: Could not read file '{file_path}'" - - # Validate we have either end_pattern or line_count, but not both - if end_pattern and line_count: - coder.io.tool_error("Cannot specify both end_pattern and line_count") - return "Error: Cannot specify both end_pattern and line_count" - - # Split into lines for easier handling - lines = file_content.splitlines() - original_content = file_content - - # Find occurrences of the start_pattern - start_pattern_line_indices = [] - for i, line in enumerate(lines): - if start_pattern in line: - if near_context: - context_window_start = max(0, i - 5) - context_window_end = min(len(lines), i + 6) - context_block = "\n".join(lines[context_window_start:context_window_end]) - if near_context in context_block: - start_pattern_line_indices.append(i) - else: - start_pattern_line_indices.append(i) + # 1. 
Validate file and get content + abs_path, rel_path, original_content = validate_file_for_edit(coder, file_path) + lines = original_content.splitlines() - if not start_pattern_line_indices: - err_msg = f"Start pattern '{start_pattern}' not found" - if near_context: err_msg += f" near context '{near_context}'" - err_msg += f" in file '{file_path}'." - coder.io.tool_error(err_msg) - return f"Error: {err_msg}" + # 2. Find the start line + pattern_desc = f"Start pattern '{start_pattern}'" + if near_context: + pattern_desc += f" near context '{near_context}'" + start_pattern_indices = find_pattern_indices(lines, start_pattern, near_context) + start_line_idx = select_occurrence_index(start_pattern_indices, occurrence, pattern_desc) - # Select the occurrence for the start pattern - num_occurrences = len(start_pattern_line_indices) - try: - occurrence = int(occurrence) - if occurrence == -1: - target_idx = num_occurrences - 1 - elif occurrence > 0 and occurrence <= num_occurrences: - target_idx = occurrence - 1 - else: - err_msg = f"Occurrence number {occurrence} is out of range for start pattern '{start_pattern}'. Found {num_occurrences} occurrences" - if near_context: err_msg += f" near '{near_context}'" - err_msg += f" in '{file_path}'." - coder.io.tool_error(err_msg) - return f"Error: {err_msg}" - except ValueError: - coder.io.tool_error(f"Invalid occurrence value: '{occurrence}'. Must be an integer.") - return f"Error: Invalid occurrence value '{occurrence}'" + # 3. 
Determine the end line, passing pattern_desc for better error messages + start_line, end_line = determine_line_range( + lines, start_line_idx, end_pattern, line_count, pattern_desc=pattern_desc + ) - start_line = start_pattern_line_indices[target_idx] - occurrence_str = f"occurrence {occurrence} of " if num_occurrences > 1 else "" - - # Find the end line based on end_pattern or line_count - end_line = -1 - if end_pattern: - for i in range(start_line, len(lines)): - if end_pattern in lines[i]: - end_line = i - break - if end_line == -1: - err_msg = f"End pattern '{end_pattern}' not found after {occurrence_str}start pattern '{start_pattern}' (line {start_line + 1}) in '{file_path}'." - coder.io.tool_error(err_msg) - return f"Error: {err_msg}" - elif line_count: - try: - line_count = int(line_count) - if line_count <= 0: raise ValueError("Line count must be positive") - end_line = min(start_line + line_count - 1, len(lines) - 1) - except ValueError: - coder.io.tool_error(f"Invalid line_count value: '{line_count}'. Must be a positive integer.") - return f"Error: Invalid line_count value '{line_count}'" - else: - end_line = start_line - - # Prepare the deletion + # 4. Prepare the deletion deleted_lines = lines[start_line:end_line+1] new_lines = lines[:start_line] + lines[end_line+1:] new_content = '\n'.join(new_lines) - + if original_content == new_content: coder.io.tool_warning(f"No changes made: deletion would not change file") return f"Warning: No changes made (deletion would not change file)" - # Generate diff for feedback (assuming _generate_diff_snippet_delete is available on coder) + # 5. Generate diff for feedback diff_snippet = coder._generate_diff_snippet_delete(original_content, start_line, end_line) - - # Handle dry run - if dry_run: - coder.io.tool_output(f"Dry run: Would delete lines {start_line+1}-{end_line+1} (based on {occurrence_str}start pattern '{start_pattern}') in {file_path}") - return f"Dry run: Would delete block. 
Diff snippet:\n{diff_snippet}" - - # --- Apply Change (Not dry run) --- - coder.io.write_text(abs_path, new_content) - - # Track the change - try: - metadata = { - 'start_line': start_line + 1, - 'end_line': end_line + 1, - 'start_pattern': start_pattern, - 'end_pattern': end_pattern, - 'line_count': line_count, - 'near_context': near_context, - 'occurrence': occurrence, - 'deleted_content': '\n'.join(deleted_lines) - } - change_id = coder.change_tracker.track_change( - file_path=rel_path, - change_type='deleteblock', - original_content=original_content, - new_content=new_content, - metadata=metadata, - change_id=change_id - ) - except Exception as track_e: - coder.io.tool_error(f"Error tracking change for DeleteBlock: {track_e}") - change_id = "TRACKING_FAILED" - - coder.aider_edited_files.add(rel_path) - - # Improve feedback num_deleted = end_line - start_line + 1 - coder.io.tool_output(f"✅ Deleted {num_deleted} lines (from {occurrence_str}start pattern) in {file_path} (change_id: {change_id})") - return f"Successfully deleted {num_deleted} lines (change_id: {change_id}). Diff snippet:\n{diff_snippet}" - + num_occurrences = len(start_pattern_indices) + occurrence_str = f"occurrence {occurrence} of " if num_occurrences > 1 else "" + + # 6. Handle dry run + if dry_run: + dry_run_message = f"Dry run: Would delete {num_deleted} lines ({start_line+1}-{end_line+1}) based on {occurrence_str}start pattern '{start_pattern}' in {file_path}." + return format_tool_result(coder, tool_name, "", dry_run=True, dry_run_message=dry_run_message, diff_snippet=diff_snippet) + + # 7. 
Apply Change (Not dry run) + metadata = { + 'start_line': start_line + 1, + 'end_line': end_line + 1, + 'start_pattern': start_pattern, + 'end_pattern': end_pattern, + 'line_count': line_count, + 'near_context': near_context, + 'occurrence': occurrence, + 'deleted_content': '\n'.join(deleted_lines) + } + final_change_id = apply_change( + coder, abs_path, rel_path, original_content, new_content, 'deleteblock', metadata, change_id + ) + + # 8. Format and return result, adding line range to success message + success_message = f"Deleted {num_deleted} lines ({start_line+1}-{end_line+1}) (from {occurrence_str}start pattern) in {file_path}" + return format_tool_result( + coder, tool_name, success_message, change_id=final_change_id, diff_snippet=diff_snippet + ) + + except ToolError as e: + # Handle errors raised by utility functions (expected errors) + return handle_tool_error(coder, tool_name, e, add_traceback=False) except Exception as e: - coder.io.tool_error(f"Error in DeleteBlock: {str(e)}\n{traceback.format_exc()}") - return f"Error: {str(e)}" + # Handle unexpected errors + return handle_tool_error(coder, tool_name, e) diff --git a/aider/tools/insert_block.py b/aider/tools/insert_block.py index 139d8aa4a..26e83fed2 100644 --- a/aider/tools/insert_block.py +++ b/aider/tools/insert_block.py @@ -1,159 +1,87 @@ import os import traceback +from .tool_utils import ( + ToolError, + validate_file_for_edit, + find_pattern_indices, + select_occurrence_index, + apply_change, + handle_tool_error, + format_tool_result, +) def _execute_insert_block(coder, file_path, content, after_pattern=None, before_pattern=None, near_context=None, occurrence=1, change_id=None, dry_run=False): """ - Insert a block of text after or before a specified pattern. 
- - Parameters: - - coder: The Coder instance - - file_path: Path to the file to modify - - content: Text block to insert - - after_pattern: Pattern after which to insert the block (line containing this pattern) - specify one of after/before - - before_pattern: Pattern before which to insert the block (line containing this pattern) - specify one of after/before - - near_context: Optional text nearby to help locate the correct instance of the pattern - - occurrence: Which occurrence of the pattern to use (1-based index, or -1 for last) - - change_id: Optional ID for tracking the change - - dry_run: If True, simulate the change without modifying the file - - Returns a result message. + Insert a block of text after or before a specified pattern using utility functions. """ + tool_name = "InsertBlock" try: - # Get absolute file path - abs_path = coder.abs_root_path(file_path) - rel_path = coder.get_rel_fname(abs_path) - - # Check if file exists - if not os.path.isfile(abs_path): - coder.io.tool_error(f"File '{file_path}' not found") - return f"Error: File not found" - - # Check if file is in editable context - if abs_path not in coder.abs_fnames: - if abs_path in coder.abs_read_only_fnames: - coder.io.tool_error(f"File '{file_path}' is read-only. Use MakeEditable first.") - return f"Error: File is read-only. Use MakeEditable first." - else: - coder.io.tool_error(f"File '{file_path}' not in context") - return f"Error: File not in context" - - # Reread file content immediately before modification (Fixes Point 3: Stale Reads) - file_content = coder.io.read_text(abs_path) - if file_content is None: - # Provide more specific error (Improves Point 4) - coder.io.tool_error(f"Could not read file '{file_path}' before InsertBlock operation.") - return f"Error: Could not read file '{file_path}'" - - # Validate we have either after_pattern or before_pattern, but not both + # 1. 
Validate parameters if after_pattern and before_pattern: - coder.io.tool_error("Cannot specify both after_pattern and before_pattern") - return "Error: Cannot specify both after_pattern and before_pattern" + raise ToolError("Cannot specify both after_pattern and before_pattern") if not after_pattern and not before_pattern: - coder.io.tool_error("Must specify either after_pattern or before_pattern") - return "Error: Must specify either after_pattern or before_pattern" - - # Split into lines for easier handling - lines = file_content.splitlines() - original_content = file_content - - # Find occurrences of the pattern (either after_pattern or before_pattern) + raise ToolError("Must specify either after_pattern or before_pattern") + + # 2. Validate file and get content + abs_path, rel_path, original_content = validate_file_for_edit(coder, file_path) + lines = original_content.splitlines() + + # 3. Find the target line index pattern = after_pattern if after_pattern else before_pattern pattern_type = "after" if after_pattern else "before" - - # Find line indices containing the pattern - pattern_line_indices = [] - for i, line in enumerate(lines): - if pattern in line: - # If near_context is provided, check if it's nearby - if near_context: - context_window_start = max(0, i - 5) # Check 5 lines before/after - context_window_end = min(len(lines), i + 6) - context_block = "\n".join(lines[context_window_start:context_window_end]) - if near_context in context_block: - pattern_line_indices.append(i) - else: - pattern_line_indices.append(i) + pattern_desc = f"Pattern '{pattern}'" + if near_context: + pattern_desc += f" near context '{near_context}'" - if not pattern_line_indices: - err_msg = f"Pattern '{pattern}' not found" - if near_context: err_msg += f" near context '{near_context}'" - err_msg += f" in file '{file_path}'." 
- coder.io.tool_error(err_msg) - return f"Error: {err_msg}" # Improve Point 4 - - # Select the occurrence (Implements Point 5) - num_occurrences = len(pattern_line_indices) - try: - occurrence = int(occurrence) # Ensure occurrence is an integer - if occurrence == -1: # Last occurrence - target_idx = num_occurrences - 1 - elif occurrence > 0 and occurrence <= num_occurrences: - target_idx = occurrence - 1 # Convert 1-based to 0-based - else: - err_msg = f"Occurrence number {occurrence} is out of range for pattern '{pattern}'. Found {num_occurrences} occurrences" - if near_context: err_msg += f" near '{near_context}'" - err_msg += f" in '{file_path}'." - coder.io.tool_error(err_msg) - return f"Error: {err_msg}" # Improve Point 4 - except ValueError: - coder.io.tool_error(f"Invalid occurrence value: '{occurrence}'. Must be an integer.") - return f"Error: Invalid occurrence value '{occurrence}'" + pattern_line_indices = find_pattern_indices(lines, pattern, near_context) + target_line_idx = select_occurrence_index(pattern_line_indices, occurrence, pattern_desc) # Determine the final insertion line index - insertion_line_idx = pattern_line_indices[target_idx] + insertion_line_idx = target_line_idx if pattern_type == "after": insertion_line_idx += 1 # Insert on the line *after* the matched line - # Prepare the content to insert + + # 4. Prepare the insertion content_lines = content.splitlines() - - # Create the new lines array new_lines = lines[:insertion_line_idx] + content_lines + lines[insertion_line_idx:] - new_content = '\n'.join(new_lines) # Use '\n' to match io.write_text behavior - + new_content = '\n'.join(new_lines) + if original_content == new_content: coder.io.tool_warning(f"No changes made: insertion would not change file") return f"Warning: No changes made (insertion would not change file)" - # Generate diff for feedback + # 5. 
Generate diff for feedback diff_snippet = coder._generate_diff_snippet_insert(original_content, insertion_line_idx, content_lines) - - # Handle dry run (Implements Point 6) - if dry_run: - occurrence_str = f"occurrence {occurrence} of " if num_occurrences > 1 else "" - coder.io.tool_output(f"Dry run: Would insert block {pattern_type} {occurrence_str}pattern '{pattern}' in {file_path}") - return f"Dry run: Would insert block. Diff snippet:\n{diff_snippet}" - - # --- Apply Change (Not dry run) --- - coder.io.write_text(abs_path, new_content) - - # Track the change - try: - metadata = { - 'insertion_line_idx': insertion_line_idx, - 'after_pattern': after_pattern, - 'before_pattern': before_pattern, - 'near_context': near_context, - 'occurrence': occurrence, - 'content': content - } - change_id = coder.change_tracker.track_change( - file_path=rel_path, - change_type='insertblock', - original_content=original_content, - new_content=new_content, - metadata=metadata, - change_id=change_id - ) - except Exception as track_e: - coder.io.tool_error(f"Error tracking change for InsertBlock: {track_e}") - change_id = "TRACKING_FAILED" - - coder.aider_edited_files.add(rel_path) - - # Improve feedback (Point 5 & 6) + num_occurrences = len(pattern_line_indices) occurrence_str = f"occurrence {occurrence} of " if num_occurrences > 1 else "" - coder.io.tool_output(f"✅ Inserted block {pattern_type} {occurrence_str}pattern in {file_path} (change_id: {change_id})") - return f"Successfully inserted block (change_id: {change_id}). Diff snippet:\n{diff_snippet}" + + # 6. Handle dry run + if dry_run: + dry_run_message = f"Dry run: Would insert block {pattern_type} {occurrence_str}pattern '{pattern}' in {file_path} at line {insertion_line_idx + 1}." + return format_tool_result(coder, tool_name, "", dry_run=True, dry_run_message=dry_run_message, diff_snippet=diff_snippet) + + # 7. 
Apply Change (Not dry run) + metadata = { + 'insertion_line_idx': insertion_line_idx, + 'after_pattern': after_pattern, + 'before_pattern': before_pattern, + 'near_context': near_context, + 'occurrence': occurrence, + 'content': content + } + final_change_id = apply_change( + coder, abs_path, rel_path, original_content, new_content, 'insertblock', metadata, change_id + ) + + # 8. Format and return result + success_message = f"Inserted block {pattern_type} {occurrence_str}pattern in {file_path} at line {insertion_line_idx + 1}" + return format_tool_result( + coder, tool_name, success_message, change_id=final_change_id, diff_snippet=diff_snippet + ) + + except ToolError as e: + # Handle errors raised by utility functions (expected errors) + return handle_tool_error(coder, tool_name, e, add_traceback=False) except Exception as e: coder.io.tool_error(f"Error in InsertBlock: {str(e)}\n{traceback.format_exc()}") # Add traceback diff --git a/aider/tools/replace_all.py b/aider/tools/replace_all.py index 468bb15b7..1764a23ee 100644 --- a/aider/tools/replace_all.py +++ b/aider/tools/replace_all.py @@ -1,95 +1,65 @@ -import os import traceback +from .tool_utils import ( + ToolError, + validate_file_for_edit, + apply_change, + handle_tool_error, + format_tool_result, +) def _execute_replace_all(coder, file_path, find_text, replace_text, change_id=None, dry_run=False): """ - Replace all occurrences of text in a file. - - Parameters: - - coder: The Coder instance - - file_path: Path to the file to modify - - find_text: Text to find and replace - - replace_text: Text to replace it with - - change_id: Optional ID for tracking the change - - dry_run: If True, simulate the change without modifying the file - - Returns a result message. + Replace all occurrences of text in a file using utility functions. 
""" + # Get absolute file path + abs_path = coder.abs_root_path(file_path) + rel_path = coder.get_rel_fname(abs_path) + tool_name = "ReplaceAll" try: - # Get absolute file path - abs_path = coder.abs_root_path(file_path) - rel_path = coder.get_rel_fname(abs_path) - - # Check if file exists - if not os.path.isfile(abs_path): - coder.io.tool_error(f"File '{file_path}' not found") - return f"Error: File not found" - - # Check if file is in editable context - if abs_path not in coder.abs_fnames: - if abs_path in coder.abs_read_only_fnames: - coder.io.tool_error(f"File '{file_path}' is read-only. Use MakeEditable first.") - return f"Error: File is read-only. Use MakeEditable first." - else: - coder.io.tool_error(f"File '{file_path}' not in context") - return f"Error: File not in context" - - # Reread file content immediately before modification - content = coder.io.read_text(abs_path) - if content is None: - coder.io.tool_error(f"Could not read file '{file_path}' before ReplaceAll operation.") - return f"Error: Could not read file '{file_path}'" - - # Count occurrences - count = content.count(find_text) + # 1. Validate file and get content + abs_path, rel_path, original_content = validate_file_for_edit(coder, file_path) + + # 2. Count occurrences + count = original_content.count(find_text) if count == 0: - coder.io.tool_warning(f"Text '{find_text}' not found in file") + coder.io.tool_warning(f"Text '{find_text}' not found in file '{file_path}'") return f"Warning: Text not found in file" - - # Perform the replacement - original_content = content - new_content = content.replace(find_text, replace_text) - + + # 3. Perform the replacement + new_content = original_content.replace(find_text, replace_text) + if original_content == new_content: coder.io.tool_warning(f"No changes made: replacement text is identical to original") return f"Warning: No changes made (replacement identical to original)" - - # Generate diff for feedback (more comprehensive for ReplaceAll) + + # 4. 
Generate diff for feedback + # Note: _generate_diff_chunks is currently on the Coder class diff_examples = coder._generate_diff_chunks(original_content, find_text, replace_text) - # Handle dry run + # 5. Handle dry run if dry_run: - coder.io.tool_output(f"Dry run: Would replace {count} occurrences of '{find_text}' in {file_path}") - return f"Dry run: Would replace {count} occurrences. Diff examples:\n{diff_examples}" + dry_run_message = f"Dry run: Would replace {count} occurrences of '{find_text}' in {file_path}." + return format_tool_result(coder, tool_name, "", dry_run=True, dry_run_message=dry_run_message, diff_snippet=diff_examples) - # --- Apply Change (Not dry run) --- - coder.io.write_text(abs_path, new_content) - - # Track the change - try: - metadata = { - 'find_text': find_text, - 'replace_text': replace_text, - 'occurrences': count - } - change_id = coder.change_tracker.track_change( - file_path=rel_path, - change_type='replaceall', - original_content=original_content, - new_content=new_content, - metadata=metadata, - change_id=change_id - ) - except Exception as track_e: - coder.io.tool_error(f"Error tracking change for ReplaceAll: {track_e}") - change_id = "TRACKING_FAILED" + # 6. Apply Change (Not dry run) + metadata = { + 'find_text': find_text, + 'replace_text': replace_text, + 'occurrences': count + } + final_change_id = apply_change( + coder, abs_path, rel_path, original_content, new_content, 'replaceall', metadata, change_id + ) - coder.aider_edited_files.add(rel_path) - - # Improve feedback - coder.io.tool_output(f"✅ Replaced {count} occurrences in {file_path} (change_id: {change_id})") - return f"Successfully replaced {count} occurrences (change_id: {change_id}). Diff examples:\n{diff_examples}" - + # 7. 
Format and return result + success_message = f"Replaced {count} occurrences in {file_path}" + return format_tool_result( + coder, tool_name, success_message, change_id=final_change_id, diff_snippet=diff_examples + ) + + except ToolError as e: + # Handle errors raised by utility functions + return handle_tool_error(coder, tool_name, e, add_traceback=False) except Exception as e: - coder.io.tool_error(f"Error in ReplaceAll: {str(e)}\n{traceback.format_exc()}") - return f"Error: {str(e)}" + # Handle unexpected errors + return handle_tool_error(coder, tool_name, e) diff --git a/aider/tools/replace_text.py b/aider/tools/replace_text.py index 54a149059..9e1a3dfcc 100644 --- a/aider/tools/replace_text.py +++ b/aider/tools/replace_text.py @@ -1,125 +1,91 @@ -import os import traceback +from .tool_utils import ( + ToolError, + validate_file_for_edit, + apply_change, + handle_tool_error, + format_tool_result, +) def _execute_replace_text(coder, file_path, find_text, replace_text, near_context=None, occurrence=1, change_id=None, dry_run=False): """ Replace specific text with new text, optionally using nearby context for disambiguation. - - Parameters: - - coder: The Coder instance - - file_path: Path to the file to modify - - find_text: Text to find and replace - - replace_text: Text to replace it with - - near_context: Optional text nearby to help locate the correct instance - - occurrence: Which occurrence to replace (1-based index, or -1 for last) - - change_id: Optional ID for tracking the change - - dry_run: If True, simulate the change without modifying the file - - Returns a result message. + Uses utility functions for validation, finding occurrences, and applying changes. 
""" + tool_name = "ReplaceText" try: - # Get absolute file path - abs_path = coder.abs_root_path(file_path) - rel_path = coder.get_rel_fname(abs_path) - - # Check if file exists - if not os.path.isfile(abs_path): - coder.io.tool_error(f"File '{file_path}' not found") - return f"Error: File not found" - - # Check if file is in editable context - if abs_path not in coder.abs_fnames: - if abs_path in coder.abs_read_only_fnames: - coder.io.tool_error(f"File '{file_path}' is read-only. Use MakeEditable first.") - return f"Error: File is read-only. Use MakeEditable first." - else: - coder.io.tool_error(f"File '{file_path}' not in context") - return f"Error: File not in context" - - # Reread file content immediately before modification - content = coder.io.read_text(abs_path) - if content is None: - coder.io.tool_error(f"Could not read file '{file_path}' before ReplaceText operation.") - return f"Error: Could not read file '{file_path}'" - - # Find occurrences using helper function (assuming _find_occurrences is available on coder) - occurrences = coder._find_occurrences(content, find_text, near_context) - + # 1. Validate file and get content + abs_path, rel_path, original_content = validate_file_for_edit(coder, file_path) + + # 2. Find occurrences using helper function + # Note: _find_occurrences is currently on the Coder class, not in tool_utils + occurrences = coder._find_occurrences(original_content, find_text, near_context) + if not occurrences: err_msg = f"Text '{find_text}' not found" if near_context: err_msg += f" near context '{near_context}'" err_msg += f" in file '{file_path}'." - coder.io.tool_error(err_msg) - return f"Error: {err_msg}" + raise ToolError(err_msg) - # Select the occurrence + # 3. 
Select the occurrence index num_occurrences = len(occurrences) try: occurrence = int(occurrence) if occurrence == -1: + if num_occurrences == 0: + raise ToolError(f"Text '{find_text}' not found, cannot select last occurrence.") target_idx = num_occurrences - 1 - elif occurrence > 0 and occurrence <= num_occurrences: - target_idx = occurrence - 1 + elif 1 <= occurrence <= num_occurrences: + target_idx = occurrence - 1 # Convert 1-based to 0-based else: err_msg = f"Occurrence number {occurrence} is out of range. Found {num_occurrences} occurrences of '{find_text}'" if near_context: err_msg += f" near '{near_context}'" err_msg += f" in '{file_path}'." - coder.io.tool_error(err_msg) - return f"Error: {err_msg}" + raise ToolError(err_msg) except ValueError: - coder.io.tool_error(f"Invalid occurrence value: '{occurrence}'. Must be an integer.") - return f"Error: Invalid occurrence value '{occurrence}'" + raise ToolError(f"Invalid occurrence value: '{occurrence}'. Must be an integer.") start_index = occurrences[target_idx] - - # Perform the replacement - original_content = content - new_content = content[:start_index] + replace_text + content[start_index + len(find_text):] - + + # 4. Perform the replacement + new_content = original_content[:start_index] + replace_text + original_content[start_index + len(find_text):] + if original_content == new_content: coder.io.tool_warning(f"No changes made: replacement text is identical to original") return f"Warning: No changes made (replacement identical to original)" - - # Generate diff for feedback (assuming _generate_diff_snippet is available on coder) - diff_example = coder._generate_diff_snippet(original_content, start_index, len(find_text), replace_text) - # Handle dry run - if dry_run: - coder.io.tool_output(f"Dry run: Would replace occurrence {occurrence} of '{find_text}' in {file_path}") - return f"Dry run: Would replace text (occurrence {occurrence}). 
Diff snippet:\n{diff_example}" - - # --- Apply Change (Not dry run) --- - coder.io.write_text(abs_path, new_content) - - # Track the change - try: - metadata = { - 'start_index': start_index, - 'find_text': find_text, - 'replace_text': replace_text, - 'near_context': near_context, - 'occurrence': occurrence - } - change_id = coder.change_tracker.track_change( - file_path=rel_path, - change_type='replacetext', - original_content=original_content, - new_content=new_content, - metadata=metadata, - change_id=change_id - ) - except Exception as track_e: - coder.io.tool_error(f"Error tracking change for ReplaceText: {track_e}") - change_id = "TRACKING_FAILED" - - coder.aider_edited_files.add(rel_path) - - # Improve feedback + # 5. Generate diff for feedback + # Note: _generate_diff_snippet is currently on the Coder class + diff_snippet = coder._generate_diff_snippet(original_content, start_index, len(find_text), replace_text) occurrence_str = f"occurrence {occurrence}" if num_occurrences > 1 else "text" - coder.io.tool_output(f"✅ Replaced {occurrence_str} in {file_path} (change_id: {change_id})") - return f"Successfully replaced {occurrence_str} (change_id: {change_id}). Diff snippet:\n{diff_example}" - + + # 6. Handle dry run + if dry_run: + dry_run_message = f"Dry run: Would replace {occurrence_str} of '{find_text}' in {file_path}." + return format_tool_result(coder, tool_name, "", dry_run=True, dry_run_message=dry_run_message, diff_snippet=diff_snippet) + + # 7. Apply Change (Not dry run) + metadata = { + 'start_index': start_index, + 'find_text': find_text, + 'replace_text': replace_text, + 'near_context': near_context, + 'occurrence': occurrence + } + final_change_id = apply_change( + coder, abs_path, rel_path, original_content, new_content, 'replacetext', metadata, change_id + ) + + # 8. 
Format and return result + success_message = f"Replaced {occurrence_str} in {file_path}" + return format_tool_result( + coder, tool_name, success_message, change_id=final_change_id, diff_snippet=diff_snippet + ) + + except ToolError as e: + # Handle errors raised by utility functions or explicitly raised here + return handle_tool_error(coder, tool_name, e, add_traceback=False) except Exception as e: - coder.io.tool_error(f"Error in ReplaceText: {str(e)}\n{traceback.format_exc()}") - return f"Error: {str(e)}" + # Handle unexpected errors + return handle_tool_error(coder, tool_name, e) diff --git a/aider/tools/tool_utils.py b/aider/tools/tool_utils.py new file mode 100644 index 000000000..339c0af0c --- /dev/null +++ b/aider/tools/tool_utils.py @@ -0,0 +1,192 @@ +import os +import traceback + +class ToolError(Exception): + """Custom exception for tool-specific errors that should be reported to the LLM.""" + pass + +def resolve_paths(coder, file_path): + """Resolves absolute and relative paths for a given file path.""" + try: + abs_path = coder.abs_root_path(file_path) + rel_path = coder.get_rel_fname(abs_path) + return abs_path, rel_path + except Exception as e: + # Wrap unexpected errors during path resolution + raise ToolError(f"Error resolving path '{file_path}': {e}") + +def validate_file_for_edit(coder, file_path): + """ + Validates if a file exists, is in context, and is editable. + Reads and returns original content if valid. + Raises ToolError on failure. + + Returns: + tuple: (absolute_path, relative_path, original_content) + """ + abs_path, rel_path = resolve_paths(coder, file_path) + + if not os.path.isfile(abs_path): + raise ToolError(f"File '{file_path}' not found") + + if abs_path not in coder.abs_fnames: + if abs_path in coder.abs_read_only_fnames: + raise ToolError(f"File '{file_path}' is read-only. Use MakeEditable first.") + else: + # File exists but is not in context at all + raise ToolError(f"File '{file_path}' not in context. 
Use View or MakeEditable first.") + + # Reread content immediately before potential modification + content = coder.io.read_text(abs_path) + if content is None: + # This indicates an issue reading a file we know exists and is in context + coder.io.tool_error(f"Internal error: Could not read file '{file_path}' which should be accessible.") + raise ToolError(f"Could not read file '{file_path}'") + + return abs_path, rel_path, content + +def find_pattern_indices(lines, pattern, near_context=None): + """Finds all line indices matching a pattern, optionally filtered by context.""" + indices = [] + for i, line in enumerate(lines): + if pattern in line: + if near_context: + # Check if near_context is within a window around the match + context_window_start = max(0, i - 5) # Check 5 lines before/after + context_window_end = min(len(lines), i + 6) + context_block = "\n".join(lines[context_window_start:context_window_end]) + if near_context in context_block: + indices.append(i) + else: + indices.append(i) + return indices + +def select_occurrence_index(indices, occurrence, pattern_desc="Pattern"): + """ + Selects the target 0-based index from a list of indices based on the 1-based occurrence parameter. + Raises ToolError if the pattern wasn't found or the occurrence is invalid. + """ + num_occurrences = len(indices) + if not indices: + raise ToolError(f"{pattern_desc} not found") + + try: + occurrence = int(occurrence) # Ensure occurrence is an integer + if occurrence == -1: # Last occurrence + if num_occurrences == 0: + raise ToolError(f"{pattern_desc} not found, cannot select last occurrence.") + target_idx = num_occurrences - 1 + elif 1 <= occurrence <= num_occurrences: + target_idx = occurrence - 1 # Convert 1-based to 0-based + else: + raise ToolError(f"Occurrence number {occurrence} is out of range for {pattern_desc}. Found {num_occurrences} occurrences.") + except ValueError: + raise ToolError(f"Invalid occurrence value: '{occurrence}'. 
Must be an integer.") + + return indices[target_idx] + +def determine_line_range(lines, start_pattern_line_index, end_pattern=None, line_count=None, pattern_desc="Block"): + """ + Determines the end line index based on end_pattern or line_count. + Raises ToolError if end_pattern is not found or line_count is invalid. + """ + start_line = start_pattern_line_index + end_line = -1 + + if end_pattern and line_count: + raise ToolError("Cannot specify both end_pattern and line_count") + + if end_pattern: + found_end = False + # Search from the start_line onwards for the end_pattern + for i in range(start_line, len(lines)): + if end_pattern in lines[i]: + end_line = i + found_end = True + break + if not found_end: + raise ToolError(f"End pattern '{end_pattern}' not found after start pattern on line {start_line + 1}") + elif line_count: + try: + line_count = int(line_count) + if line_count <= 0: + raise ValueError("Line count must be positive") + # Calculate end line index, ensuring it doesn't exceed file bounds + end_line = min(start_line + line_count - 1, len(lines) - 1) + except ValueError: + raise ToolError(f"Invalid line_count value: '{line_count}'. Must be a positive integer.") + else: + # If neither end_pattern nor line_count is given, the range is just the start line + end_line = start_line + + return start_line, end_line + + +def apply_change(coder, abs_path, rel_path, original_content, new_content, change_type, metadata, change_id=None): + """ + Writes the new content, tracks the change, and updates coder state. + Returns the final change ID. Raises ToolError on tracking failure. 
+ """ + coder.io.write_text(abs_path, new_content) + try: + final_change_id = coder.change_tracker.track_change( + file_path=rel_path, + change_type=change_type, + original_content=original_content, + new_content=new_content, + metadata=metadata, + change_id=change_id + ) + except Exception as track_e: + # Log the error but also raise ToolError to inform the LLM + coder.io.tool_error(f"Error tracking change for {change_type}: {track_e}") + raise ToolError(f"Failed to track change: {track_e}") + + coder.aider_edited_files.add(rel_path) + return final_change_id + + +def handle_tool_error(coder, tool_name, e, add_traceback=True): + """Logs tool errors and returns a formatted error message for the LLM.""" + error_message = f"Error in {tool_name}: {str(e)}" + if add_traceback: + error_message += f"\n{traceback.format_exc()}" + coder.io.tool_error(error_message) + # Return only the core error message to the LLM for brevity + return f"Error: {str(e)}" + +def format_tool_result(coder, tool_name, success_message, change_id=None, diff_snippet=None, dry_run=False, dry_run_message=None): + """Formats the result message for tool execution.""" + if dry_run: + full_message = dry_run_message or f"Dry run: Would execute {tool_name}." + if diff_snippet: + full_message += f" Diff snippet:\n{diff_snippet}" + coder.io.tool_output(full_message) # Log the dry run action + return full_message + else: + # Use the provided success message, potentially adding change_id and diff + full_message = f"✅ {success_message}" + if change_id: + full_message += f" (change_id: {change_id})" + coder.io.tool_output(full_message) # Log the success action + + result_for_llm = f"Successfully executed {tool_name}." + if change_id: + result_for_llm += f" Change ID: {change_id}." 
+ if diff_snippet: + result_for_llm += f" Diff snippet:\n{diff_snippet}" + return result_for_llm + +# Example usage within a hypothetical tool: +# try: +# abs_path, rel_path, original_content = validate_file_for_edit(coder, file_path) +# # ... tool specific logic to determine new_content and metadata ... +# if dry_run: +# return format_tool_result(coder, "MyTool", "", dry_run=True, diff_snippet=diff) +# +# change_id = apply_change(coder, abs_path, rel_path, original_content, new_content, 'mytool', metadata) +# return format_tool_result(coder, "MyTool", f"Applied change to {file_path}", change_id=change_id, diff_snippet=diff) +# except ToolError as e: +# return handle_tool_error(coder, "MyTool", e, add_traceback=False) # Don't need traceback for ToolErrors +# except Exception as e: +# return handle_tool_error(coder, "MyTool", e) From 7eac68e267e04cf77beca6d343679455e7a2f522 Mon Sep 17 00:00:00 2001 From: "Amar Sood (tekacs)" Date: Sat, 12 Apr 2025 09:02:39 -0400 Subject: [PATCH 29/63] Refactor the indent lines function to use tool_utils --- aider/tools/indent_lines.py | 220 ++++++++++++------------------------ 1 file changed, 73 insertions(+), 147 deletions(-) diff --git a/aider/tools/indent_lines.py b/aider/tools/indent_lines.py index e3f244faa..ff525b930 100644 --- a/aider/tools/indent_lines.py +++ b/aider/tools/indent_lines.py @@ -1,10 +1,20 @@ import os import traceback +from .tool_utils import ( + ToolError, + validate_file_for_edit, + find_pattern_indices, + select_occurrence_index, + determine_line_range, + apply_change, + handle_tool_error, + format_tool_result, +) def _execute_indent_lines(coder, file_path, start_pattern, end_pattern=None, line_count=None, indent_levels=1, near_context=None, occurrence=1, change_id=None, dry_run=False): """ - Indent or unindent a block of lines in a file. - + Indent or unindent a block of lines in a file using utility functions. 
+ Parameters: - coder: The Coder instance - file_path: Path to the file to modify @@ -16,119 +26,37 @@ def _execute_indent_lines(coder, file_path, start_pattern, end_pattern=None, lin - occurrence: Which occurrence of the start_pattern to use (1-based index, or -1 for last) - change_id: Optional ID for tracking the change - dry_run: If True, simulate the change without modifying the file - + Returns a result message. """ + tool_name = "IndentLines" try: - # Get absolute file path - abs_path = coder.abs_root_path(file_path) - rel_path = coder.get_rel_fname(abs_path) - - # Check if file exists - if not os.path.isfile(abs_path): - coder.io.tool_error(f"File '{file_path}' not found") - return f"Error: File not found" - - # Check if file is in editable context - if abs_path not in coder.abs_fnames: - if abs_path in coder.abs_read_only_fnames: - coder.io.tool_error(f"File '{file_path}' is read-only. Use MakeEditable first.") - return f"Error: File is read-only. Use MakeEditable first." - else: - coder.io.tool_error(f"File '{file_path}' not in context") - return f"Error: File not in context" - - # Reread file content immediately before modification - file_content = coder.io.read_text(abs_path) - if file_content is None: - coder.io.tool_error(f"Could not read file '{file_path}' before IndentLines operation.") - return f"Error: Could not read file '{file_path}'" - - # Validate we have either end_pattern or line_count, but not both - if end_pattern and line_count: - coder.io.tool_error("Cannot specify both end_pattern and line_count") - return "Error: Cannot specify both end_pattern and line_count" - - # Split into lines for easier handling - lines = file_content.splitlines() - original_content = file_content - - # Find occurrences of the start_pattern - start_pattern_line_indices = [] - for i, line in enumerate(lines): - if start_pattern in line: - if near_context: - context_window_start = max(0, i - 5) - context_window_end = min(len(lines), i + 6) - context_block = 
"\n".join(lines[context_window_start:context_window_end]) - if near_context in context_block: - start_pattern_line_indices.append(i) - else: - start_pattern_line_indices.append(i) + # 1. Validate file and get content + abs_path, rel_path, original_content = validate_file_for_edit(coder, file_path) + lines = original_content.splitlines() - if not start_pattern_line_indices: - err_msg = f"Start pattern '{start_pattern}' not found" - if near_context: err_msg += f" near context '{near_context}'" - err_msg += f" in file '{file_path}'." - coder.io.tool_error(err_msg) - return f"Error: {err_msg}" + # 2. Find the start line + pattern_desc = f"Start pattern '{start_pattern}'" + if near_context: + pattern_desc += f" near context '{near_context}'" + start_pattern_indices = find_pattern_indices(lines, start_pattern, near_context) + start_line_idx = select_occurrence_index(start_pattern_indices, occurrence, pattern_desc) - # Select the occurrence for the start pattern - num_occurrences = len(start_pattern_line_indices) - try: - occurrence = int(occurrence) - if occurrence == -1: - target_idx = num_occurrences - 1 - elif occurrence > 0 and occurrence <= num_occurrences: - target_idx = occurrence - 1 - else: - err_msg = f"Occurrence number {occurrence} is out of range for start pattern '{start_pattern}'. Found {num_occurrences} occurrences" - if near_context: err_msg += f" near '{near_context}'" - err_msg += f" in '{file_path}'." - coder.io.tool_error(err_msg) - return f"Error: {err_msg}" - except ValueError: - coder.io.tool_error(f"Invalid occurrence value: '{occurrence}'. Must be an integer.") - return f"Error: Invalid occurrence value '{occurrence}'" + # 3. 
Determine the end line + start_line, end_line = determine_line_range( + lines, start_line_idx, end_pattern, line_count, pattern_desc=pattern_desc + ) - start_line = start_pattern_line_indices[target_idx] - occurrence_str = f"occurrence {occurrence} of " if num_occurrences > 1 else "" - - # Find the end line based on end_pattern or line_count - end_line = -1 - if end_pattern: - for i in range(start_line, len(lines)): - if end_pattern in lines[i]: - end_line = i - break - if end_line == -1: - err_msg = f"End pattern '{end_pattern}' not found after {occurrence_str}start pattern '{start_pattern}' (line {start_line + 1}) in '{file_path}'." - coder.io.tool_error(err_msg) - return f"Error: {err_msg}" - elif line_count: - try: - line_count = int(line_count) - if line_count <= 0: raise ValueError("Line count must be positive") - end_line = min(start_line + line_count - 1, len(lines) - 1) - except ValueError: - coder.io.tool_error(f"Invalid line_count value: '{line_count}'. Must be a positive integer.") - return f"Error: Invalid line_count value '{line_count}'" - else: - end_line = start_line - - # Determine indentation amount + # 4. Validate and prepare indentation try: indent_levels = int(indent_levels) except ValueError: - coder.io.tool_error(f"Invalid indent_levels value: '{indent_levels}'. Must be an integer.") - return f"Error: Invalid indent_levels value '{indent_levels}'" - + raise ToolError(f"Invalid indent_levels value: '{indent_levels}'. 
Must be an integer.") + indent_str = ' ' * 4 # Assume 4 spaces per level - - # Create a temporary copy to calculate the change modified_lines = list(lines) - - # Apply indentation to the temporary copy + + # Apply indentation logic (core logic remains) for i in range(start_line, end_line + 1): if indent_levels > 0: modified_lines[i] = (indent_str * indent_levels) + modified_lines[i] @@ -138,60 +66,58 @@ def _execute_indent_lines(coder, file_path, start_pattern, end_pattern=None, lin actual_remove = min(spaces_to_remove, current_leading_spaces) if actual_remove > 0: modified_lines[i] = modified_lines[i][actual_remove:] - - # Join lines back into a string + new_content = '\n'.join(modified_lines) - + if original_content == new_content: coder.io.tool_warning(f"No changes made: indentation would not change file") return f"Warning: No changes made (indentation would not change file)" - # Generate diff for feedback (assuming _generate_diff_snippet_indent is available on coder) + # 5. Generate diff for feedback diff_snippet = coder._generate_diff_snippet_indent(original_content, new_content, start_line, end_line) - - # Handle dry run - if dry_run: - action = "indent" if indent_levels > 0 else "unindent" - coder.io.tool_output(f"Dry run: Would {action} lines {start_line+1}-{end_line+1} (based on {occurrence_str}start pattern '{start_pattern}') in {file_path}") - return f"Dry run: Would {action} block. 
Diff snippet:\n{diff_snippet}" - - # --- Apply Change (Not dry run) --- - coder.io.write_text(abs_path, new_content) - - # Track the change - try: - metadata = { - 'start_line': start_line + 1, - 'end_line': end_line + 1, - 'start_pattern': start_pattern, - 'end_pattern': end_pattern, - 'line_count': line_count, - 'indent_levels': indent_levels, - 'near_context': near_context, - 'occurrence': occurrence, - } - change_id = coder.change_tracker.track_change( - file_path=rel_path, - change_type='indentlines', - original_content=original_content, - new_content=new_content, - metadata=metadata, - change_id=change_id - ) - except Exception as track_e: - coder.io.tool_error(f"Error tracking change for IndentLines: {track_e}") - change_id = "TRACKING_FAILED" - - coder.aider_edited_files.add(rel_path) - - # Improve feedback - action = "Indented" if indent_levels > 0 else "Unindented" + num_occurrences = len(start_pattern_indices) + occurrence_str = f"occurrence {occurrence} of " if num_occurrences > 1 else "" + action = "indent" if indent_levels > 0 else "unindent" levels = abs(indent_levels) level_text = "level" if levels == 1 else "levels" num_lines = end_line - start_line + 1 + + # 6. Handle dry run + if dry_run: + dry_run_message = f"Dry run: Would {action} {num_lines} lines ({start_line+1}-{end_line+1}) by {levels} {level_text} (based on {occurrence_str}start pattern '{start_pattern}') in {file_path}." + return format_tool_result(coder, tool_name, "", dry_run=True, dry_run_message=dry_run_message, diff_snippet=diff_snippet) + + # 7. Apply Change (Not dry run) + metadata = { + 'start_line': start_line + 1, + 'end_line': end_line + 1, + 'start_pattern': start_pattern, + 'end_pattern': end_pattern, + 'line_count': line_count, + 'indent_levels': indent_levels, + 'near_context': near_context, + 'occurrence': occurrence, + } + final_change_id = apply_change( + coder, abs_path, rel_path, original_content, new_content, 'indentlines', metadata, change_id + ) + + # 8. 
Format and return result + action_past = "Indented" if indent_levels > 0 else "Unindented" + success_message = f"{action_past} {num_lines} lines by {levels} {level_text} (from {occurrence_str}start pattern) in {file_path}" + return format_tool_result( + coder, tool_name, success_message, change_id=final_change_id, diff_snippet=diff_snippet + ) + + except ToolError as e: + # Handle errors raised by utility functions (expected errors) + return handle_tool_error(coder, tool_name, e, add_traceback=False) + except Exception as e: + # Handle unexpected errors + return handle_tool_error(coder, tool_name, e) coder.io.tool_output(f"✅ {action} {num_lines} lines (from {occurrence_str}start pattern) by {levels} {level_text} in {file_path} (change_id: {change_id})") return f"Successfully {action.lower()} {num_lines} lines by {levels} {level_text} (change_id: {change_id}). Diff snippet:\n{diff_snippet}" except Exception as e: coder.io.tool_error(f"Error in IndentLines: {str(e)}\n{traceback.format_exc()}") - return f"Error: {str(e)}" + return f"Error: {str(e)}" \ No newline at end of file From 18a942759ccfa2f22ed74758890f682b7899c1a1 Mon Sep 17 00:00:00 2001 From: "Amar Sood (tekacs)" Date: Sat, 12 Apr 2025 09:02:39 -0400 Subject: [PATCH 30/63] Make tools even more favored over SEARCH/REPLACE blocks --- aider/coders/navigator_prompts.py | 51 ++++++++++++++++++++++++------- 1 file changed, 40 insertions(+), 11 deletions(-) diff --git a/aider/coders/navigator_prompts.py b/aider/coders/navigator_prompts.py index 10a9b17e3..4a9cd250a 100644 --- a/aider/coders/navigator_prompts.py +++ b/aider/coders/navigator_prompts.py @@ -210,6 +210,30 @@ SEARCH/REPLACE blocks can appear anywhere in your response if needed. ``` * **Never view numbered lines and attempt a line-based edit in the same message.** This workflow *must* span two separate turns. +## Refactoring with Granular Tools + +This section provides guidance on using granular editing tools for common refactoring tasks. 
+ +### Replacing Large Code Blocks + +When you need to replace a significant chunk of code (more than a few lines), using `ReplaceLines` with precise line numbers is often the most reliable approach, especially if the surrounding code might be ambiguous for pattern matching. + +1. **Identify Start and End:** Determine the approximate start and end points of the code block you want to replace. Use nearby unique text as patterns. +2. **Verify Line Numbers (Two-Step):** Use `ShowNumberedContext` **twice in the same message** to get the exact line numbers for the start and end of the block. Request a large context window (e.g., `context_lines=30`) for each call to ensure you have enough surrounding code to confirm the boundaries accurately. + ``` + # Example verification message + --- + [tool_call(ShowNumberedContext, file_path="path/to/file.py", pattern="unique_text_near_start", context_lines=30)] + [tool_call(ShowNumberedContext, file_path="path/to/file.py", pattern="unique_text_near_end", context_lines=30)] + ``` +3. **Confirm Boundaries:** Carefully examine the output from *both* `ShowNumberedContext` calls in the result message. Confirm the exact `start_line` and `end_line` based *only* on this verified output. +4. **Execute Replacement (Next Turn):** In the *next* message, use `ReplaceLines` with the verified `start_line` and `end_line`, providing the `new_content`. + ``` + --- + [tool_call(ReplaceLines, file_path="path/to/file.py", start_line=VERIFIED_START, end_line=VERIFIED_END, new_content=)] + ``` +5. **Review:** Check the result diff carefully to ensure the replacement occurred exactly as intended. + ### Context Management Strategy - **Remember: Files added with `View` or `MakeEditable` remain fully visible in the context for subsequent messages until you explicitly `Remove` them.** - Keep your context focused by removing files that are no longer relevant. 
@@ -292,8 +316,14 @@ def new_function(param1, param2): """)] ``` -### SEARCH/REPLACE Block Format (Use Sparingly) -**Again, prefer granular tools.** However, as a fallback, you can use SEARCH/REPLACE blocks with this exact format: +### SEARCH/REPLACE Block Format (Use ONLY as a Last Resort) +**Granular editing tools (like `ReplaceLines`, `InsertBlock`, `DeleteBlock`) are STRONGLY PREFERRED for ALL edits.** They offer significantly more precision and safety. + +Use SEARCH/REPLACE blocks **only** as a fallback mechanism when granular tools **cannot** achieve the desired outcome due to the *inherent nature* of the change itself (e.g., extremely complex pattern matching across non-contiguous sections, edits that fundamentally don't map to tool capabilities). **Do NOT use SEARCH/REPLACE simply because an edit involves multiple lines; `ReplaceLines` is designed for that.** + +**Before generating a SEARCH/REPLACE block for more than 1-2 lines, you MUST explicitly state why `ReplaceLines` (using the mandatory two-step verification workflow) is not suitable for the specific edit.** + +If you must use SEARCH/REPLACE, adhere strictly to this format: ````python path/to/file.ext @@ -305,14 +335,13 @@ Replacement code lines ```` NOTE that this uses four backticks as the fence and not three! -#### Guidelines for SEARCH/REPLACE -- Every SEARCH section must EXACTLY MATCH existing content, including whitespace and indentation -- Keep edit blocks focused and concise - include only the necessary context -- Include enough lines for uniqueness but avoid long unchanged sections -- For new files, use an empty SEARCH section -- To move code within a file, use two separate SEARCH/REPLACE blocks -- Respect the file paths exactly as they appear - +#### Guidelines for SEARCH/REPLACE (When Absolutely Necessary) +- Every SEARCH section must EXACTLY MATCH existing content, including whitespace and indentation. +- Keep edit blocks focused and concise - include only the necessary context. 
+- Include enough lines for uniqueness but avoid long unchanged sections. +- For new files, use an empty SEARCH section. +- To move code within a file, use two separate SEARCH/REPLACE blocks. +- Respect the file paths exactly as they appear. ### Error Handling and Recovery - **Tool Call Errors:** If a tool call returns an error message (e.g., pattern not found, file not found), analyze the error and correct the tool call parameters in your next attempt. - **Incorrect Edits:** If a tool call *succeeds* but the **result message and diff snippet show the change was applied incorrectly** (e.g., wrong location, unintended side effects): @@ -457,4 +486,4 @@ Let me explore the codebase more strategically this time: - I'll use tool calls to automatically continue exploration until I have enough information I'll start exploring again with improved search strategies to find exactly what we need. -""" +""" \ No newline at end of file From 5e81b6c1c4b9ecf5d09b5e7447eb5f252a460cf6 Mon Sep 17 00:00:00 2001 From: "Amar Sood (tekacs)" Date: Sat, 12 Apr 2025 09:02:39 -0400 Subject: [PATCH 31/63] Replace ShowNumberedContext for tool_utils --- aider/tools/show_numbered_context.py | 67 ++++++++++++++-------------- 1 file changed, 34 insertions(+), 33 deletions(-) diff --git a/aider/tools/show_numbered_context.py b/aider/tools/show_numbered_context.py index ff855a712..6df4386e4 100644 --- a/aider/tools/show_numbered_context.py +++ b/aider/tools/show_numbered_context.py @@ -1,31 +1,32 @@ import os +from .tool_utils import ToolError, resolve_paths, handle_tool_error def execute_show_numbered_context(coder, file_path, pattern=None, line_number=None, context_lines=3): """ Displays numbered lines from file_path centered around a target location (pattern or line_number), without adding the file to context. + Uses utility functions for path resolution and error handling. 
""" - error_message = None - if not (pattern is None) ^ (line_number is None): - error_message = "Provide exactly one of 'pattern' or 'line_number'." - coder.io.tool_error(error_message) - return f"Error: {error_message}" - - abs_path = coder.abs_root_path(file_path) - if not os.path.exists(abs_path): - error_message = f"File not found: {file_path}" - coder.io.tool_error(error_message) - return f"Error: {error_message}" - + tool_name = "ShowNumberedContext" try: + # 1. Validate arguments + if not (pattern is None) ^ (line_number is None): + raise ToolError("Provide exactly one of 'pattern' or 'line_number'.") + + # 2. Resolve path + abs_path, rel_path = resolve_paths(coder, file_path) + if not os.path.exists(abs_path): + # Check existence after resolving, as resolve_paths doesn't guarantee existence + raise ToolError(f"File not found: {file_path}") + + # 3. Read file content content = coder.io.read_text(abs_path) if content is None: - error_message = f"Could not read file: {file_path}" - coder.io.tool_error(error_message) - return f"Error: {error_message}" + raise ToolError(f"Could not read file: {file_path}") lines = content.splitlines() num_lines = len(lines) + # 4. Determine center line index center_line_idx = -1 found_by = "" @@ -36,15 +37,12 @@ def execute_show_numbered_context(coder, file_path, pattern=None, line_number=No center_line_idx = line_number_int - 1 # Convert to 0-based index found_by = f"line {line_number_int}" else: - error_message = f"Line number {line_number_int} is out of range (1-{num_lines}) for {file_path}." - coder.io.tool_error(error_message) - return f"Error: {error_message}" + raise ToolError(f"Line number {line_number_int} is out of range (1-{num_lines}) for {file_path}.") except ValueError: - error_message = f"Invalid line number '{line_number}'. Must be an integer." - coder.io.tool_error(error_message) - return f"Error: {error_message}" + raise ToolError(f"Invalid line number '{line_number}'. 
Must be an integer.") elif pattern is not None: + # TODO: Update this section for multiline pattern support later first_match_line_idx = -1 for i, line in enumerate(lines): if pattern in line: @@ -55,19 +53,17 @@ def execute_show_numbered_context(coder, file_path, pattern=None, line_number=No center_line_idx = first_match_line_idx found_by = f"pattern '{pattern}' on line {center_line_idx + 1}" else: - error_message = f"Pattern '{pattern}' not found in {file_path}." - coder.io.tool_error(error_message) - return f"Error: {error_message}" + raise ToolError(f"Pattern '{pattern}' not found in {file_path}.") if center_line_idx == -1: # Should not happen if logic above is correct, but as a safeguard - error_message = "Could not determine center line." - coder.io.tool_error(error_message) - return f"Error: {error_message}" + raise ToolError("Internal error: Could not determine center line.") - # Calculate context window + # 5. Calculate context window try: context_lines_int = int(context_lines) + if context_lines_int < 0: + raise ValueError("Context lines must be non-negative") except ValueError: coder.io.tool_warning(f"Invalid context_lines value '{context_lines}', using default 3.") context_lines_int = 3 @@ -75,17 +71,22 @@ def execute_show_numbered_context(coder, file_path, pattern=None, line_number=No start_line_idx = max(0, center_line_idx - context_lines_int) end_line_idx = min(num_lines - 1, center_line_idx + context_lines_int) - # Format output - output_lines = [f"Displaying context around {found_by} in {file_path}:"] + # 6. 
Format output + # Use rel_path for user-facing messages + output_lines = [f"Displaying context around {found_by} in {rel_path}:"] max_line_num_width = len(str(end_line_idx + 1)) # Width for padding for i in range(start_line_idx, end_line_idx + 1): line_num_str = str(i + 1).rjust(max_line_num_width) output_lines.append(f"{line_num_str} | {lines[i]}") + # Log success and return the formatted context directly + coder.io.tool_output(f"Successfully retrieved context for {rel_path}") return "\n".join(output_lines) + except ToolError as e: + # Handle expected errors raised by utility functions or validation + return handle_tool_error(coder, tool_name, e, add_traceback=False) except Exception as e: - error_message = f"Error processing {file_path}: {e}" - coder.io.tool_error(error_message) - return f"Error: {error_message}" + # Handle unexpected errors during processing + return handle_tool_error(coder, tool_name, e) From 123879fad53be5329d488e03813104501f14744a Mon Sep 17 00:00:00 2001 From: "Amar Sood (tekacs)" Date: Sat, 12 Apr 2025 09:17:34 -0400 Subject: [PATCH 32/63] Only process SEARCH/REPLACE blocks before the tool call separator --- aider/coders/navigator_coder.py | 30 +++++++++++++++++++++--------- aider/coders/navigator_prompts.py | 15 ++++++++++++--- 2 files changed, 33 insertions(+), 12 deletions(-) diff --git a/aider/coders/navigator_coder.py b/aider/coders/navigator_coder.py index b2381dce9..42e9ff3c7 100644 --- a/aider/coders/navigator_coder.py +++ b/aider/coders/navigator_coder.py @@ -364,7 +364,7 @@ class NavigatorCoder(Coder): This is a key method that: 1. Processes any tool commands in the response (only after a '---' line) - 2. Processes any SEARCH/REPLACE blocks in the response (regardless of tool calls) + 2. Processes any SEARCH/REPLACE blocks in the response (only before the '---' line if one exists) 3. 
If tool commands were found, sets up for another automatic round This enables the "auto-exploration" workflow where the LLM can @@ -376,8 +376,9 @@ class NavigatorCoder(Coder): return True original_content = content # Keep the original response - # Process tool commands: returns content with tool calls removed, results, and flag if any tool calls were found - processed_content, result_messages, tool_calls_found = self._process_tool_commands(content) + # Process tool commands: returns content with tool calls removed, results, flag if any tool calls were found, + # and the content before the last '---' line + processed_content, result_messages, tool_calls_found, content_before_last_separator = self._process_tool_commands(content) # Since we are no longer suppressing, the partial_response_content IS the final content. # We might want to update it to the processed_content (without tool calls) if we don't @@ -388,12 +389,21 @@ class NavigatorCoder(Coder): # Process implicit file mentions using the content *after* tool calls were removed self._process_file_mentions(processed_content) - # Check if the content contains the SEARCH/REPLACE markers (do this regardless of tool calls) + # Check if the content contains the SEARCH/REPLACE markers has_search = "<<<<<<< SEARCH" in self.partial_response_content has_divider = "=======" in self.partial_response_content has_replace = ">>>>>>> REPLACE" in self.partial_response_content edit_match = has_search and has_divider and has_replace + # Check if there's a '---' line - if yes, SEARCH/REPLACE blocks can only appear before it + separator_marker = "\n---\n" + if separator_marker in original_content and edit_match: + # Check if the edit blocks are only in the part before the last '---' line + has_search_before = "<<<<<<< SEARCH" in content_before_last_separator + has_divider_before = "=======" in content_before_last_separator + has_replace_before = ">>>>>>> REPLACE" in content_before_last_separator + edit_match = has_search_before and 
has_divider_before and has_replace_before + if edit_match: self.io.tool_output("Detected edit blocks, applying changes within Navigator...") edited_files = self._apply_edits_from_response() @@ -464,8 +474,10 @@ class NavigatorCoder(Coder): Rules: 1. Tool calls must appear after the LAST '---' line separator in the content 2. Any tool calls before this last separator are treated as text (not executed) + 3. SEARCH/REPLACE blocks can only appear before this last separator Returns processed content, result messages, and a flag indicating if any tool calls were found. + Also returns the content before the last separator for SEARCH/REPLACE block validation. """ result_messages = [] modified_content = content # Start with original content @@ -479,16 +491,16 @@ class NavigatorCoder(Coder): # If there's no separator, treat the entire content as before the separator if len(content_parts) == 1: - # Return the original content with no tool calls processed - return content, result_messages, False + # Return the original content with no tool calls processed, and the content itself as before_separator + return content, result_messages, False, content # Take everything before the last separator (including intermediate separators) - content_before_separator = separator_marker.join(content_parts[:-1]) + separator_marker + content_before_separator = separator_marker.join(content_parts[:-1]) # Take only what comes after the last separator content_after_separator = content_parts[-1] # Find tool calls using a more robust method, but only in the content after separator - processed_content = content_before_separator + processed_content = content_before_separator + separator_marker last_index = 0 start_marker = "[tool_call(" end_marker = "]" # The parenthesis balancing finds the ')', we just need the final ']' @@ -945,7 +957,7 @@ class NavigatorCoder(Coder): # Update internal counter self.tool_call_count += call_count - return modified_content, result_messages, tool_calls_found + return 
modified_content, result_messages, tool_calls_found, content_before_separator def _apply_edits_from_response(self): """ diff --git a/aider/coders/navigator_prompts.py b/aider/coders/navigator_prompts.py index 4a9cd250a..62382c216 100644 --- a/aider/coders/navigator_prompts.py +++ b/aider/coders/navigator_prompts.py @@ -158,7 +158,14 @@ When you include any tool call, the system will automatically continue to the ne ``` Your answer to the user's question... -SEARCH/REPLACE blocks can appear anywhere in your response if needed. +SEARCH/REPLACE blocks can ONLY appear BEFORE the last '---' separator. + +file.py +<<<<<<< SEARCH +old code +======= +new code +>>>>>>> REPLACE --- [tool_call(ViewFilesMatching, pattern="findme")] @@ -446,7 +453,8 @@ Here are summaries of some files present in this repo: - You are encouraged to use tools for editing where possible, falling back to SEARCH/REPLACE when that doesn't work well. ## SEARCH/REPLACE blocks -- When you use them, SEARCH/REPLACE blocks can appear anywhere in your response +- When using SEARCH/REPLACE blocks, they MUST ONLY appear BEFORE the last '---' separator line in your response +- If there is no '---' separator, they can appear anywhere in your response - Format example: ``` Your answer text here... @@ -461,6 +469,7 @@ Here are summaries of some files present in this repo: --- [tool_call(ToolName, param1=value1)] ``` +- IMPORTANT: Any SEARCH/REPLACE blocks that appear after the last '---' separator will be IGNORED ## Context Features - Use enhanced context blocks (directory structure and git status) to orient yourself @@ -486,4 +495,4 @@ Let me explore the codebase more strategically this time: - I'll use tool calls to automatically continue exploration until I have enough information I'll start exploring again with improved search strategies to find exactly what we need. 
-""" \ No newline at end of file +""" From e039543e158fe3bf3c5ab385ce100ed090dc55b2 Mon Sep 17 00:00:00 2001 From: "Amar Sood (tekacs)" Date: Sat, 12 Apr 2025 09:44:57 -0400 Subject: [PATCH 33/63] Require a justification when using SEARCH/REPLACE --- aider/coders/navigator_prompts.py | 13 +++++++++++-- 1 file changed, 11 insertions(+), 2 deletions(-) diff --git a/aider/coders/navigator_prompts.py b/aider/coders/navigator_prompts.py index 62382c216..660f8c06b 100644 --- a/aider/coders/navigator_prompts.py +++ b/aider/coders/navigator_prompts.py @@ -158,7 +158,10 @@ When you include any tool call, the system will automatically continue to the ne ``` Your answer to the user's question... -SEARCH/REPLACE blocks can ONLY appear BEFORE the last '---' separator. +SEARCH/REPLACE blocks can ONLY appear BEFORE the last '---' separator. Using SEARCH/REPLACE when granular tools could have been used is a FAILURE to follow instructions. + +# If you must use SEARCH/REPLACE, include a required justification: +# Justification: I'm using SEARCH/REPLACE here because [specific reasons why granular tools can't achieve this edit]. file.py <<<<<<< SEARCH @@ -328,7 +331,9 @@ def new_function(param1, param2): Use SEARCH/REPLACE blocks **only** as a fallback mechanism when granular tools **cannot** achieve the desired outcome due to the *inherent nature* of the change itself (e.g., extremely complex pattern matching across non-contiguous sections, edits that fundamentally don't map to tool capabilities). 
**Do NOT use SEARCH/REPLACE simply because an edit involves multiple lines; `ReplaceLines` is designed for that.** -**Before generating a SEARCH/REPLACE block for more than 1-2 lines, you MUST explicitly state why `ReplaceLines` (using the mandatory two-step verification workflow) is not suitable for the specific edit.** +**IMPORTANT: Using SEARCH/REPLACE when granular editing tools could have been used is considered a FAILURE to follow instructions.** + +**Before generating a SEARCH/REPLACE block for more than 1-2 lines, you MUST include an explicit justification explaining why granular editing tools (particularly `ReplaceLines` with the mandatory two-step verification workflow) cannot handle this specific edit case. Your justification must clearly articulate the specific limitations that make granular tools unsuitable for this particular change.** If you must use SEARCH/REPLACE, adhere strictly to this format: @@ -455,10 +460,14 @@ Here are summaries of some files present in this repo: ## SEARCH/REPLACE blocks - When using SEARCH/REPLACE blocks, they MUST ONLY appear BEFORE the last '---' separator line in your response - If there is no '---' separator, they can appear anywhere in your response +- IMPORTANT: Using SEARCH/REPLACE when granular editing tools could have been used is considered a FAILURE to follow instructions +- You MUST include a clear justification for why granular tools can't handle the specific edit when using SEARCH/REPLACE - Format example: ``` Your answer text here... 
+ # Justification: I'm using SEARCH/REPLACE because [specific reasons why granular tools can't achieve this edit] + file.py <<<<<<< SEARCH old code From d6e58ce06355706993443a0f51803aa79071103c Mon Sep 17 00:00:00 2001 From: "Amar Sood (tekacs)" Date: Sat, 12 Apr 2025 10:21:14 -0400 Subject: [PATCH 34/63] Emit more than 3 tool calls on separate lines --- aider/coders/navigator_prompts.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/aider/coders/navigator_prompts.py b/aider/coders/navigator_prompts.py index 660f8c06b..6f911c415 100644 --- a/aider/coders/navigator_prompts.py +++ b/aider/coders/navigator_prompts.py @@ -455,6 +455,7 @@ Here are summaries of some files present in this repo: ## Tool Call Format - Tool calls MUST be at the end of your message, after a '---' separator +- If emitting multiple tool calls (e.g., 3 or more), place each call on a new line for clarity. - You are encouraged to use tools for editing where possible, falling back to SEARCH/REPLACE when that doesn't work well. ## SEARCH/REPLACE blocks @@ -504,4 +505,4 @@ Let me explore the codebase more strategically this time: - I'll use tool calls to automatically continue exploration until I have enough information I'll start exploring again with improved search strategies to find exactly what we need. 
-""" +""" \ No newline at end of file From b51abd7fe763db5ceafd7754d0a2000362bafbd0 Mon Sep 17 00:00:00 2001 From: "Amar Sood (tekacs)" Date: Sat, 12 Apr 2025 10:21:14 -0400 Subject: [PATCH 35/63] Capture TreeSitter ranges for tools to use --- aider/repomap.py | 67 +++++++++++++++++++++++++++++++++---- aider/tools/delete_block.py | 9 ++++- aider/tools/indent_lines.py | 9 ++++- aider/tools/tool_utils.py | 52 ++++++++++++++++++++++++++-- 4 files changed, 126 insertions(+), 11 deletions(-) diff --git a/aider/repomap.py b/aider/repomap.py index b21d65f02..c5a26a72b 100644 --- a/aider/repomap.py +++ b/aider/repomap.py @@ -27,11 +27,11 @@ from grep_ast.tsl import USING_TSL_PACK, get_language, get_parser # noqa: E402 # Define the Tag namedtuple with a default for specific_kind to maintain compatibility # with cached entries that might have been created with the old definition -class TagBase(namedtuple("TagBase", "rel_fname fname line name kind specific_kind")): +class TagBase(namedtuple("TagBase", "rel_fname fname line name kind specific_kind start_line end_line start_byte end_byte")): __slots__ = () - def __new__(cls, rel_fname, fname, line, name, kind, specific_kind=None): + def __new__(cls, rel_fname, fname, line, name, kind, specific_kind=None, start_line=None, end_line=None, start_byte=None, end_byte=None): # Provide a default value for specific_kind to handle old cached objects - return super(TagBase, cls).__new__(cls, rel_fname, fname, line, name, kind, specific_kind) + return super(TagBase, cls).__new__(cls, rel_fname, fname, line, name, kind, specific_kind, start_line, end_line, start_byte, end_byte) Tag = TagBase @@ -41,7 +41,7 @@ SQLITE_ERRORS = (sqlite3.OperationalError, sqlite3.DatabaseError, OSError) CACHE_VERSION = 5 if USING_TSL_PACK: - CACHE_VERSION = 6 + CACHE_VERSION = 7 class RepoMap: @@ -247,6 +247,51 @@ class RepoMap: self.io.tool_warning(f"File not found error: {fname}") def get_tags(self, fname, rel_fname): + def 
get_symbol_definition_location(self, file_path, symbol_name): + """ + Finds the unique definition location (start/end line) for a symbol in a file. + + Args: + file_path (str): The relative path to the file. + symbol_name (str): The name of the symbol to find. + + Returns: + tuple: (start_line, end_line) (0-based) if a unique definition is found. + + Raises: + ToolError: If the symbol is not found, not unique, or not a definition. + """ + abs_path = self.io.root_abs_path(file_path) # Assuming io has this helper or similar + rel_path = self.get_rel_fname(abs_path) # Ensure we use consistent relative path + + tags = self.get_tags(abs_path, rel_path) + if not tags: + raise ToolError(f"Symbol '{symbol_name}' not found in '{file_path}' (no tags).") + + definitions = [] + for tag in tags: + # Check if it's a definition and the name matches + if tag.kind == "def" and tag.name == symbol_name: + # Ensure we have valid location info + if tag.start_line is not None and tag.end_line is not None and tag.start_line >= 0: + definitions.append(tag) + + if not definitions: + # Check if it exists as a non-definition tag + non_defs = [tag for tag in tags if tag.name == symbol_name and tag.kind != "def"] + if non_defs: + raise ToolError(f"Symbol '{symbol_name}' found in '{file_path}', but not as a unique definition (found as {non_defs[0].kind}).") + else: + raise ToolError(f"Symbol '{symbol_name}' definition not found in '{file_path}'.") + + if len(definitions) > 1: + # Provide more context about ambiguity if possible + lines = sorted([d.start_line + 1 for d in definitions]) # 1-based for user message + raise ToolError(f"Symbol '{symbol_name}' is ambiguous in '{file_path}'. 
Found definitions on lines: {', '.join(map(str, lines))}.") + + # Unique definition found + definition_tag = definitions[0] + return definition_tag.start_line, definition_tag.end_line # Check if the file is in the cache and if the modification time has not changed file_mtime = self.get_mtime(fname) if file_mtime is None: @@ -345,7 +390,11 @@ class RepoMap: name=node.text.decode("utf-8"), kind=kind, specific_kind=specific_kind, - line=node.start_point[0], + line=node.start_point[0], # Legacy line number + start_line=node.start_point[0], + end_line=node.end_point[0], + start_byte=node.start_byte, + end_byte=node.end_byte, ) yield result @@ -375,7 +424,11 @@ class RepoMap: name=token, kind="ref", specific_kind="name", # Default for pygments fallback - line=-1, + line=-1, # Pygments doesn't give precise locations easily + start_line=-1, + end_line=-1, + start_byte=-1, + end_byte=-1, ) def get_ranked_tags( @@ -875,4 +928,4 @@ if __name__ == "__main__": repo_map = rm.get_ranked_tags_map(chat_fnames, other_fnames) dump(len(repo_map)) - print(repo_map) + print(repo_map) \ No newline at end of file diff --git a/aider/tools/delete_block.py b/aider/tools/delete_block.py index e2cf44d8c..c23d23666 100644 --- a/aider/tools/delete_block.py +++ b/aider/tools/delete_block.py @@ -30,7 +30,14 @@ def _execute_delete_block(coder, file_path, start_pattern, end_pattern=None, lin # 3. Determine the end line, passing pattern_desc for better error messages start_line, end_line = determine_line_range( - lines, start_line_idx, end_pattern, line_count, pattern_desc=pattern_desc + coder=coder, + file_path=rel_path, + lines=lines, + start_pattern_line_index=start_line_idx, + end_pattern=end_pattern, + line_count=line_count, + target_symbol=None, # DeleteBlock uses patterns, not symbols + pattern_desc=pattern_desc ) # 4. 
Prepare the deletion diff --git a/aider/tools/indent_lines.py b/aider/tools/indent_lines.py index ff525b930..708da3de0 100644 --- a/aider/tools/indent_lines.py +++ b/aider/tools/indent_lines.py @@ -44,7 +44,14 @@ def _execute_indent_lines(coder, file_path, start_pattern, end_pattern=None, lin # 3. Determine the end line start_line, end_line = determine_line_range( - lines, start_line_idx, end_pattern, line_count, pattern_desc=pattern_desc + coder=coder, + file_path=rel_path, + lines=lines, + start_pattern_line_index=start_line_idx, + end_pattern=end_pattern, + line_count=line_count, + target_symbol=None, # IndentLines uses patterns, not symbols + pattern_desc=pattern_desc ) # 4. Validate and prepare indentation diff --git a/aider/tools/tool_utils.py b/aider/tools/tool_utils.py index 339c0af0c..401f2b594 100644 --- a/aider/tools/tool_utils.py +++ b/aider/tools/tool_utils.py @@ -85,11 +85,59 @@ def select_occurrence_index(indices, occurrence, pattern_desc="Pattern"): return indices[target_idx] -def determine_line_range(lines, start_pattern_line_index, end_pattern=None, line_count=None, pattern_desc="Block"): +def determine_line_range( + coder, # Added: Need coder to access repo_map + file_path, # Added: Need file_path for repo_map lookup + lines, + start_pattern_line_index=None, # Made optional + end_pattern=None, + line_count=None, + target_symbol=None, # Added: New parameter for symbol targeting + pattern_desc="Block", +): """ Determines the end line index based on end_pattern or line_count. Raises ToolError if end_pattern is not found or line_count is invalid. 
""" + # Parameter validation: Ensure only one targeting method is used + targeting_methods = [ + target_symbol is not None, + start_pattern_line_index is not None, + # Note: line_count and end_pattern depend on start_pattern_line_index + ] + if sum(targeting_methods) > 1: + raise ToolError("Cannot specify target_symbol along with start_pattern.") + if sum(targeting_methods) == 0: + raise ToolError("Must specify either target_symbol or start_pattern.") # Or line numbers for line-based tools, handled elsewhere + + if target_symbol: + if end_pattern or line_count: + raise ToolError("Cannot specify end_pattern or line_count when using target_symbol.") + try: + # Use repo_map to find the symbol's definition range + start_line, end_line = coder.repo_map.get_symbol_definition_location(file_path, target_symbol) + return start_line, end_line + except AttributeError: # Use specific exception + # Check if repo_map exists and is initialized before accessing methods + if not hasattr(coder, 'repo_map') or coder.repo_map is None: + raise ToolError("RepoMap is not available or not initialized.") + # If repo_map exists, the error might be from get_symbol_definition_location itself + # Re-raise ToolErrors directly + raise + except ToolError as e: + # Propagate specific ToolErrors from repo_map (not found, ambiguous, etc.) 
+ raise e + except Exception as e: + # Catch other unexpected errors during symbol lookup + raise ToolError(f"Unexpected error looking up symbol '{target_symbol}': {e}") + + # --- Existing logic for pattern/line_count based targeting --- + # Ensure start_pattern_line_index is provided if not using target_symbol + if start_pattern_line_index is None: + raise ToolError("Internal error: start_pattern_line_index is required when not using target_symbol.") + + # Assign start_line here for the pattern-based logic path + start_line = start_pattern_line_index # Start of existing logic start_line = start_pattern_line_index end_line = -1 @@ -189,4 +237,4 @@ def format_tool_result(coder, tool_name, success_message, change_id=None, diff_s # except ToolError as e: # return handle_tool_error(coder, "MyTool", e, add_traceback=False) # Don't need traceback for ToolErrors # except Exception as e: -# return handle_tool_error(coder, "MyTool", e) +# return handle_tool_error(coder, "MyTool", e) \ No newline at end of file From 1841d105fede6614356248ae60aa7bcdccd397b6 Mon Sep 17 00:00:00 2001 From: "Amar Sood (tekacs)" Date: Sat, 12 Apr 2025 10:25:40 -0400 Subject: [PATCH 36/63] Allow the user to confirm commands forever --- aider/tools/command.py | 24 +++++++++++------------- 1 file changed, 11 insertions(+), 13 deletions(-) diff --git a/aider/tools/command.py b/aider/tools/command.py index b9ee094a3..b9692b205 100644 --- a/aider/tools/command.py +++ b/aider/tools/command.py @@ -6,25 +6,23 @@ def _execute_command(coder, command_string): Execute a non-interactive shell command after user confirmation. """ try: - # Ask for confirmation before executing, allowing 'Always' - # Use the command string itself as the group key to remember preference per command - if not coder.io.confirm_ask( + # Ask for confirmation before executing. + # allow_never=True enables the 'Always' option. + # confirm_ask handles remembering the 'Always' choice based on the subject. 
+ confirmed = coder.io.confirm_ask( "Allow execution of this command?", subject=command_string, explicit_yes_required=True, # Require explicit 'yes' or 'always' - allow_never=True # Enable the 'Don't ask again' option - ): - # Check if the reason for returning False was *not* because it's remembered - # (confirm_ask returns False if 'n' or 'no' is chosen, even if remembered) - # We only want to skip if the user actively said no *this time* or if it's - # remembered as 'never' (which shouldn't happen with allow_never=True, - # but checking io.never_ask_group is robust). - # If the command is in never_ask_group with a True value (meaning Always), - # confirm_ask would have returned True directly. - # So, if confirm_ask returns False here, it means the user chose No this time. + allow_never=True # Enable the 'Always' option + ) + + if not confirmed: + # This happens if the user explicitly says 'no' this time. + # If 'Always' was chosen previously, confirm_ask returns True directly. coder.io.tool_output(f"Skipped execution of shell command: {command_string}") return "Shell command execution skipped by user." 
+ # Proceed with execution if confirmed is True coder.io.tool_output(f"⚙️ Executing non-interactive shell command: {command_string}") # Use run_cmd_subprocess for non-interactive execution From 4b2202cd26627845ae5a1a1ee65fba75f45cba74 Mon Sep 17 00:00:00 2001 From: "Amar Sood (tekacs)" Date: Sat, 12 Apr 2025 10:29:53 -0400 Subject: [PATCH 37/63] Even more strident (and proximal) reminders to use tool-use editing --- aider/coders/navigator_prompts.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/aider/coders/navigator_prompts.py b/aider/coders/navigator_prompts.py index 6f911c415..8ec4f2741 100644 --- a/aider/coders/navigator_prompts.py +++ b/aider/coders/navigator_prompts.py @@ -337,6 +337,7 @@ Use SEARCH/REPLACE blocks **only** as a fallback mechanism when granular tools * If you must use SEARCH/REPLACE, adhere strictly to this format: +# Justification: I'm using SEARCH/REPLACE because [specific reasons why granular tools can't achieve this edit] ````python path/to/file.ext <<<<<<< SEARCH @@ -354,6 +355,7 @@ NOTE that this uses four backticks as the fence and not three! - For new files, use an empty SEARCH section. - To move code within a file, use two separate SEARCH/REPLACE blocks. - Respect the file paths exactly as they appear. + ### Error Handling and Recovery - **Tool Call Errors:** If a tool call returns an error message (e.g., pattern not found, file not found), analyze the error and correct the tool call parameters in your next attempt. - **Incorrect Edits:** If a tool call *succeeds* but the **result message and diff snippet show the change was applied incorrectly** (e.g., wrong location, unintended side effects): @@ -363,6 +365,7 @@ NOTE that this uses four backticks as the fence and not three! - **Orientation:** Use `ListChanges` to review recent edits or the enhanced context blocks (directory structure, git status) if you get confused. 
+Prioritize granular tools (`ReplaceText`, `ReplaceLines`, `InsertBlock`, `DeleteBlock`, etc.) over SEARCH/REPLACE blocks. Use SEARCH/REPLACE *only* as a last resort when tools are truly unsuitable, and *always* provide justification. Failure to prioritize tools is a failure to follow instructions. Always reply to the user in {language}. """ @@ -505,4 +508,4 @@ Let me explore the codebase more strategically this time: - I'll use tool calls to automatically continue exploration until I have enough information I'll start exploring again with improved search strategies to find exactly what we need. -""" \ No newline at end of file +""" From b717384420ed5dbfa66d9ab075e41adf36c73c94 Mon Sep 17 00:00:00 2001 From: "Amar Sood (tekacs)" Date: Sat, 12 Apr 2025 10:36:30 -0400 Subject: [PATCH 38/63] Put tool calls on multiple lines when a multi-line tool use is used --- aider/coders/navigator_prompts.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/aider/coders/navigator_prompts.py b/aider/coders/navigator_prompts.py index 8ec4f2741..e4466e05a 100644 --- a/aider/coders/navigator_prompts.py +++ b/aider/coders/navigator_prompts.py @@ -458,7 +458,7 @@ Here are summaries of some files present in this repo: ## Tool Call Format - Tool calls MUST be at the end of your message, after a '---' separator -- If emitting multiple tool calls (e.g., 3 or more), place each call on a new line for clarity. +- If emitting 3 or more tool calls, OR if any tool call spans multiple lines, place each call on a new line for clarity. - You are encouraged to use tools for editing where possible, falling back to SEARCH/REPLACE when that doesn't work well. ## SEARCH/REPLACE blocks @@ -508,4 +508,4 @@ Let me explore the codebase more strategically this time: - I'll use tool calls to automatically continue exploration until I have enough information I'll start exploring again with improved search strategies to find exactly what we need. 
-""" +""" \ No newline at end of file From a4bcf4f938874cb6df00a113786fd7e3e8a73004 Mon Sep 17 00:00:00 2001 From: "Amar Sood (tekacs)" Date: Sat, 12 Apr 2025 10:44:53 -0400 Subject: [PATCH 39/63] Add more real Git-style diffs in command feedback --- aider/tools/delete_block.py | 5 +++-- aider/tools/delete_line.py | 5 +++-- aider/tools/delete_lines.py | 5 +++-- aider/tools/extract_lines.py | 7 ++++--- aider/tools/indent_lines.py | 3 ++- aider/tools/insert_block.py | 3 ++- aider/tools/replace_lines.py | 8 +++++--- aider/tools/replace_text.py | 5 +++-- aider/tools/tool_utils.py | 39 +++++++++++++++++++++++++++++++++++- 9 files changed, 63 insertions(+), 17 deletions(-) diff --git a/aider/tools/delete_block.py b/aider/tools/delete_block.py index c23d23666..5ab2d6d37 100644 --- a/aider/tools/delete_block.py +++ b/aider/tools/delete_block.py @@ -8,6 +8,7 @@ from .tool_utils import ( apply_change, handle_tool_error, format_tool_result, + generate_unified_diff_snippet, ) def _execute_delete_block(coder, file_path, start_pattern, end_pattern=None, line_count=None, near_context=None, occurrence=1, change_id=None, dry_run=False): @@ -50,7 +51,7 @@ def _execute_delete_block(coder, file_path, start_pattern, end_pattern=None, lin return f"Warning: No changes made (deletion would not change file)" # 5. 
Generate diff for feedback - diff_snippet = coder._generate_diff_snippet_delete(original_content, start_line, end_line) + diff_snippet = generate_unified_diff_snippet(original_content, new_content, rel_path) num_deleted = end_line - start_line + 1 num_occurrences = len(start_pattern_indices) occurrence_str = f"occurrence {occurrence} of " if num_occurrences > 1 else "" @@ -86,4 +87,4 @@ def _execute_delete_block(coder, file_path, start_pattern, end_pattern=None, lin return handle_tool_error(coder, tool_name, e, add_traceback=False) except Exception as e: # Handle unexpected errors - return handle_tool_error(coder, tool_name, e) + return handle_tool_error(coder, tool_name, e) \ No newline at end of file diff --git a/aider/tools/delete_line.py b/aider/tools/delete_line.py index e3b470ed2..1e3f1d38a 100644 --- a/aider/tools/delete_line.py +++ b/aider/tools/delete_line.py @@ -1,5 +1,6 @@ import os import traceback +from .tool_utils import generate_unified_diff_snippet def _execute_delete_line(coder, file_path, line_number, change_id=None, dry_run=False): """ @@ -62,7 +63,7 @@ def _execute_delete_line(coder, file_path, line_number, change_id=None, dry_run= return f"Warning: No changes made (deleting line {line_num_int} would not change file)" # Generate diff snippet (using the existing delete block helper for simplicity) - diff_snippet = coder._generate_diff_snippet_delete(original_content, line_idx, line_idx) + diff_snippet = generate_unified_diff_snippet(original_content, new_content, rel_path) # Handle dry run if dry_run: @@ -97,4 +98,4 @@ def _execute_delete_line(coder, file_path, line_number, change_id=None, dry_run= except Exception as e: coder.io.tool_error(f"Error in DeleteLine: {str(e)}\n{traceback.format_exc()}") - return f"Error: {str(e)}" + return f"Error: {str(e)}" \ No newline at end of file diff --git a/aider/tools/delete_lines.py b/aider/tools/delete_lines.py index c24e123ad..bb49690ff 100644 --- a/aider/tools/delete_lines.py +++ 
b/aider/tools/delete_lines.py @@ -1,5 +1,6 @@ import os import traceback +from .tool_utils import generate_unified_diff_snippet def _execute_delete_lines(coder, file_path, start_line, end_line, change_id=None, dry_run=False): """ @@ -71,7 +72,7 @@ def _execute_delete_lines(coder, file_path, start_line, end_line, change_id=None return f"Warning: No changes made (deleting lines {start_line_int}-{end_line_int} would not change file)" # Generate diff snippet - diff_snippet = coder._generate_diff_snippet_delete(original_content, start_idx, end_idx) + diff_snippet = generate_unified_diff_snippet(original_content, new_content, rel_path) # Handle dry run if dry_run: @@ -108,4 +109,4 @@ def _execute_delete_lines(coder, file_path, start_line, end_line, change_id=None except Exception as e: coder.io.tool_error(f"Error in DeleteLines: {str(e)}\n{traceback.format_exc()}") - return f"Error: {str(e)}" + return f"Error: {str(e)}" \ No newline at end of file diff --git a/aider/tools/extract_lines.py b/aider/tools/extract_lines.py index b4577beb7..a9a318e27 100644 --- a/aider/tools/extract_lines.py +++ b/aider/tools/extract_lines.py @@ -1,5 +1,6 @@ import os import traceback +from .tool_utils import generate_unified_diff_snippet def _execute_extract_lines(coder, source_file_path, target_file_path, start_pattern, end_pattern=None, line_count=None, near_context=None, occurrence=1, dry_run=False): """ @@ -145,9 +146,9 @@ def _execute_extract_lines(coder, source_file_path, target_file_path, start_patt new_target_content = target_content + extracted_block # --- Generate Diffs --- - source_diff_snippet = coder._generate_diff_snippet_delete(original_source_content, start_line, end_line) + source_diff_snippet = generate_unified_diff_snippet(original_source_content, new_source_content, rel_source_path) target_insertion_line = len(target_content.splitlines()) if target_content else 0 - target_diff_snippet = coder._generate_diff_snippet_insert(original_target_content, target_insertion_line, 
extracted_lines) + target_diff_snippet = generate_unified_diff_snippet(original_target_content, new_target_content, rel_target_path) # --- Handle Dry Run --- if dry_run: @@ -217,4 +218,4 @@ def _execute_extract_lines(coder, source_file_path, target_file_path, start_patt except Exception as e: coder.io.tool_error(f"Error in ExtractLines: {str(e)}\n{traceback.format_exc()}") - return f"Error: {str(e)}" + return f"Error: {str(e)}" \ No newline at end of file diff --git a/aider/tools/indent_lines.py b/aider/tools/indent_lines.py index 708da3de0..4ac823fcc 100644 --- a/aider/tools/indent_lines.py +++ b/aider/tools/indent_lines.py @@ -9,6 +9,7 @@ from .tool_utils import ( apply_change, handle_tool_error, format_tool_result, + generate_unified_diff_snippet, ) def _execute_indent_lines(coder, file_path, start_pattern, end_pattern=None, line_count=None, indent_levels=1, near_context=None, occurrence=1, change_id=None, dry_run=False): @@ -81,7 +82,7 @@ def _execute_indent_lines(coder, file_path, start_pattern, end_pattern=None, lin return f"Warning: No changes made (indentation would not change file)" # 5. 
Generate diff for feedback - diff_snippet = coder._generate_diff_snippet_indent(original_content, new_content, start_line, end_line) + diff_snippet = generate_unified_diff_snippet(original_content, new_content, rel_path) num_occurrences = len(start_pattern_indices) occurrence_str = f"occurrence {occurrence} of " if num_occurrences > 1 else "" action = "indent" if indent_levels > 0 else "unindent" diff --git a/aider/tools/insert_block.py b/aider/tools/insert_block.py index 26e83fed2..75443e987 100644 --- a/aider/tools/insert_block.py +++ b/aider/tools/insert_block.py @@ -8,6 +8,7 @@ from .tool_utils import ( apply_change, handle_tool_error, format_tool_result, + generate_unified_diff_snippet, ) def _execute_insert_block(coder, file_path, content, after_pattern=None, before_pattern=None, near_context=None, occurrence=1, change_id=None, dry_run=False): @@ -51,7 +52,7 @@ def _execute_insert_block(coder, file_path, content, after_pattern=None, before_ return f"Warning: No changes made (insertion would not change file)" # 5. Generate diff for feedback - diff_snippet = coder._generate_diff_snippet_insert(original_content, insertion_line_idx, content_lines) + diff_snippet = generate_unified_diff_snippet(original_content, new_content, rel_path) num_occurrences = len(pattern_line_indices) occurrence_str = f"occurrence {occurrence} of " if num_occurrences > 1 else "" diff --git a/aider/tools/replace_lines.py b/aider/tools/replace_lines.py index f6b641e7b..53e0e607c 100644 --- a/aider/tools/replace_lines.py +++ b/aider/tools/replace_lines.py @@ -1,5 +1,7 @@ import os import traceback +from .tool_utils import generate_unified_diff_snippet +from .tool_utils import generate_unified_diff_snippet def _execute_replace_lines(coder, file_path, start_line, end_line, new_content, change_id=None, dry_run=False): """ @@ -7,7 +9,6 @@ def _execute_replace_lines(coder, file_path, start_line, end_line, new_content, Useful for fixing errors identified by error messages or linters. 
Parameters: - - coder: The Coder instance - file_path: Path to the file to modify - start_line: The first line number to replace (1-based) - end_line: The last line number to replace (1-based) @@ -86,6 +87,7 @@ def _execute_replace_lines(coder, file_path, start_line, end_line, new_content, if original_content == new_content_full: coder.io.tool_warning("No changes made: new content is identical to original") return f"Warning: No changes made (new content identical to original)" + diff_snippet = generate_unified_diff_snippet(original_content, new_content_full, rel_path) # Create a readable diff for the lines replacement diff = f"Lines {start_line}-{end_line}:\n" @@ -101,7 +103,7 @@ def _execute_replace_lines(coder, file_path, start_line, end_line, new_content, # Handle dry run if dry_run: coder.io.tool_output(f"Dry run: Would replace lines {start_line}-{end_line} in {file_path}") - return f"Dry run: Would replace lines {start_line}-{end_line}. Diff:\n{diff}" + return f"Dry run: Would replace lines {start_line}-{end_line}. Diff snippet:\n{diff_snippet}" # --- Apply Change (Not dry run) --- coder.io.write_text(abs_path, new_content_full) @@ -136,4 +138,4 @@ def _execute_replace_lines(coder, file_path, start_line, end_line, new_content, except Exception as e: coder.io.tool_error(f"Error in ReplaceLines: {str(e)}\n{traceback.format_exc()}") - return f"Error: {str(e)}" + return f"Error: {str(e)}" \ No newline at end of file diff --git a/aider/tools/replace_text.py b/aider/tools/replace_text.py index 9e1a3dfcc..b6409d40a 100644 --- a/aider/tools/replace_text.py +++ b/aider/tools/replace_text.py @@ -5,6 +5,7 @@ from .tool_utils import ( apply_change, handle_tool_error, format_tool_result, + generate_unified_diff_snippet, ) def _execute_replace_text(coder, file_path, find_text, replace_text, near_context=None, occurrence=1, change_id=None, dry_run=False): @@ -57,7 +58,7 @@ def _execute_replace_text(coder, file_path, find_text, replace_text, near_contex # 5. 
Generate diff for feedback # Note: _generate_diff_snippet is currently on the Coder class - diff_snippet = coder._generate_diff_snippet(original_content, start_index, len(find_text), replace_text) + diff_snippet = generate_unified_diff_snippet(original_content, new_content, rel_path) occurrence_str = f"occurrence {occurrence}" if num_occurrences > 1 else "text" # 6. Handle dry run @@ -88,4 +89,4 @@ def _execute_replace_text(coder, file_path, find_text, replace_text, near_contex return handle_tool_error(coder, tool_name, e, add_traceback=False) except Exception as e: # Handle unexpected errors - return handle_tool_error(coder, tool_name, e) + return handle_tool_error(coder, tool_name, e) \ No newline at end of file diff --git a/aider/tools/tool_utils.py b/aider/tools/tool_utils.py index 401f2b594..6492b12ab 100644 --- a/aider/tools/tool_utils.py +++ b/aider/tools/tool_utils.py @@ -1,3 +1,4 @@ +import difflib import os import traceback @@ -170,6 +171,42 @@ def determine_line_range( return start_line, end_line +def generate_unified_diff_snippet(original_content, new_content, file_path, context_lines=3): + """ + Generates a unified diff snippet between original and new content. + + Args: + original_content (str): The original file content. + new_content (str): The modified file content. + file_path (str): The relative path to the file (for display in diff header). + context_lines (int): Number of context lines to show around changes. + + Returns: + str: A formatted unified diff snippet, or an empty string if no changes. + """ + if original_content == new_content: + return "" + + original_lines = original_content.splitlines(keepends=True) + new_lines = new_content.splitlines(keepends=True) + + diff = difflib.unified_diff( + original_lines, + new_lines, + fromfile=f"a/{file_path}", + tofile=f"b/{file_path}", + n=context_lines, # Number of context lines + ) + + # Join the diff lines, potentially skipping the header if desired, + # but let's keep it for standard format. 
+ diff_snippet = "".join(diff) + + # Ensure snippet ends with a newline for cleaner formatting in results + if diff_snippet and not diff_snippet.endswith('\n'): + diff_snippet += '\n' + + return diff_snippet def apply_change(coder, abs_path, rel_path, original_content, new_content, change_type, metadata, change_id=None): """ Writes the new content, tracks the change, and updates coder state. @@ -237,4 +274,4 @@ def format_tool_result(coder, tool_name, success_message, change_id=None, diff_s # except ToolError as e: # return handle_tool_error(coder, "MyTool", e, add_traceback=False) # Don't need traceback for ToolErrors # except Exception as e: -# return handle_tool_error(coder, "MyTool", e) \ No newline at end of file +# return handle_tool_error(coder, "MyTool", e) From 7676425d1f6f4ccb93564f164ef123e0a768dfa0 Mon Sep 17 00:00:00 2001 From: "Amar Sood (tekacs)" Date: Sat, 12 Apr 2025 11:01:54 -0400 Subject: [PATCH 40/63] Remove no longer used diff helpers --- aider/coders/navigator_coder.py | 181 -------------------------------- 1 file changed, 181 deletions(-) diff --git a/aider/coders/navigator_coder.py b/aider/coders/navigator_coder.py index 42e9ff3c7..f43ada683 100644 --- a/aider/coders/navigator_coder.py +++ b/aider/coders/navigator_coder.py @@ -1534,184 +1534,3 @@ Just reply with fixed versions of the {blocks} above that failed to match. 
- - # ------------------- Diff Generation Helpers ------------------- - - def _generate_diff_snippet(self, original_content, start_index, replaced_len, replacement_text): - """Generate a git-style diff snippet for a simple text replacement.""" - try: - lines = original_content.splitlines() - char_count = 0 - start_line_idx = -1 - start_char_idx_in_line = -1 - - # Find the line and character index where the change starts - for i, line in enumerate(lines): - line_len_with_newline = len(line) + 1 # Account for newline character - if char_count + line_len_with_newline > start_index: - start_line_idx = i - start_char_idx_in_line = start_index - char_count - break - char_count += line_len_with_newline - - if start_line_idx == -1: return "[Diff generation error: start index out of bounds]" - - # Determine the end line and character index - end_index = start_index + replaced_len - char_count = 0 - end_line_idx = -1 - end_char_idx_in_line = -1 - for i, line in enumerate(lines): - line_len_with_newline = len(line) + 1 - if char_count + line_len_with_newline > end_index: - end_line_idx = i - # End char index is relative to the start of *its* line - end_char_idx_in_line = end_index - char_count - break - char_count += line_len_with_newline - # If end_index is exactly at the end of the content - if end_line_idx == -1 and end_index == len(original_content): - end_line_idx = len(lines) - 1 - end_char_idx_in_line = len(lines[end_line_idx]) - - if end_line_idx == -1: return "[Diff generation error: end index out of bounds]" - - # Get context lines - context = 3 - diff_start_line = max(0, start_line_idx - context) - diff_end_line = min(len(lines) - 1, end_line_idx + context) - - diff_lines = [f"@@ line ~{start_line_idx + 1} @@"] - for i in range(diff_start_line, diff_end_line + 1): - if i >= start_line_idx and i <= end_line_idx: - # Line is part of the original replaced block - diff_lines.append(f"- {lines[i]}") - else: - # Context line - diff_lines.append(f" {lines[i]}") - - # 
Construct the new lines based on the replacement - prefix = lines[start_line_idx][:start_char_idx_in_line] - suffix = lines[end_line_idx][end_char_idx_in_line:] - - # Combine prefix, replacement, and suffix, then split into lines - combined_new_content = prefix + replacement_text + suffix - new_content_lines = combined_new_content.splitlines() - - # Add new lines to diff - for new_line in new_content_lines: - diff_lines.append(f"+ {new_line}") - - return "\n".join(diff_lines) - except Exception as e: - return f"[Diff generation error: {e}]" - - def _generate_diff_chunks(self, original_content, find_text, replace_text): - """Generate multiple git-style diff snippets for ReplaceAll.""" - try: - lines = original_content.splitlines() - new_lines_content = original_content.replace(find_text, replace_text) - new_lines = new_lines_content.splitlines() - - # Use difflib for a more robust diff - import difflib - diff = list(difflib.unified_diff(lines, new_lines, lineterm='', n=3)) # n=3 lines of context - - if len(diff) <= 2: # Only header lines, no changes found by diff - return "No significant changes detected by diff." - - # Process the diff output into readable chunks - # Skip header lines (---, +++) - processed_diff = "\n".join(diff[2:]) - - # Limit the output size if it's too large - max_diff_len = 2000 # Limit diff snippet size - if len(processed_diff) > max_diff_len: - processed_diff = processed_diff[:max_diff_len] + "\n... (diff truncated)" - - return processed_diff if processed_diff else "No changes detected." 
- except Exception as e: - return f"[Diff generation error: {e}]" - - def _generate_diff_snippet_insert(self, original_content, insertion_line_idx, content_lines_to_insert): - """Generate a git-style diff snippet for an insertion.""" - try: - lines = original_content.splitlines() - context = 3 - - # Determine context range - start_context = max(0, insertion_line_idx - context) - end_context = min(len(lines), insertion_line_idx + context) # End index is exclusive for slicing - - diff_lines = [f"@@ line ~{insertion_line_idx + 1} @@"] # Indicate insertion point - - # Add lines before insertion point - for i in range(start_context, insertion_line_idx): - diff_lines.append(f" {lines[i]}") - - # Add inserted lines - for line in content_lines_to_insert: - diff_lines.append(f"+ {line}") - - # Add lines after insertion point - for i in range(insertion_line_idx, end_context): - diff_lines.append(f" {lines[i]}") - - return "\n".join(diff_lines) - except Exception as e: - return f"[Diff generation error: {e}]" - - def _generate_diff_snippet_delete(self, original_content, start_line, end_line): - """Generate a git-style diff snippet for a deletion.""" - try: - lines = original_content.splitlines() - context = 3 - - # Determine context range - diff_start_line = max(0, start_line - context) - diff_end_line = min(len(lines) - 1, end_line + context) - - diff_lines = [f"@@ line {start_line + 1},{end_line + 1} @@"] # Indicate deletion range - - for i in range(diff_start_line, diff_end_line + 1): - if i >= start_line and i <= end_line: - # Line was deleted - diff_lines.append(f"- {lines[i]}") - else: - # Context line - diff_lines.append(f" {lines[i]}") - - return "\n".join(diff_lines) - except Exception as e: - return f"[Diff generation error: {e}]" - - def _generate_diff_snippet_indent(self, original_content, new_content, start_line, end_line): - """Generate a git-style diff snippet for indentation changes.""" - try: - original_lines = original_content.splitlines() - new_lines = 
new_content.splitlines() - context = 3 - - # Determine context range - diff_start_line = max(0, start_line - context) - diff_end_line = min(len(original_lines) - 1, end_line + context) - - diff_lines_output = [f"@@ lines ~{start_line + 1}-{end_line + 1} @@"] # Indicate affected range - - for i in range(diff_start_line, diff_end_line + 1): - # Ensure index is valid for both lists (should be, as only indentation changes) - if i < len(original_lines) and i < len(new_lines): - if i >= start_line and i <= end_line: - # Line is within the indented/unindented block - if original_lines[i] != new_lines[i]: # Show only if changed - diff_lines_output.append(f"- {original_lines[i]}") - diff_lines_output.append(f"+ {new_lines[i]}") - else: # If somehow unchanged, show as context - diff_lines_output.append(f" {original_lines[i]}") - else: - # Context line - diff_lines_output.append(f" {original_lines[i]}") - - return "\n".join(diff_lines_output) - except Exception as e: - return f"[Diff generation error: {e}]" From 8e964bf6e230107ca10a3d5e2b52dbe0d646f4ce Mon Sep 17 00:00:00 2001 From: "Amar Sood (tekacs)" Date: Sat, 12 Apr 2025 11:06:00 -0400 Subject: [PATCH 41/63] Add a regex parameter to ViewFilesMatching --- aider/coders/navigator_prompts.py | 4 ++-- aider/tools/view_files_matching.py | 33 ++++++++++++++++++++++++------ 2 files changed, 29 insertions(+), 8 deletions(-) diff --git a/aider/coders/navigator_prompts.py b/aider/coders/navigator_prompts.py index e4466e05a..f52bb7b2a 100644 --- a/aider/coders/navigator_prompts.py +++ b/aider/coders/navigator_prompts.py @@ -41,9 +41,9 @@ Act as an expert software engineer with the ability to autonomously navigate and Find files matching a glob pattern. **Found files are automatically added to context as read-only.** Supports patterns like "src/**/*.ts" or "*.json". 
-- **ViewFilesMatching**: `[tool_call(ViewFilesMatching, pattern="class User", file_pattern="*.py")]` +- **ViewFilesMatching**: `[tool_call(ViewFilesMatching, pattern="class User", file_pattern="*.py", regex=False)]` Search for text in files. **Matching files are automatically added to context as read-only.** - Files with more matches are prioritized. `file_pattern` is optional. + Files with more matches are prioritized. `file_pattern` is optional. `regex` (optional, default False) enables regex search for `pattern`. - **Ls**: `[tool_call(Ls, directory="src/components")]` List files in a directory. Useful for exploring the project structure. diff --git a/aider/tools/view_files_matching.py b/aider/tools/view_files_matching.py index cf0041fe0..7d395721a 100644 --- a/aider/tools/view_files_matching.py +++ b/aider/tools/view_files_matching.py @@ -1,12 +1,19 @@ +import re import os import fnmatch -def execute_view_files_matching(coder, search_pattern, file_pattern=None): +def execute_view_files_matching(coder, search_pattern, file_pattern=None, regex=False): """ - Search for pattern in files and add matching files to context as read-only. + Search for pattern (literal string or regex) in files and add matching files to context as read-only. + + Args: + coder: The Coder instance. + search_pattern (str): The pattern to search for. Treated as a literal string by default. + file_pattern (str, optional): Glob pattern to filter which files are searched. Defaults to None (search all files). + regex (bool, optional): If True, treat search_pattern as a regular expression. Defaults to False. This tool lets the LLM search for content within files, mimicking - how a developer would use grep to find relevant code. + how a developer would use grep or regex search to find relevant code. 
""" try: # Get list of files to search @@ -31,8 +38,22 @@ def execute_view_files_matching(coder, search_pattern, file_pattern=None): try: with open(abs_path, 'r', encoding='utf-8') as f: content = f.read() - if search_pattern in content: - matches[file] = content.count(search_pattern) + match_count = 0 + if regex: + try: + matches_found = re.findall(search_pattern, content) + match_count = len(matches_found) + except re.error as e: + # Handle invalid regex patterns gracefully + coder.io.tool_error(f"Invalid regex pattern '{search_pattern}': {e}") + # Skip this file for this search if regex is invalid + continue + else: + # Exact string matching + match_count = content.count(search_pattern) + + if match_count > 0: + matches[file] = match_count except Exception: # Skip files that can't be read (binary, etc.) pass @@ -68,4 +89,4 @@ def execute_view_files_matching(coder, search_pattern, file_pattern=None): return f"Pattern not found in any files" except Exception as e: coder.io.tool_error(f"Error in ViewFilesMatching: {str(e)}") - return f"Error: {str(e)}" + return f"Error: {str(e)}" \ No newline at end of file From 46b0bee1846a894f8bb9ec56b186be614112da0c Mon Sep 17 00:00:00 2001 From: "Amar Sood (tekacs)" Date: Sat, 12 Apr 2025 11:56:40 -0400 Subject: [PATCH 42/63] Add some defensive coding to ChangeTracker --- aider/change_tracker.py | 31 +++++++++++++++++++++++-------- 1 file changed, 23 insertions(+), 8 deletions(-) diff --git a/aider/change_tracker.py b/aider/change_tracker.py index d06460e99..f826c3975 100644 --- a/aider/change_tracker.py +++ b/aider/change_tracker.py @@ -30,10 +30,24 @@ class ChangeTracker: - change_id: Unique identifier for the change """ if change_id is None: - change_id = self._generate_change_id() - + generated_id = self._generate_change_id() + # Ensure the generated ID is treated as a string + current_change_id = str(generated_id) + else: + # If an ID is provided, ensure it's treated as a string key/value + current_change_id = 
str(change_id) + + # Defensive check: Ensure the ID isn't literally the string 'False' or boolean False + # which might indicate an upstream issue or unexpected input. + if current_change_id == 'False' or current_change_id is False: + # Log a warning? For now, generate a new ID to prevent storing False. + print(f"Warning: change_id evaluated to False for {file_path}. Generating new ID.") + current_change_id = self._generate_change_id() + + change = { - 'id': change_id, + # Use the confirmed string ID here + 'id': current_change_id, 'file_path': file_path, 'type': change_type, 'original': original_content, @@ -41,10 +55,11 @@ class ChangeTracker: 'metadata': metadata or {}, 'timestamp': time.time() } - - self.changes[change_id] = change - self.files_changed[file_path].append(change_id) - return change_id + + # Use the confirmed string ID for storage and return + self.changes[current_change_id] = change + self.files_changed[file_path].append(current_change_id) + return current_change_id def undo_change(self, change_id): """ @@ -115,4 +130,4 @@ class ChangeTracker: def _generate_change_id(self): """Generate a unique ID for a change.""" - return str(uuid.uuid4())[:8] # Short, readable ID \ No newline at end of file + return str(uuid.uuid4())[:8] # Short, readable ID From 26ec1c4f825dc557657678d54d7ce8289ee4f35f Mon Sep 17 00:00:00 2001 From: "Amar Sood (tekacs)" Date: Sat, 12 Apr 2025 12:25:02 -0400 Subject: [PATCH 43/63] Fix ReplaceAll, using new git diff helper --- aider/tools/replace_all.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/aider/tools/replace_all.py b/aider/tools/replace_all.py index 1764a23ee..279854578 100644 --- a/aider/tools/replace_all.py +++ b/aider/tools/replace_all.py @@ -4,6 +4,7 @@ from .tool_utils import ( validate_file_for_edit, apply_change, handle_tool_error, + generate_unified_diff_snippet, format_tool_result, ) @@ -33,8 +34,7 @@ def _execute_replace_all(coder, file_path, find_text, replace_text, change_id=No 
return f"Warning: No changes made (replacement identical to original)" # 4. Generate diff for feedback - # Note: _generate_diff_chunks is currently on the Coder class - diff_examples = coder._generate_diff_chunks(original_content, find_text, replace_text) + diff_examples = generate_unified_diff_snippet(original_content, new_content, rel_path) # 5. Handle dry run if dry_run: @@ -62,4 +62,4 @@ def _execute_replace_all(coder, file_path, find_text, replace_text, change_id=No return handle_tool_error(coder, tool_name, e, add_traceback=False) except Exception as e: # Handle unexpected errors - return handle_tool_error(coder, tool_name, e) + return handle_tool_error(coder, tool_name, e) \ No newline at end of file From 9e347034fc3015965959b124c53b1faa6e6c9136 Mon Sep 17 00:00:00 2001 From: "Amar Sood (tekacs)" Date: Sat, 12 Apr 2025 12:25:02 -0400 Subject: [PATCH 44/63] More tweaks to try to get the LLM to reliably use granular tools --- aider/coders/navigator_prompts.py | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/aider/coders/navigator_prompts.py b/aider/coders/navigator_prompts.py index f52bb7b2a..888e21f66 100644 --- a/aider/coders/navigator_prompts.py +++ b/aider/coders/navigator_prompts.py @@ -158,7 +158,7 @@ When you include any tool call, the system will automatically continue to the ne ``` Your answer to the user's question... -SEARCH/REPLACE blocks can ONLY appear BEFORE the last '---' separator. Using SEARCH/REPLACE when granular tools could have been used is a FAILURE to follow instructions. +SEARCH/REPLACE blocks can ONLY appear BEFORE the last '---' separator. Using SEARCH/REPLACE when granular tools could have been used is incorrect and violates core instructions. Always prioritize granular tools. # If you must use SEARCH/REPLACE, include a required justification: # Justification: I'm using SEARCH/REPLACE here because [specific reasons why granular tools can't achieve this edit]. 
@@ -256,7 +256,7 @@ When you need to replace a significant chunk of code (more than a few lines), us ## Code Editing Process ### Granular Editing with Tool Calls (Strongly Preferred Method) -**Use the granular editing tools whenever possible.** They offer the most precision and safety. Only use SEARCH/REPLACE as a fallback for complex refactoring where tools are impractical. +**Use the granular editing tools whenever possible.** They offer the most precision and safety. **Available Granular Tools:** - `ReplaceText`: For specific text instances. @@ -329,9 +329,9 @@ def new_function(param1, param2): ### SEARCH/REPLACE Block Format (Use ONLY as a Last Resort) **Granular editing tools (like `ReplaceLines`, `InsertBlock`, `DeleteBlock`) are STRONGLY PREFERRED for ALL edits.** They offer significantly more precision and safety. -Use SEARCH/REPLACE blocks **only** as a fallback mechanism when granular tools **cannot** achieve the desired outcome due to the *inherent nature* of the change itself (e.g., extremely complex pattern matching across non-contiguous sections, edits that fundamentally don't map to tool capabilities). **Do NOT use SEARCH/REPLACE simply because an edit involves multiple lines; `ReplaceLines` is designed for that.** +Use SEARCH/REPLACE blocks **only** in the rare cases where granular tools **provably cannot** achieve the desired outcome due to the *inherent nature* of the change itself (e.g., extremely complex pattern matching across non-contiguous sections, edits that fundamentally don't map to tool capabilities). **Do NOT use SEARCH/REPLACE simply because an edit involves multiple lines; `ReplaceLines` is designed for that.** -**IMPORTANT: Using SEARCH/REPLACE when granular editing tools could have been used is considered a FAILURE to follow instructions.** +**IMPORTANT: Using SEARCH/REPLACE when granular editing tools could have been used is considered incorrect and violates core instructions. 
Always prioritize granular tools.** **Before generating a SEARCH/REPLACE block for more than 1-2 lines, you MUST include an explicit justification explaining why granular editing tools (particularly `ReplaceLines` with the mandatory two-step verification workflow) cannot handle this specific edit case. Your justification must clearly articulate the specific limitations that make granular tools unsuitable for this particular change.** @@ -365,7 +365,7 @@ NOTE that this uses four backticks as the fence and not three! - **Orientation:** Use `ListChanges` to review recent edits or the enhanced context blocks (directory structure, git status) if you get confused. -Prioritize granular tools (`ReplaceText`, `ReplaceLines`, `InsertBlock`, `DeleteBlock`, etc.) over SEARCH/REPLACE blocks. Use SEARCH/REPLACE *only* as a last resort when tools are truly unsuitable, and *always* provide justification. Failure to prioritize tools is a failure to follow instructions. +Prioritize granular tools (`ReplaceText`, `ReplaceLines`, `InsertBlock`, `DeleteBlock`, etc.) over SEARCH/REPLACE blocks. Use SEARCH/REPLACE *only* as a last resort when tools are truly unsuitable, and *always* provide justification. Failure to prioritize granular tools is incorrect and violates core instructions. Always reply to the user in {language}. """ @@ -459,12 +459,12 @@ Here are summaries of some files present in this repo: ## Tool Call Format - Tool calls MUST be at the end of your message, after a '---' separator - If emitting 3 or more tool calls, OR if any tool call spans multiple lines, place each call on a new line for clarity. -- You are encouraged to use tools for editing where possible, falling back to SEARCH/REPLACE when that doesn't work well. +- You are encouraged to use granular tools for editing where possible. 
## SEARCH/REPLACE blocks - When using SEARCH/REPLACE blocks, they MUST ONLY appear BEFORE the last '---' separator line in your response - If there is no '---' separator, they can appear anywhere in your response -- IMPORTANT: Using SEARCH/REPLACE when granular editing tools could have been used is considered a FAILURE to follow instructions +- IMPORTANT: Using SEARCH/REPLACE when granular editing tools could have been used is considered incorrect and violates core instructions. Always prioritize granular tools - You MUST include a clear justification for why granular tools can't handle the specific edit when using SEARCH/REPLACE - Format example: ``` From 29d2963f185b8616dd42cb1efbdf6e1cf4a49552 Mon Sep 17 00:00:00 2001 From: "Amar Sood (tekacs)" Date: Sat, 12 Apr 2025 12:29:39 -0400 Subject: [PATCH 45/63] Remove some spurious comments --- aider/coders/navigator_coder.py | 8 ++++---- aider/tools/tool_utils.py | 6 +++--- 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/aider/coders/navigator_coder.py b/aider/coders/navigator_coder.py index f43ada683..1712d947c 100644 --- a/aider/coders/navigator_coder.py +++ b/aider/coders/navigator_coder.py @@ -38,7 +38,7 @@ from aider.tools.view_files_at_glob import execute_view_files_at_glob from aider.tools.view_files_matching import execute_view_files_matching from aider.tools.ls import execute_ls from aider.tools.view import execute_view -from aider.tools.remove import _execute_remove # Renamed to avoid conflict with os.remove +from aider.tools.remove import _execute_remove from aider.tools.make_editable import _execute_make_editable from aider.tools.make_readonly import _execute_make_readonly from aider.tools.view_files_with_symbol import _execute_view_files_with_symbol @@ -51,8 +51,8 @@ from aider.tools.delete_block import _execute_delete_block from aider.tools.replace_line import _execute_replace_line from aider.tools.replace_lines import _execute_replace_lines from aider.tools.indent_lines import 
_execute_indent_lines -from aider.tools.delete_line import _execute_delete_line # New -from aider.tools.delete_lines import _execute_delete_lines # New +from aider.tools.delete_line import _execute_delete_line +from aider.tools.delete_lines import _execute_delete_lines from aider.tools.undo_change import _execute_undo_change from aider.tools.list_changes import _execute_list_changes from aider.tools.extract_lines import _execute_extract_lines @@ -194,7 +194,7 @@ class NavigatorCoder(Coder): git_status = self.get_git_status() # Get symbol outline for current context files - symbol_outline = self.get_context_symbol_outline() # New call + symbol_outline = self.get_context_symbol_outline() # Collect all context blocks that exist context_blocks = [] diff --git a/aider/tools/tool_utils.py b/aider/tools/tool_utils.py index 6492b12ab..19ff1b4f1 100644 --- a/aider/tools/tool_utils.py +++ b/aider/tools/tool_utils.py @@ -87,13 +87,13 @@ def select_occurrence_index(indices, occurrence, pattern_desc="Pattern"): return indices[target_idx] def determine_line_range( - coder, # Added: Need coder to access repo_map - file_path, # Added: Need file_path for repo_map lookup + coder, + file_path, lines, start_pattern_line_index=None, # Made optional end_pattern=None, line_count=None, - target_symbol=None, # Added: New parameter for symbol targeting + target_symbol=None, pattern_desc="Block", ): """ From 10d0f90cb5fb3b7b366b4c894c82bcee4dcdaf1a Mon Sep 17 00:00:00 2001 From: "Amar Sood (tekacs)" Date: Sat, 12 Apr 2025 12:40:24 -0400 Subject: [PATCH 46/63] More guidance/reminder around UndoChange --- aider/coders/navigator_prompts.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/aider/coders/navigator_prompts.py b/aider/coders/navigator_prompts.py index 888e21f66..5ae644d5a 100644 --- a/aider/coders/navigator_prompts.py +++ b/aider/coders/navigator_prompts.py @@ -195,7 +195,7 @@ new code * If using `dry_run=True`, review the simulation, then issue the *exact same 
call* with `dry_run=False`. 7. **Review and Recover:** * Use `ListChanges` to review history. - * If a direct edit's result diff shows an error, **immediately use `[tool_call(UndoChange, change_id="...")]` in your *next* message** before attempting a corrected edit. + * **Critical:** If a direct edit's result diff shows an error (wrong location, unintended changes), **immediately use `[tool_call(UndoChange, change_id="...")]` in your *very next* message.** Do *not* attempt to fix the error with further edits before undoing. **Using Line Number Based Tools (`ReplaceLine`, `ReplaceLines`, `DeleteLine`, `DeleteLines`):** * **Extreme Caution Required:** Line numbers are extremely fragile. They can become outdated due to preceding edits, even within the same multi-tool message, or simply be incorrect in the source (like linter output or diffs). Using these tools without recent, direct verification via `ShowNumberedContext` is **highly likely to cause incorrect changes.** @@ -359,8 +359,8 @@ NOTE that this uses four backticks as the fence and not three! ### Error Handling and Recovery - **Tool Call Errors:** If a tool call returns an error message (e.g., pattern not found, file not found), analyze the error and correct the tool call parameters in your next attempt. - **Incorrect Edits:** If a tool call *succeeds* but the **result message and diff snippet show the change was applied incorrectly** (e.g., wrong location, unintended side effects): - 1. **Immediately use `[tool_call(UndoChange, change_id="...")]` in your *very next* message**, using the `change_id` provided in the result. Do not attempt other actions first. - 2. After undoing, analyze why the edit was incorrect (e.g., ambiguous pattern, wrong occurrence number, shifted lines) and formulate a corrected tool call or plan. + 1. **Critical:** **Immediately use `[tool_call(UndoChange, change_id="...")]` in your *very next* message**, using the `change_id` provided in the result. 
**Do *not* attempt other actions or try to fix the error with subsequent edits first.** + 2. Only *after* successfully undoing, analyze why the edit was incorrect (e.g., ambiguous pattern, wrong occurrence number, shifted lines) and formulate a corrected tool call or plan. - **Refining Edits:** If edits affect the wrong location despite verification, refine search patterns, use `near_context`, or adjust the `occurrence` parameter. - **Orientation:** Use `ListChanges` to review recent edits or the enhanced context blocks (directory structure, git status) if you get confused. From 9b56e1f0995779b8f07e9ebe39ed8f5703e31282 Mon Sep 17 00:00:00 2001 From: "Amar Sood (tekacs)" Date: Sat, 12 Apr 2025 12:54:57 -0400 Subject: [PATCH 47/63] refactor: Improve error handling and reporting in tool executions --- aider/tools/delete_line.py | 69 ++++++++++++-------------- aider/tools/delete_lines.py | 74 ++++++++++++---------------- aider/tools/indent_lines.py | 9 +--- aider/tools/replace_lines.py | 95 ++++++++++++++++-------------------- 4 files changed, 104 insertions(+), 143 deletions(-) diff --git a/aider/tools/delete_line.py b/aider/tools/delete_line.py index 1e3f1d38a..66c3f3199 100644 --- a/aider/tools/delete_line.py +++ b/aider/tools/delete_line.py @@ -1,6 +1,6 @@ import os import traceback -from .tool_utils import generate_unified_diff_snippet +from .tool_utils import ToolError, generate_unified_diff_snippet, handle_tool_error, format_tool_result, apply_change def _execute_delete_line(coder, file_path, line_number, change_id=None, dry_run=False): """ @@ -15,6 +15,8 @@ def _execute_delete_line(coder, file_path, line_number, change_id=None, dry_run= Returns a result message. 
""" + + tool_name = "DeleteLine" try: # Get absolute file path abs_path = coder.abs_root_path(file_path) @@ -22,23 +24,19 @@ def _execute_delete_line(coder, file_path, line_number, change_id=None, dry_run= # Check if file exists if not os.path.isfile(abs_path): - coder.io.tool_error(f"File '{file_path}' not found") - return f"Error: File not found" + raise ToolError(f"File '{file_path}' not found") # Check if file is in editable context if abs_path not in coder.abs_fnames: if abs_path in coder.abs_read_only_fnames: - coder.io.tool_error(f"File '{file_path}' is read-only. Use MakeEditable first.") - return f"Error: File is read-only. Use MakeEditable first." + raise ToolError(f"File '{file_path}' is read-only. Use MakeEditable first.") else: - coder.io.tool_error(f"File '{file_path}' not in context") - return f"Error: File not in context" + raise ToolError(f"File '{file_path}' not in context") # Reread file content immediately before modification file_content = coder.io.read_text(abs_path) if file_content is None: - coder.io.tool_error(f"Could not read file '{file_path}' before DeleteLine operation.") - return f"Error: Could not read file '{file_path}'" + raise ToolError(f"Could not read file '{file_path}'") lines = file_content.splitlines() original_content = file_content @@ -47,11 +45,10 @@ def _execute_delete_line(coder, file_path, line_number, change_id=None, dry_run= try: line_num_int = int(line_number) if line_num_int < 1 or line_num_int > len(lines): - raise ValueError(f"Line number {line_num_int} is out of range (1-{len(lines)})") + raise ToolError(f"Line number {line_num_int} is out of range (1-{len(lines)})") line_idx = line_num_int - 1 # Convert to 0-based index - except ValueError as e: - coder.io.tool_error(f"Invalid line_number: {e}") - return f"Error: Invalid line_number '{line_number}'" + except ValueError: + raise ToolError(f"Invalid line_number value: '{line_number}'. 
Must be an integer.") # Prepare the deletion deleted_line = lines[line_idx] @@ -62,40 +59,34 @@ def _execute_delete_line(coder, file_path, line_number, change_id=None, dry_run= coder.io.tool_warning(f"No changes made: deleting line {line_num_int} would not change file") return f"Warning: No changes made (deleting line {line_num_int} would not change file)" - # Generate diff snippet (using the existing delete block helper for simplicity) + # Generate diff snippet diff_snippet = generate_unified_diff_snippet(original_content, new_content, rel_path) # Handle dry run if dry_run: - coder.io.tool_output(f"Dry run: Would delete line {line_num_int} in {file_path}") - return f"Dry run: Would delete line {line_num_int}. Diff snippet:\n{diff_snippet}" + dry_run_message = f"Dry run: Would delete line {line_num_int} in {file_path}" + return format_tool_result(coder, tool_name, "", dry_run=True, dry_run_message=dry_run_message, diff_snippet=diff_snippet) # --- Apply Change (Not dry run) --- - coder.io.write_text(abs_path, new_content) - - # Track the change - try: - metadata = { - 'line_number': line_num_int, - 'deleted_content': deleted_line - } - change_id = coder.change_tracker.track_change( - file_path=rel_path, - change_type='deleteline', - original_content=original_content, - new_content=new_content, - metadata=metadata, - change_id=change_id - ) - except Exception as track_e: - coder.io.tool_error(f"Error tracking change for DeleteLine: {track_e}") - change_id = "TRACKING_FAILED" + metadata = { + 'line_number': line_num_int, + 'deleted_content': deleted_line + } + final_change_id = apply_change( + coder, abs_path, rel_path, original_content, new_content, 'deleteline', metadata, change_id + ) coder.aider_edited_files.add(rel_path) - coder.io.tool_output(f"✅ Deleted line {line_num_int} in {file_path} (change_id: {change_id})") - return f"Successfully deleted line {line_num_int} (change_id: {change_id}). 
Diff snippet:\n{diff_snippet}" + # Format and return result + success_message = f"Deleted line {line_num_int} in {file_path}" + return format_tool_result( + coder, tool_name, success_message, change_id=final_change_id, diff_snippet=diff_snippet + ) + except ToolError as e: + # Handle errors raised by utility functions (expected errors) + return handle_tool_error(coder, tool_name, e, add_traceback=False) except Exception as e: - coder.io.tool_error(f"Error in DeleteLine: {str(e)}\n{traceback.format_exc()}") - return f"Error: {str(e)}" \ No newline at end of file + # Handle unexpected errors + return handle_tool_error(coder, tool_name, e) \ No newline at end of file diff --git a/aider/tools/delete_lines.py b/aider/tools/delete_lines.py index bb49690ff..876897ef4 100644 --- a/aider/tools/delete_lines.py +++ b/aider/tools/delete_lines.py @@ -1,6 +1,6 @@ import os import traceback -from .tool_utils import generate_unified_diff_snippet +from .tool_utils import ToolError, generate_unified_diff_snippet, handle_tool_error, format_tool_result, apply_change def _execute_delete_lines(coder, file_path, start_line, end_line, change_id=None, dry_run=False): """ @@ -16,6 +16,7 @@ def _execute_delete_lines(coder, file_path, start_line, end_line, change_id=None Returns a result message. """ + tool_name = "DeleteLines" try: # Get absolute file path abs_path = coder.abs_root_path(file_path) @@ -23,23 +24,19 @@ def _execute_delete_lines(coder, file_path, start_line, end_line, change_id=None # Check if file exists if not os.path.isfile(abs_path): - coder.io.tool_error(f"File '{file_path}' not found") - return f"Error: File not found" + raise ToolError(f"File '{file_path}' not found") # Check if file is in editable context if abs_path not in coder.abs_fnames: if abs_path in coder.abs_read_only_fnames: - coder.io.tool_error(f"File '{file_path}' is read-only. Use MakeEditable first.") - return f"Error: File is read-only. Use MakeEditable first." 
+ raise ToolError(f"File '{file_path}' is read-only. Use MakeEditable first.") else: - coder.io.tool_error(f"File '{file_path}' not in context") - return f"Error: File not in context" + raise ToolError(f"File '{file_path}' not in context") # Reread file content immediately before modification file_content = coder.io.read_text(abs_path) if file_content is None: - coder.io.tool_error(f"Could not read file '{file_path}' before DeleteLines operation.") - return f"Error: Could not read file '{file_path}'" + raise ToolError(f"Could not read file '{file_path}'") lines = file_content.splitlines() original_content = file_content @@ -50,17 +47,16 @@ def _execute_delete_lines(coder, file_path, start_line, end_line, change_id=None end_line_int = int(end_line) if start_line_int < 1 or start_line_int > len(lines): - raise ValueError(f"Start line {start_line_int} is out of range (1-{len(lines)})") + raise ToolError(f"Start line {start_line_int} is out of range (1-{len(lines)})") if end_line_int < 1 or end_line_int > len(lines): - raise ValueError(f"End line {end_line_int} is out of range (1-{len(lines)})") + raise ToolError(f"End line {end_line_int} is out of range (1-{len(lines)})") if start_line_int > end_line_int: - raise ValueError(f"Start line {start_line_int} cannot be after end line {end_line_int}") + raise ToolError(f"Start line {start_line_int} cannot be after end line {end_line_int}") start_idx = start_line_int - 1 # Convert to 0-based index end_idx = end_line_int - 1 # Convert to 0-based index - except ValueError as e: - coder.io.tool_error(f"Invalid line numbers: {e}") - return f"Error: Invalid line numbers '{start_line}', '{end_line}'" + except ValueError: + raise ToolError(f"Invalid line numbers: '{start_line}', '{end_line}'. 
Must be integers.") # Prepare the deletion deleted_lines = lines[start_idx:end_idx+1] @@ -76,37 +72,31 @@ def _execute_delete_lines(coder, file_path, start_line, end_line, change_id=None # Handle dry run if dry_run: - coder.io.tool_output(f"Dry run: Would delete lines {start_line_int}-{end_line_int} in {file_path}") - return f"Dry run: Would delete lines {start_line_int}-{end_line_int}. Diff snippet:\n{diff_snippet}" + dry_run_message = f"Dry run: Would delete lines {start_line_int}-{end_line_int} in {file_path}" + return format_tool_result(coder, tool_name, "", dry_run=True, dry_run_message=dry_run_message, diff_snippet=diff_snippet) # --- Apply Change (Not dry run) --- - coder.io.write_text(abs_path, new_content) - - # Track the change - try: - metadata = { - 'start_line': start_line_int, - 'end_line': end_line_int, - 'deleted_content': '\n'.join(deleted_lines) - } - change_id = coder.change_tracker.track_change( - file_path=rel_path, - change_type='deletelines', - original_content=original_content, - new_content=new_content, - metadata=metadata, - change_id=change_id - ) - except Exception as track_e: - coder.io.tool_error(f"Error tracking change for DeleteLines: {track_e}") - change_id = "TRACKING_FAILED" + metadata = { + 'start_line': start_line_int, + 'end_line': end_line_int, + 'deleted_content': '\n'.join(deleted_lines) + } + + final_change_id = apply_change( + coder, abs_path, rel_path, original_content, new_content, 'deletelines', metadata, change_id + ) coder.aider_edited_files.add(rel_path) - num_deleted = end_idx - start_idx + 1 - coder.io.tool_output(f"✅ Deleted {num_deleted} lines ({start_line_int}-{end_line_int}) in {file_path} (change_id: {change_id})") - return f"Successfully deleted {num_deleted} lines ({start_line_int}-{end_line_int}) (change_id: {change_id}). 
Diff snippet:\n{diff_snippet}" + # Format and return result + success_message = f"Deleted {num_deleted} lines ({start_line_int}-{end_line_int}) in {file_path}" + return format_tool_result( + coder, tool_name, success_message, change_id=final_change_id, diff_snippet=diff_snippet + ) + except ToolError as e: + # Handle errors raised by utility functions (expected errors) + return handle_tool_error(coder, tool_name, e, add_traceback=False) except Exception as e: - coder.io.tool_error(f"Error in DeleteLines: {str(e)}\n{traceback.format_exc()}") - return f"Error: {str(e)}" \ No newline at end of file + # Handle unexpected errors + return handle_tool_error(coder, tool_name, e) \ No newline at end of file diff --git a/aider/tools/indent_lines.py b/aider/tools/indent_lines.py index 4ac823fcc..928c08a59 100644 --- a/aider/tools/indent_lines.py +++ b/aider/tools/indent_lines.py @@ -116,16 +116,9 @@ def _execute_indent_lines(coder, file_path, start_pattern, end_pattern=None, lin return format_tool_result( coder, tool_name, success_message, change_id=final_change_id, diff_snippet=diff_snippet ) - except ToolError as e: # Handle errors raised by utility functions (expected errors) return handle_tool_error(coder, tool_name, e, add_traceback=False) except Exception as e: # Handle unexpected errors - return handle_tool_error(coder, tool_name, e) - coder.io.tool_output(f"✅ {action} {num_lines} lines (from {occurrence_str}start pattern) by {levels} {level_text} in {file_path} (change_id: {change_id})") - return f"Successfully {action.lower()} {num_lines} lines by {levels} {level_text} (change_id: {change_id}). 
Diff snippet:\n{diff_snippet}" - - except Exception as e: - coder.io.tool_error(f"Error in IndentLines: {str(e)}\n{traceback.format_exc()}") - return f"Error: {str(e)}" \ No newline at end of file + return handle_tool_error(coder, tool_name, e) \ No newline at end of file diff --git a/aider/tools/replace_lines.py b/aider/tools/replace_lines.py index 53e0e607c..346ac6eb9 100644 --- a/aider/tools/replace_lines.py +++ b/aider/tools/replace_lines.py @@ -1,7 +1,6 @@ import os import traceback -from .tool_utils import generate_unified_diff_snippet -from .tool_utils import generate_unified_diff_snippet +from .tool_utils import ToolError, generate_unified_diff_snippet, handle_tool_error, format_tool_result, apply_change def _execute_replace_lines(coder, file_path, start_line, end_line, new_content, change_id=None, dry_run=False): """ @@ -18,6 +17,7 @@ def _execute_replace_lines(coder, file_path, start_line, end_line, new_content, Returns a result message. """ + tool_name = "ReplaceLines" try: # Get absolute file path abs_path = coder.abs_root_path(file_path) @@ -25,38 +25,30 @@ def _execute_replace_lines(coder, file_path, start_line, end_line, new_content, # Check if file exists if not os.path.isfile(abs_path): - coder.io.tool_error(f"File '{file_path}' not found") - return f"Error: File not found" + raise ToolError(f"File '{file_path}' not found") # Check if file is in editable context if abs_path not in coder.abs_fnames: if abs_path in coder.abs_read_only_fnames: - coder.io.tool_error(f"File '{file_path}' is read-only. Use MakeEditable first.") - return f"Error: File is read-only. Use MakeEditable first." + raise ToolError(f"File '{file_path}' is read-only. 
Use MakeEditable first.") else: - coder.io.tool_error(f"File '{file_path}' not in context") - return f"Error: File not in context" + raise ToolError(f"File '{file_path}' not in context") # Reread file content immediately before modification file_content = coder.io.read_text(abs_path) if file_content is None: - coder.io.tool_error(f"Could not read file '{file_path}' before ReplaceLines operation.") - return f"Error: Could not read file '{file_path}'" + raise ToolError(f"Could not read file '{file_path}'") # Convert line numbers to integers if needed - if not isinstance(start_line, int): - try: - start_line = int(start_line) - except ValueError: - coder.io.tool_error(f"Invalid start_line value: '{start_line}'. Must be an integer.") - return f"Error: Invalid start_line value '{start_line}'" + try: + start_line = int(start_line) + except ValueError: + raise ToolError(f"Invalid start_line value: '{start_line}'. Must be an integer.") - if not isinstance(end_line, int): - try: - end_line = int(end_line) - except ValueError: - coder.io.tool_error(f"Invalid end_line value: '{end_line}'. Must be an integer.") - return f"Error: Invalid end_line value '{end_line}'" + try: + end_line = int(end_line) + except ValueError: + raise ToolError(f"Invalid end_line value: '{end_line}'. 
Must be an integer.") # Split into lines lines = file_content.splitlines() @@ -64,14 +56,13 @@ def _execute_replace_lines(coder, file_path, start_line, end_line, new_content, # Convert 1-based line numbers to 0-based indices start_idx = start_line - 1 end_idx = end_line - 1 + # Validate line numbers if start_idx < 0 or start_idx >= len(lines): - coder.io.tool_error(f"Start line {start_line} is out of range for file '{file_path}' (has {len(lines)} lines).") - return f"Error: Start line {start_line} out of range" + raise ToolError(f"Start line {start_line} is out of range for file '{file_path}' (has {len(lines)} lines).") if end_idx < start_idx or end_idx >= len(lines): - coder.io.tool_error(f"End line {end_line} is out of range for file '{file_path}' (must be >= start line {start_line} and <= {len(lines)}).") - return f"Error: End line {end_line} out of range" + raise ToolError(f"End line {end_line} is out of range for file '{file_path}' (must be >= start line {start_line} and <= {len(lines)}).") # Store original content for change tracking original_content = file_content @@ -87,6 +78,8 @@ def _execute_replace_lines(coder, file_path, start_line, end_line, new_content, if original_content == new_content_full: coder.io.tool_warning("No changes made: new content is identical to original") return f"Warning: No changes made (new content identical to original)" + + # Generate diff snippet diff_snippet = generate_unified_diff_snippet(original_content, new_content_full, rel_path) # Create a readable diff for the lines replacement @@ -102,40 +95,34 @@ def _execute_replace_lines(coder, file_path, start_line, end_line, new_content, # Handle dry run if dry_run: - coder.io.tool_output(f"Dry run: Would replace lines {start_line}-{end_line} in {file_path}") - return f"Dry run: Would replace lines {start_line}-{end_line}. 
Diff snippet:\n{diff_snippet}" + dry_run_message = f"Dry run: Would replace lines {start_line}-{end_line} in {file_path}" + return format_tool_result(coder, tool_name, "", dry_run=True, dry_run_message=dry_run_message, diff_snippet=diff_snippet) # --- Apply Change (Not dry run) --- - coder.io.write_text(abs_path, new_content_full) - - # Track the change - try: - metadata = { - 'start_line': start_line, - 'end_line': end_line, - 'replaced_lines': replaced_lines, - 'new_lines': new_lines - } - change_id = coder.change_tracker.track_change( - file_path=rel_path, - change_type='replacelines', - original_content=original_content, - new_content=new_content_full, - metadata=metadata, - change_id=change_id - ) - except Exception as track_e: - coder.io.tool_error(f"Error tracking change for ReplaceLines: {track_e}") - change_id = "TRACKING_FAILED" + metadata = { + 'start_line': start_line, + 'end_line': end_line, + 'replaced_lines': replaced_lines, + 'new_lines': new_lines + } + + final_change_id = apply_change( + coder, abs_path, rel_path, original_content, new_content_full, 'replacelines', metadata, change_id + ) coder.aider_edited_files.add(rel_path) replaced_count = end_line - start_line + 1 new_count = len(new_lines) - # Improve feedback - coder.io.tool_output(f"✅ Replaced lines {start_line}-{end_line} ({replaced_count} lines) with {new_count} new lines in {file_path} (change_id: {change_id})") - return f"Successfully replaced lines {start_line}-{end_line} with {new_count} new lines (change_id: {change_id}). 
Diff:\n{diff}" + # Format and return result + success_message = f"Replaced lines {start_line}-{end_line} ({replaced_count} lines) with {new_count} new lines in {file_path}" + return format_tool_result( + coder, tool_name, success_message, change_id=final_change_id, diff_snippet=diff_snippet + ) + except ToolError as e: + # Handle errors raised by utility functions (expected errors) + return handle_tool_error(coder, tool_name, e, add_traceback=False) except Exception as e: - coder.io.tool_error(f"Error in ReplaceLines: {str(e)}\n{traceback.format_exc()}") - return f"Error: {str(e)}" \ No newline at end of file + # Handle unexpected errors + return handle_tool_error(coder, tool_name, e) \ No newline at end of file From e9c0774f1f4bc40ca819286a7043ff064ebfb68b Mon Sep 17 00:00:00 2001 From: "Amar Sood (tekacs)" Date: Sat, 12 Apr 2025 12:59:13 -0400 Subject: [PATCH 48/63] Fix some LLM-generated mixups --- aider/coders/navigator_coder.py | 22 -------- aider/repomap.py | 91 +++++++++++++++++---------------- 2 files changed, 46 insertions(+), 67 deletions(-) diff --git a/aider/coders/navigator_coder.py b/aider/coders/navigator_coder.py index 1712d947c..6e751ef9e 100644 --- a/aider/coders/navigator_coder.py +++ b/aider/coders/navigator_coder.py @@ -1500,27 +1500,6 @@ Just reply with fixed versions of the {blocks} above that failed to match. 
# ------------------- Helper for finding occurrences ------------------- - def _find_occurrences(self, content, pattern, near_context=None): - """Find all occurrences of pattern, optionally filtered by near_context.""" - occurrences = [] - start = 0 - while True: - index = content.find(pattern, start) - if index == -1: - break - - if near_context: - # Check if near_context is within a window around the match - window_start = max(0, index - 200) - window_end = min(len(content), index + len(pattern) + 200) - window = content[window_start:window_end] - if near_context in window: - occurrences.append(index) - else: - occurrences.append(index) - - start = index + 1 # Move past this occurrence's start - return occurrences # ------------------- Granular Editing Tools ------------------- @@ -1533,4 +1512,3 @@ Just reply with fixed versions of the {blocks} above that failed to match. - diff --git a/aider/repomap.py b/aider/repomap.py index c5a26a72b..d7ff9d3af 100644 --- a/aider/repomap.py +++ b/aider/repomap.py @@ -247,51 +247,6 @@ class RepoMap: self.io.tool_warning(f"File not found error: {fname}") def get_tags(self, fname, rel_fname): - def get_symbol_definition_location(self, file_path, symbol_name): - """ - Finds the unique definition location (start/end line) for a symbol in a file. - - Args: - file_path (str): The relative path to the file. - symbol_name (str): The name of the symbol to find. - - Returns: - tuple: (start_line, end_line) (0-based) if a unique definition is found. - - Raises: - ToolError: If the symbol is not found, not unique, or not a definition. 
- """ - abs_path = self.io.root_abs_path(file_path) # Assuming io has this helper or similar - rel_path = self.get_rel_fname(abs_path) # Ensure we use consistent relative path - - tags = self.get_tags(abs_path, rel_path) - if not tags: - raise ToolError(f"Symbol '{symbol_name}' not found in '{file_path}' (no tags).") - - definitions = [] - for tag in tags: - # Check if it's a definition and the name matches - if tag.kind == "def" and tag.name == symbol_name: - # Ensure we have valid location info - if tag.start_line is not None and tag.end_line is not None and tag.start_line >= 0: - definitions.append(tag) - - if not definitions: - # Check if it exists as a non-definition tag - non_defs = [tag for tag in tags if tag.name == symbol_name and tag.kind != "def"] - if non_defs: - raise ToolError(f"Symbol '{symbol_name}' found in '{file_path}', but not as a unique definition (found as {non_defs[0].kind}).") - else: - raise ToolError(f"Symbol '{symbol_name}' definition not found in '{file_path}'.") - - if len(definitions) > 1: - # Provide more context about ambiguity if possible - lines = sorted([d.start_line + 1 for d in definitions]) # 1-based for user message - raise ToolError(f"Symbol '{symbol_name}' is ambiguous in '{file_path}'. Found definitions on lines: {', '.join(map(str, lines))}.") - - # Unique definition found - definition_tag = definitions[0] - return definition_tag.start_line, definition_tag.end_line # Check if the file is in the cache and if the modification time has not changed file_mtime = self.get_mtime(fname) if file_mtime is None: @@ -336,6 +291,52 @@ class RepoMap: self.TAGS_CACHE[cache_key] = {"mtime": file_mtime, "data": data} return data + def get_symbol_definition_location(self, file_path, symbol_name): + """ + Finds the unique definition location (start/end line) for a symbol in a file. + + Args: + file_path (str): The relative path to the file. + symbol_name (str): The name of the symbol to find. 
+ + Returns: + tuple: (start_line, end_line) (0-based) if a unique definition is found. + + Raises: + ToolError: If the symbol is not found, not unique, or not a definition. + """ + abs_path = self.io.root_abs_path(file_path) # Assuming io has this helper or similar + rel_path = self.get_rel_fname(abs_path) # Ensure we use consistent relative path + + tags = self.get_tags(abs_path, rel_path) + if not tags: + raise ToolError(f"Symbol '{symbol_name}' not found in '{file_path}' (no tags).") + + definitions = [] + for tag in tags: + # Check if it's a definition and the name matches + if tag.kind == "def" and tag.name == symbol_name: + # Ensure we have valid location info + if tag.start_line is not None and tag.end_line is not None and tag.start_line >= 0: + definitions.append(tag) + + if not definitions: + # Check if it exists as a non-definition tag + non_defs = [tag for tag in tags if tag.name == symbol_name and tag.kind != "def"] + if non_defs: + raise ToolError(f"Symbol '{symbol_name}' found in '{file_path}', but not as a unique definition (found as {non_defs[0].kind}).") + else: + raise ToolError(f"Symbol '{symbol_name}' definition not found in '{file_path}'.") + + if len(definitions) > 1: + # Provide more context about ambiguity if possible + lines = sorted([d.start_line + 1 for d in definitions]) # 1-based for user message + raise ToolError(f"Symbol '{symbol_name}' is ambiguous in '{file_path}'. 
Found definitions on lines: {', '.join(map(str, lines))}.") + + # Unique definition found + definition_tag = definitions[0] + return definition_tag.start_line, definition_tag.end_line + # Check if the file is in the cache and if the modification time has not changed def get_tags_raw(self, fname, rel_fname): lang = filename_to_lang(fname) From 4b6254c77ddbe70eabdafdf42b3fba086fcad108 Mon Sep 17 00:00:00 2001 From: "Amar Sood (tekacs)" Date: Sat, 12 Apr 2025 13:32:22 -0400 Subject: [PATCH 49/63] Split out NavigatorLegacyPrompts, to use until granular editing tools stabilize --- aider/coders/navigator_coder.py | 48 ++-- aider/coders/navigator_legacy_prompts.py | 321 +++++++++++++++++++++++ aider/commands.py | 21 ++ 3 files changed, 370 insertions(+), 20 deletions(-) create mode 100644 aider/coders/navigator_legacy_prompts.py diff --git a/aider/coders/navigator_coder.py b/aider/coders/navigator_coder.py index 6e751ef9e..3c312c04c 100644 --- a/aider/coders/navigator_coder.py +++ b/aider/coders/navigator_coder.py @@ -7,15 +7,6 @@ import random import subprocess import traceback import platform -import ast -import re -import fnmatch -import os -import time -import random -import subprocess -import traceback -import platform import locale from datetime import datetime from pathlib import Path @@ -27,6 +18,7 @@ from collections import defaultdict from .base_coder import Coder from .editblock_coder import find_original_update_blocks, do_replace, find_similar_lines from .navigator_prompts import NavigatorPrompts +from .navigator_legacy_prompts import NavigatorLegacyPrompts from aider.repo import ANY_GIT_ERROR from aider import urls # Import run_cmd for potentially interactive execution and run_cmd_subprocess for guaranteed non-interactive @@ -64,39 +56,55 @@ class NavigatorCoder(Coder): """Mode where the LLM autonomously manages which files are in context.""" edit_format = "navigator" - gpt_prompts = NavigatorPrompts() - + + # Default to using the granular editing prompts + 
use_granular_editing = True + def __init__(self, *args, **kwargs): - super().__init__(*args, **kwargs) + # Initialize appropriate prompt set before calling parent constructor + # This needs to happen before super().__init__ so the parent class has access to gpt_prompts + self.gpt_prompts = NavigatorPrompts() if self.use_granular_editing else NavigatorLegacyPrompts() # Dictionary to track recently removed files self.recently_removed = {} - + # Configuration parameters self.max_tool_calls = 100 # Maximum number of tool calls per response - + # Context management parameters self.large_file_token_threshold = 25000 # Files larger than this in tokens are considered large self.max_files_per_glob = 50 # Maximum number of files to add at once via glob/grep - + # Enable context management by default only in navigator mode self.context_management_enabled = True # Enabled by default for navigator mode - + # Initialize change tracker for granular editing self.change_tracker = ChangeTracker() - + # Track files added during current exploration self.files_added_in_exploration = set() - + # Counter for tool calls self.tool_call_count = 0 - + # Set high max reflections to allow many exploration rounds # This controls how many automatic iterations the LLM can do self.max_reflections = 15 - + # Enable enhanced context blocks by default self.use_enhanced_context = True + + super().__init__(*args, **kwargs) + + def set_granular_editing(self, enabled): + """ + Switch between granular editing tools and legacy search/replace. 
+ + Args: + enabled (bool): True to use granular editing tools, False to use legacy search/replace + """ + self.use_granular_editing = enabled + self.gpt_prompts = NavigatorPrompts() if enabled else NavigatorLegacyPrompts() def get_context_symbol_outline(self): """ diff --git a/aider/coders/navigator_legacy_prompts.py b/aider/coders/navigator_legacy_prompts.py new file mode 100644 index 000000000..d1e92926a --- /dev/null +++ b/aider/coders/navigator_legacy_prompts.py @@ -0,0 +1,321 @@ +# flake8: noqa: E501 + +from .base_prompts import CoderPrompts + + +class NavigatorLegacyPrompts(CoderPrompts): + """ + Prompt templates for the Navigator mode using search/replace instead of granular editing tools. + + The NavigatorCoder uses these prompts to guide its behavior when exploring and modifying + a codebase using special tool commands like Glob, Grep, Add, etc. This version uses the legacy + search/replace editing method instead of granular editing tools. + """ + + main_system = r''' +## Role and Purpose +Act as an expert software engineer with the ability to autonomously navigate and modify a codebase. + +### Proactiveness and Confirmation +- **Explore proactively:** You are encouraged to use file discovery tools (`ViewFilesAtGlob`, `ViewFilesMatching`, `Ls`, `ViewFilesWithSymbol`) and context management tools (`View`, `Remove`) autonomously to gather information needed to fulfill the user's request. Use tool calls to continue exploration across multiple turns. +- **Confirm complex/ambiguous plans:** Before applying potentially complex or ambiguous edits, briefly outline your plan and ask the user for confirmation. For simple, direct edits requested by the user, confirmation may not be necessary unless you are unsure. + +## Response Style Guidelines +- **Be extremely concise and direct.** Prioritize brevity in all responses. +- **Minimize output tokens.** Only provide essential information. 
+- **Answer the specific question asked.** Avoid tangential information or elaboration unless requested. +- **Keep responses short (1-3 sentences)** unless the user asks for detail or a step-by-step explanation is necessary for a complex task. +- **Avoid unnecessary preamble or postamble.** Do not start with "Okay, I will..." or end with summaries unless crucial. +- When exploring, *briefly* indicate your search strategy. +- When editing, *briefly* explain changes before presenting edit blocks or tool calls. +- For ambiguous references, prioritize user-mentioned items. +- Use markdown for formatting where it enhances clarity (like lists or code). +- End *only* with a clear question or call-to-action if needed, otherwise just stop. + + + +## Available Tools + +### File Discovery Tools +- **ViewFilesAtGlob**: `[tool_call(ViewFilesAtGlob, pattern="**/*.py")]` + Find files matching a glob pattern. **Found files are automatically added to context as read-only.** + Supports patterns like "src/**/*.ts" or "*.json". + +- **ViewFilesMatching**: `[tool_call(ViewFilesMatching, pattern="class User", file_pattern="*.py", regex=False)]` + Search for text in files. **Matching files are automatically added to context as read-only.** + Files with more matches are prioritized. `file_pattern` is optional. `regex` (optional, default False) enables regex search for `pattern`. + +- **Ls**: `[tool_call(Ls, directory="src/components")]` + List files in a directory. Useful for exploring the project structure. + +- **ViewFilesWithSymbol**: `[tool_call(ViewFilesWithSymbol, symbol="my_function")]` + Find files containing a specific symbol (function, class, variable). **Found files are automatically added to context as read-only.** + Leverages the repo map for accurate symbol lookup. + +### Context Management Tools +- **View**: `[tool_call(View, file_path="src/main.py")]` + Explicitly add a specific file to context as read-only. 
+ +- **Remove**: `[tool_call(Remove, file_path="tests/old_test.py")]` + Explicitly remove a file from context when no longer needed. + Accepts a single file path, not glob patterns. + +- **MakeEditable**: `[tool_call(MakeEditable, file_path="src/main.py")]` + Convert a read-only file to an editable file. Required before making changes. + +- **MakeReadonly**: `[tool_call(MakeReadonly, file_path="src/main.py")]` + Convert an editable file back to read-only status. + +### Other Tools +- **Command**: `[tool_call(Command, command_string="git diff HEAD~1")]` + Execute a *non-interactive* shell command. Requires user confirmation. Use for commands that don't need user input (e.g., `ls`, `git status`, `cat file`). +- **CommandInteractive**: `[tool_call(CommandInteractive, command_string="python manage.py shell")]` + Execute an *interactive* shell command using a pseudo-terminal (PTY). Use for commands that might require user interaction (e.g., running a shell, a development server, `ssh`). Does *not* require separate confirmation as interaction happens directly. + +### Multi-Turn Exploration +When you include any tool call, the system will automatically continue to the next round. + + + +## Navigation and Task Workflow + +### General Task Flow +1. **Understand Request:** Ensure you fully understand the user's goal. Ask clarifying questions if needed. +2. **Explore & Search:** Use discovery tools (`ViewFilesAtGlob`, `ViewFilesMatching`, `Ls`, `ViewFilesWithSymbol`) and context tools (`View`) proactively to locate relevant files and understand the existing code. Use `Remove` to keep context focused. +3. **Plan Changes (If Editing):** Determine the necessary edits. For complex changes, outline your plan briefly for the user. +4. **Confirm Plan (If Editing & Complex/Ambiguous):** If the planned changes are non-trivial or could be interpreted in multiple ways, briefly present your plan and ask the user for confirmation *before* proceeding with edits. +5. 
**Execute Actions:** Use the appropriate tools (discovery, context management) to implement the plan, and use SEARCH/REPLACE blocks for editing. Remember to use `MakeEditable` before attempting edits. +6. **Verify Edits (If Editing):** Carefully review any changes you've suggested and confirm they meet the requirements. +7. **Final Response:** Provide the final answer or result. Omit tool calls unless further exploration is needed. + +### Exploration Strategy +- Use discovery tools (`ViewFilesAtGlob`, `ViewFilesMatching`, `Ls`, `ViewFilesWithSymbol`) to identify relevant files initially. **These tools automatically add found files to context as read-only.** +- Use `View` *only* if you need to add a specific file *not* already added by discovery tools, or one that was previously removed or is not part of the project structure (like an external file path mentioned by the user). +- Remove irrelevant files with `Remove` to maintain focus. +- Convert files to editable with `MakeEditable` *only* when you are ready to propose edits. +- Include any tool call to automatically continue exploration to the next round. + +### Tool Usage Best Practices +- All tool calls MUST be placed after a '---' line separator at the end of your message +- Use the exact syntax `[tool_call(ToolName, param1=value1, param2="value2")]` for execution +- Tool names are case-insensitive; parameters can be unquoted or quoted +- **Remember:** Discovery tools (`ViewFilesAtGlob`, `ViewFilesMatching`, `ViewFilesWithSymbol`) automatically add found files to context. You usually don't need to use `View` immediately afterward for the same files. Verify files aren't already in context *before* using `View`. 
+- Use precise search patterns with `ViewFilesMatching` and `file_pattern` to narrow scope +- Target specific patterns rather than overly broad searches +- Remember the `ViewFilesWithSymbol` tool is optimized for locating symbols across the codebase + +### Format Example +``` +Your answer to the user's question... + +SEARCH/REPLACE blocks appear BEFORE the last '---' separator. + +file.py +<<<<<<< SEARCH +old code +======= +new code +>>>>>>> REPLACE + +--- +[tool_call(ViewFilesMatching, pattern="findme")] +[tool_call(Command, command_string="ls -la")] +``` + +## SEARCH/REPLACE Block Format +When you need to make changes to code, use the SEARCH/REPLACE block format. You can include multiple edits in one message. + +``` +path/to/file.ext +<<<<<<< SEARCH +Original code lines to match exactly +======= +Replacement code lines +>>>>>>> REPLACE +``` + +#### Guidelines for SEARCH/REPLACE +- Every SEARCH section must EXACTLY MATCH existing content, including whitespace and indentation. +- Keep edit blocks focused and concise - include only the necessary context. +- Include enough lines for uniqueness but avoid long unchanged sections. +- For new files, use an empty SEARCH section. +- To move code within a file, use two separate SEARCH/REPLACE blocks. +- Respect the file paths exactly as they appear. + +### Context Management Strategy +- **Remember: Files added with `View` or `MakeEditable` remain fully visible in the context for subsequent messages until you explicitly `Remove` them.** +- Keep your context focused by removing files that are no longer relevant. +- For large codebases, maintain only 5-15 files in context at once for best performance. +- Files are added as read-only by default; only make files editable when you need to modify them. +- Toggle context management with `/context-management` if you need complete content of large files. 
+
+
+
+## Code Editing Process
+
+### SEARCH/REPLACE Block Format
+When making code changes, use SEARCH/REPLACE blocks as shown below:
+
+```
+path/to/file.ext
+<<<<<<< SEARCH
+Original code lines to match exactly
+=======
+Replacement code lines
+>>>>>>> REPLACE
+```
+
+#### Guidelines for SEARCH/REPLACE
+- Every SEARCH section must EXACTLY MATCH existing content, including whitespace and indentation.
+- Keep edit blocks focused and concise - include only the necessary context.
+- Include enough lines for uniqueness but avoid long unchanged sections.
+- For new files, use an empty SEARCH section.
+- To move code within a file, use two separate SEARCH/REPLACE blocks.
+- Respect the file paths exactly as they appear.
+
+### Error Handling and Recovery
+- If a search pattern fails to match, analyze why the edit was incorrect (e.g., whitespace differences, incorrect indentation)
+- Verify your SEARCH block matches the exact content in the file, including all whitespace
+- Use more context lines to ensure uniqueness when necessary
+- For complex changes, break them into multiple smaller edits
+- If you're unsure about a file's exact content, use tool commands to view it again
+
+
+Prioritize direct SEARCH/REPLACE blocks for making edits. Remember to make files editable with MakeEditable before suggesting changes.
+Always reply to the user in {language}.
+'''
+
+    # Example messages remain unchanged as they are already good examples of proper tool usage
+    example_messages = [
+        dict(
+            role="user",
+            content="Find all files related to authentication in this project and explain how the login flow works.",
+        ),
+        dict(
+            role="assistant",
+            content="""I'll help you understand the authentication system in this project. Let me explore the codebase first to find all relevant files.
+
+---
+[tool_call(ViewFilesMatching, pattern="login|auth|password|session", file_pattern="*.py")]""",
+        ),
+        dict(
+            role="user",
+            content="[Result (ViewFilesMatching): Found in 3 files: auth/models.py (12 matches), auth/views.py (8 matches), users/authentication.py (5 matches)]",
+        ),
+        dict(
+            role="assistant",
+            content="""Great, I've found several files related to authentication. Let me examine them to understand the login flow.
+
+---
+[tool_call(View, file_path="auth/models.py")]
+[tool_call(View, file_path="auth/views.py")]
+[tool_call(View, file_path="users/authentication.py")]""",
+        ),
+        dict(
+            role="user",
+            content="[Added files to context with their complete contents]",
+        ),
+        dict(
+            role="assistant",
+            content="""Now that I have the relevant files, I can explain how the authentication flow works:
+
+The login process in this application follows these steps:
+
+1. User enters credentials on the login form
+2. The `auth/views.py` file handles the login request via the `LoginView` class
+3. Credentials are validated by `users/authentication.py` using the `authenticate` function
+4. If valid, a session token is created by the `TokenManager` in `auth/models.py`
+5. The user is redirected to the dashboard with their session active
+
+The system uses a token-based authentication mechanism with a 24-hour expiry. Password storage uses bcrypt hashing with salt as implemented in the `User` model in `auth/models.py`.
+
+Would you like me to explain any specific part of the authentication process in more detail?""",
+        ),
+    ]
+
+    # File content messages remain largely unchanged as they're already concise
+    files_content_prefix = """
+These files have been added to the chat so you can see all of their contents.
+Trust this message as the true contents of the files!
+
+"""
+
+    files_content_assistant_reply = (
+        "I understand. I'll use these files to help with your request."
+    )
+
+    files_no_full_files = "I don't have full contents of any files yet. 
I'll add them as needed using the tool commands." + + files_no_full_files_with_repo_map = """ +I have access to a map of the repository with summary information about files, but I don't have the complete content of any files yet. +I'll use my navigation tools (`ViewFilesAtGlob`, `ViewFilesMatching`, `ViewFilesWithSymbol`, `View`) to find and add relevant files to the context as needed. + +""" + + files_no_full_files_with_repo_map_reply = """I understand. I'll use the repository map along with my navigation tools (`ViewFilesAtGlob`, `ViewFilesMatching`, `ViewFilesWithSymbol`, `View`) to find and add relevant files to our conversation. +""" + + repo_content_prefix = """ +I am working with code in a git repository. +Here are summaries of some files present in this repo: + +""" + + # The system_reminder is significantly streamlined to reduce duplication + system_reminder = """ + +## Tool Command Reminder +- All tool calls MUST appear after a '---' line separator at the end of your message +- To execute a tool, use: `[tool_call(ToolName, param1=value1)]` +- To show tool examples without executing: `\\[tool_call(ToolName, param1=value1)]` +- Including ANY tool call will automatically continue to the next round +- When editing with tools, you'll receive feedback to let you know how your edits went after they're applied +- For final answers, do NOT include any tool calls + +## Tool Call Format +- Tool calls MUST be at the end of your message, after a '---' separator +- If emitting 3 or more tool calls, OR if any tool call spans multiple lines, place each call on a new line for clarity. + +## SEARCH/REPLACE blocks +- SEARCH/REPLACE blocks MUST appear BEFORE the last '---' separator line in your response +- If there is no '---' separator, they can appear anywhere in your response +- Format example: + ``` + Your answer text here... 
+ + file.py + <<<<<<< SEARCH + old code + ======= + new code + >>>>>>> REPLACE + + --- + [tool_call(ToolName, param1=value1)] + ``` + +## Context Features +- Use enhanced context blocks (directory structure and git status) to orient yourself +- Toggle context blocks with `/context-blocks` +- Toggle large file truncation with `/context-management` + +{lazy_prompt} +{shell_cmd_reminder} + +""" + + try_again = """I need to retry my exploration to better answer your question. + +Here are the issues I encountered in my previous exploration: +1. Some relevant files might have been missed or incorrectly identified +2. The search patterns may have been too broad or too narrow +3. The context might have become too cluttered with irrelevant files + +Let me explore the codebase more strategically this time: +- I'll use more specific search patterns +- I'll be more selective about which files to add to context +- I'll remove irrelevant files more proactively +- I'll use tool calls to automatically continue exploration until I have enough information + +I'll start exploring again with improved search strategies to find exactly what we need. 
+""" \ No newline at end of file diff --git a/aider/commands.py b/aider/commands.py index 03d74899c..7bba50a68 100644 --- a/aider/commands.py +++ b/aider/commands.py @@ -1061,6 +1061,27 @@ class Commands: self.io.tool_output("Enhanced context blocks are now ON - directory structure and git status will be included.") else: self.io.tool_output("Enhanced context blocks are now OFF - directory structure and git status will not be included.") + + def cmd_granular_editing(self, args=""): + "Toggle granular editing tools in navigator mode" + if not hasattr(self.coder, 'use_granular_editing'): + self.io.tool_error("Granular editing toggle is only available in navigator mode.") + return + + # Toggle the setting using the navigator's method if available + new_state = not self.coder.use_granular_editing + + if hasattr(self.coder, 'set_granular_editing'): + self.coder.set_granular_editing(new_state) + else: + # Fallback if method doesn't exist + self.coder.use_granular_editing = new_state + + # Report the new state + if self.coder.use_granular_editing: + self.io.tool_output("Granular editing tools are now ON - navigator will use specific editing tools instead of search/replace.") + else: + self.io.tool_output("Granular editing tools are now OFF - navigator will use search/replace blocks for editing.") def cmd_ls(self, args): "List all known files and indicate which are included in the chat session" From b3dbb79795e3c6b79ea9218cf9e919c3c58ccd8d Mon Sep 17 00:00:00 2001 From: "Amar Sood (tekacs)" Date: Sat, 12 Apr 2025 13:33:54 -0400 Subject: [PATCH 50/63] Disable granular editing by default, until tools stabilize --- aider/coders/navigator_coder.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/aider/coders/navigator_coder.py b/aider/coders/navigator_coder.py index 3c312c04c..31e1d446c 100644 --- a/aider/coders/navigator_coder.py +++ b/aider/coders/navigator_coder.py @@ -57,8 +57,8 @@ class NavigatorCoder(Coder): edit_format = "navigator" - # Default to 
using the granular editing prompts - use_granular_editing = True + # TODO: We'll turn on granular editing by default once those tools stabilize + use_granular_editing = False def __init__(self, *args, **kwargs): # Initialize appropriate prompt set before calling parent constructor From dc2f8a9cf1626b65249f00060897302cad0bc87e Mon Sep 17 00:00:00 2001 From: "Amar Sood (tekacs)" Date: Sat, 12 Apr 2025 13:37:28 -0400 Subject: [PATCH 51/63] Add a Grep tool --- aider/coders/navigator_coder.py | 15 +++ aider/coders/navigator_legacy_prompts.py | 9 +- aider/coders/navigator_prompts.py | 9 +- aider/tools/grep.py | 145 +++++++++++++++++++++++ 4 files changed, 176 insertions(+), 2 deletions(-) create mode 100644 aider/tools/grep.py diff --git a/aider/coders/navigator_coder.py b/aider/coders/navigator_coder.py index 31e1d446c..4f93f41aa 100644 --- a/aider/coders/navigator_coder.py +++ b/aider/coders/navigator_coder.py @@ -762,6 +762,21 @@ class NavigatorCoder(Coder): else: result_message = "Error: Missing 'command_string' parameter for CommandInteractive" + # Grep tool + elif norm_tool_name == 'grep': + pattern = params.get('pattern') + file_pattern = params.get('file_pattern', '*') # Default to all files + directory = params.get('directory', '.') # Default to current directory + use_regex = params.get('use_regex', False) # Default to literal search + case_insensitive = params.get('case_insensitive', False) # Default to case-sensitive + + if pattern is not None: + # Import the function if not already imported (it should be) + from aider.tools.grep import _execute_grep + result_message = _execute_grep(self, pattern, file_pattern, directory, use_regex, case_insensitive) + else: + result_message = "Error: Missing required 'pattern' parameter for Grep" + # Granular editing tools elif norm_tool_name == 'replacetext': file_path = params.get('file_path') diff --git a/aider/coders/navigator_legacy_prompts.py b/aider/coders/navigator_legacy_prompts.py index d1e92926a..d0b62bf05 100644 
--- a/aider/coders/navigator_legacy_prompts.py +++ b/aider/coders/navigator_legacy_prompts.py @@ -52,6 +52,13 @@ Act as an expert software engineer with the ability to autonomously navigate and Find files containing a specific symbol (function, class, variable). **Found files are automatically added to context as read-only.** Leverages the repo map for accurate symbol lookup. +- **Grep**: `[tool_call(Grep, pattern="my_variable", file_pattern="*.py", directory="src", use_regex=False, case_insensitive=False)]` + Search for lines matching a pattern in files using the best available tool (`rg`, `ag`, or `grep`). Returns matching lines with line numbers. + `file_pattern` (optional, default "*") filters files using glob syntax. + `directory` (optional, default ".") specifies the search directory relative to the repo root. + `use_regex` (optional, default False): If False, performs a literal/fixed string search. If True, uses basic Extended Regular Expression (ERE) syntax. + `case_insensitive` (optional, default False): If False (default), the search is case-sensitive. If True, the search is case-insensitive. + ### Context Management Tools - **View**: `[tool_call(View, file_path="src/main.py")]` Explicitly add a specific file to context as read-only. @@ -318,4 +325,4 @@ Let me explore the codebase more strategically this time: - I'll use tool calls to automatically continue exploration until I have enough information I'll start exploring again with improved search strategies to find exactly what we need. -""" \ No newline at end of file +""" diff --git a/aider/coders/navigator_prompts.py b/aider/coders/navigator_prompts.py index 5ae644d5a..dd70a7218 100644 --- a/aider/coders/navigator_prompts.py +++ b/aider/coders/navigator_prompts.py @@ -52,6 +52,13 @@ Act as an expert software engineer with the ability to autonomously navigate and Find files containing a specific symbol (function, class, variable). 
**Found files are automatically added to context as read-only.** Leverages the repo map for accurate symbol lookup. +- **Grep**: `[tool_call(Grep, pattern="my_variable", file_pattern="*.py", directory="src", use_regex=False, case_insensitive=False)]` + Search for lines matching a pattern in files using the best available tool (`rg`, `ag`, or `grep`). Returns matching lines with line numbers. + `file_pattern` (optional, default "*") filters files using glob syntax. + `directory` (optional, default ".") specifies the search directory relative to the repo root. + `use_regex` (optional, default False): If False, performs a literal/fixed string search. If True, uses basic Extended Regular Expression (ERE) syntax. + `case_insensitive` (optional, default False): If False (default), the search is case-sensitive. If True, the search is case-insensitive. + ### Context Management Tools - **View**: `[tool_call(View, file_path="src/main.py")]` Explicitly add a specific file to context as read-only. @@ -508,4 +515,4 @@ Let me explore the codebase more strategically this time: - I'll use tool calls to automatically continue exploration until I have enough information I'll start exploring again with improved search strategies to find exactly what we need. 
-""" \ No newline at end of file +""" diff --git a/aider/tools/grep.py b/aider/tools/grep.py new file mode 100644 index 000000000..79667459e --- /dev/null +++ b/aider/tools/grep.py @@ -0,0 +1,145 @@ +import shlex +import shutil +from pathlib import Path +from aider.run_cmd import run_cmd_subprocess + +def _find_search_tool(): + """Find the best available command-line search tool (rg, ag, grep).""" + if shutil.which('rg'): + return 'rg', shutil.which('rg') + elif shutil.which('ag'): + return 'ag', shutil.which('ag') + elif shutil.which('grep'): + return 'grep', shutil.which('grep') + else: + return None, None + +def _execute_grep(coder, pattern, file_pattern="*", directory=".", use_regex=False, case_insensitive=False): + """ + Search for lines matching a pattern in files within the project repository. + Uses rg (ripgrep), ag (the silver searcher), or grep, whichever is available. + + Args: + coder: The Coder instance. + pattern (str): The pattern to search for. + file_pattern (str, optional): Glob pattern to filter files. Defaults to "*". + directory (str, optional): Directory to search within relative to repo root. Defaults to ".". + use_regex (bool, optional): Whether the pattern is a regular expression. Defaults to False. + + Returns: + str: Formatted result indicating success or failure, including matching lines or error message. + """ + repo = coder.repo + if not repo: + coder.io.tool_error("Not in a git repository.") + return "Error: Not in a git repository." + + tool_name, tool_path = _find_search_tool() + if not tool_path: + coder.io.tool_error("No search tool (rg, ag, grep) found in PATH.") + return "Error: No search tool (rg, ag, grep) found." 
+ + try: + search_dir_path = Path(repo.root) / directory + if not search_dir_path.is_dir(): + coder.io.tool_error(f"Directory not found: {directory}") + return f"Error: Directory not found: {directory}" + + # Build the command arguments based on the available tool + cmd_args = [tool_path] + + # Common options or tool-specific equivalents + if tool_name in ['rg', 'grep']: + cmd_args.append("-n") # Line numbers for rg and grep + # ag includes line numbers by default + + # Case sensitivity + if case_insensitive: + cmd_args.append("-i") # Add case-insensitivity flag for all tools + + # Pattern type (regex vs fixed string) + if use_regex: + if tool_name == 'grep': + cmd_args.append("-E") # Use extended regex for grep + # rg and ag use regex by default, no flag needed for basic ERE + else: + if tool_name == 'rg': + cmd_args.append("-F") # Fixed strings for rg + elif tool_name == 'ag': + cmd_args.append("-Q") # Literal/fixed strings for ag + elif tool_name == 'grep': + cmd_args.append("-F") # Fixed strings for grep + + # File filtering + if file_pattern != "*": # Avoid adding glob if it's the default '*' which might behave differently + if tool_name == 'rg': + cmd_args.extend(["-g", file_pattern]) + elif tool_name == 'ag': + cmd_args.extend(["-G", file_pattern]) + elif tool_name == 'grep': + # grep needs recursive flag when filtering + cmd_args.append("-r") + cmd_args.append(f"--include={file_pattern}") + elif tool_name == 'grep': + # grep needs recursive flag even without include filter + cmd_args.append("-r") + + # Directory exclusion (rg and ag respect .gitignore/.git by default) + if tool_name == 'grep': + cmd_args.append("--exclude-dir=.git") + + # Add pattern and directory path + cmd_args.extend([pattern, str(search_dir_path)]) + + # Convert list to command string for run_cmd_subprocess + command_string = shlex.join(cmd_args) + + coder.io.tool_output(f"⚙️ Executing {tool_name}: {command_string}") + + # Use run_cmd_subprocess for execution + # Note: rg, ag, and grep 
return 1 if no matches are found, which is not an error for this tool. + exit_status, combined_output = run_cmd_subprocess( + command_string, + verbose=coder.verbose, + cwd=coder.root # Execute in the project root + ) + + # Format the output for the result message + output_content = combined_output or "" + + # Handle exit codes (consistent across rg, ag, grep) + if exit_status == 0: + # Limit output size if necessary + max_output_lines = 50 # Consider making this configurable + output_lines = output_content.splitlines() + if len(output_lines) > max_output_lines: + truncated_output = "\n".join(output_lines[:max_output_lines]) + result_message = f"Found matches (truncated):\n```text\n{truncated_output}\n... ({len(output_lines) - max_output_lines} more lines)\n```" + elif not output_content: + # Should not happen if return code is 0, but handle defensively + coder.io.tool_warning(f"{tool_name} returned 0 but produced no output.") + result_message = "No matches found (unexpected)." + else: + result_message = f"Found matches:\n```text\n{output_content}\n```" + return result_message + + elif exit_status == 1: + # Exit code 1 means no matches found - this is expected behavior, not an error. + return "No matches found." + else: + # Exit code > 1 indicates an actual error + error_message = f"{tool_name.capitalize()} command failed with exit code {exit_status}." + if output_content: + # Truncate error output as well if it's too long + error_limit = 1000 # Example limit for error output + if len(output_content) > error_limit: + output_content = output_content[:error_limit] + "\n... 
(error output truncated)" + error_message += f" Output:\n{output_content}" + coder.io.tool_error(error_message) + return f"Error: {error_message}" + + except Exception as e: + # Add command_string to the error message if it's defined + cmd_str_info = f"'{command_string}' " if 'command_string' in locals() else "" + coder.io.tool_error(f"Error executing {tool_name} command {cmd_str_info}: {str(e)}") + return f"Error executing {tool_name}: {str(e)}" From 7c0765d7f86adff25638652f7c74dad4c67e249a Mon Sep 17 00:00:00 2001 From: "Amar Sood (tekacs)" Date: Sat, 12 Apr 2025 13:54:46 -0400 Subject: [PATCH 52/63] Offer up Grep as an alternative to ViewFilesMatching --- aider/coders/navigator_legacy_prompts.py | 1 + aider/coders/navigator_prompts.py | 1 + 2 files changed, 2 insertions(+) diff --git a/aider/coders/navigator_legacy_prompts.py b/aider/coders/navigator_legacy_prompts.py index d0b62bf05..eb24d3a8d 100644 --- a/aider/coders/navigator_legacy_prompts.py +++ b/aider/coders/navigator_legacy_prompts.py @@ -97,6 +97,7 @@ When you include any tool call, the system will automatically continue to the ne ### Exploration Strategy - Use discovery tools (`ViewFilesAtGlob`, `ViewFilesMatching`, `Ls`, `ViewFilesWithSymbol`) to identify relevant files initially. **These tools automatically add found files to context as read-only.** +- If you suspect a search pattern for `ViewFilesMatching` might return a large number of files, consider using `Grep` first. `Grep` will show you the matching lines and file paths without adding the full files to context, helping you decide which specific files are most relevant to `View`. - Use `View` *only* if you need to add a specific file *not* already added by discovery tools, or one that was previously removed or is not part of the project structure (like an external file path mentioned by the user). - Remove irrelevant files with `Remove` to maintain focus. 
- Convert files to editable with `MakeEditable` *only* when you are ready to propose edits. diff --git a/aider/coders/navigator_prompts.py b/aider/coders/navigator_prompts.py index dd70a7218..ae1e82736 100644 --- a/aider/coders/navigator_prompts.py +++ b/aider/coders/navigator_prompts.py @@ -147,6 +147,7 @@ When you include any tool call, the system will automatically continue to the ne ### Exploration Strategy - Use discovery tools (`ViewFilesAtGlob`, `ViewFilesMatching`, `Ls`, `ViewFilesWithSymbol`) to identify relevant files initially. **These tools automatically add found files to context as read-only.** +- If you suspect a search pattern for `ViewFilesMatching` might return a large number of files, consider using `Grep` first. `Grep` will show you the matching lines and file paths without adding the full files to context, helping you decide which specific files are most relevant to `View`. - Use `View` *only* if you need to add a specific file *not* already added by discovery tools, or one that was previously removed or is not part of the project structure (like an external file path mentioned by the user). - Remove irrelevant files with `Remove` to maintain focus. - Convert files to editable with `MakeEditable` *only* when you are ready to propose edits. 
From 9f8fde24111c56f3f99eecbed401f87426300315 Mon Sep 17 00:00:00 2001 From: "Amar Sood (tekacs)" Date: Sat, 12 Apr 2025 13:57:06 -0400 Subject: [PATCH 53/63] Add context_before and context_after arguments to Grep tool --- aider/coders/navigator_coder.py | 5 ++++- aider/coders/navigator_legacy_prompts.py | 6 ++++-- aider/coders/navigator_prompts.py | 6 ++++-- aider/tools/grep.py | 13 ++++++++++++- 4 files changed, 24 insertions(+), 6 deletions(-) diff --git a/aider/coders/navigator_coder.py b/aider/coders/navigator_coder.py index 4f93f41aa..48f280864 100644 --- a/aider/coders/navigator_coder.py +++ b/aider/coders/navigator_coder.py @@ -769,11 +769,14 @@ class NavigatorCoder(Coder): directory = params.get('directory', '.') # Default to current directory use_regex = params.get('use_regex', False) # Default to literal search case_insensitive = params.get('case_insensitive', False) # Default to case-sensitive + context_before = params.get('context_before', 5) + context_after = params.get('context_after', 5) + if pattern is not None: # Import the function if not already imported (it should be) from aider.tools.grep import _execute_grep - result_message = _execute_grep(self, pattern, file_pattern, directory, use_regex, case_insensitive) + result_message = _execute_grep(self, pattern, file_pattern, directory, use_regex, case_insensitive, context_before, context_after) else: result_message = "Error: Missing required 'pattern' parameter for Grep" diff --git a/aider/coders/navigator_legacy_prompts.py b/aider/coders/navigator_legacy_prompts.py index eb24d3a8d..2c5b60a46 100644 --- a/aider/coders/navigator_legacy_prompts.py +++ b/aider/coders/navigator_legacy_prompts.py @@ -52,12 +52,14 @@ Act as an expert software engineer with the ability to autonomously navigate and Find files containing a specific symbol (function, class, variable). **Found files are automatically added to context as read-only.** Leverages the repo map for accurate symbol lookup. 
-- **Grep**: `[tool_call(Grep, pattern="my_variable", file_pattern="*.py", directory="src", use_regex=False, case_insensitive=False)]` - Search for lines matching a pattern in files using the best available tool (`rg`, `ag`, or `grep`). Returns matching lines with line numbers. +- **Grep**: `[tool_call(Grep, pattern="my_variable", file_pattern="*.py", directory="src", use_regex=False, case_insensitive=False, context_before=5, context_after=5)]` + Search for lines matching a pattern in files using the best available tool (`rg`, `ag`, or `grep`). Returns matching lines with line numbers and context. `file_pattern` (optional, default "*") filters files using glob syntax. `directory` (optional, default ".") specifies the search directory relative to the repo root. `use_regex` (optional, default False): If False, performs a literal/fixed string search. If True, uses basic Extended Regular Expression (ERE) syntax. `case_insensitive` (optional, default False): If False (default), the search is case-sensitive. If True, the search is case-insensitive. + `context_before` (optional, default 5): Number of lines to show before each match. + `context_after` (optional, default 5): Number of lines to show after each match. ### Context Management Tools - **View**: `[tool_call(View, file_path="src/main.py")]` diff --git a/aider/coders/navigator_prompts.py b/aider/coders/navigator_prompts.py index ae1e82736..335fd3ec6 100644 --- a/aider/coders/navigator_prompts.py +++ b/aider/coders/navigator_prompts.py @@ -52,12 +52,14 @@ Act as an expert software engineer with the ability to autonomously navigate and Find files containing a specific symbol (function, class, variable). **Found files are automatically added to context as read-only.** Leverages the repo map for accurate symbol lookup. 
-- **Grep**: `[tool_call(Grep, pattern="my_variable", file_pattern="*.py", directory="src", use_regex=False, case_insensitive=False)]` - Search for lines matching a pattern in files using the best available tool (`rg`, `ag`, or `grep`). Returns matching lines with line numbers. +- **Grep**: `[tool_call(Grep, pattern="my_variable", file_pattern="*.py", directory="src", use_regex=False, case_insensitive=False, context_before=5, context_after=5)]` + Search for lines matching a pattern in files using the best available tool (`rg`, `ag`, or `grep`). Returns matching lines with line numbers and context. `file_pattern` (optional, default "*") filters files using glob syntax. `directory` (optional, default ".") specifies the search directory relative to the repo root. `use_regex` (optional, default False): If False, performs a literal/fixed string search. If True, uses basic Extended Regular Expression (ERE) syntax. `case_insensitive` (optional, default False): If False (default), the search is case-sensitive. If True, the search is case-insensitive. + `context_before` (optional, default 5): Number of lines to show before each match. + `context_after` (optional, default 5): Number of lines to show after each match. ### Context Management Tools - **View**: `[tool_call(View, file_path="src/main.py")]` diff --git a/aider/tools/grep.py b/aider/tools/grep.py index 79667459e..26f9581e6 100644 --- a/aider/tools/grep.py +++ b/aider/tools/grep.py @@ -14,7 +14,7 @@ def _find_search_tool(): else: return None, None -def _execute_grep(coder, pattern, file_pattern="*", directory=".", use_regex=False, case_insensitive=False): +def _execute_grep(coder, pattern, file_pattern="*", directory=".", use_regex=False, case_insensitive=False, context_before=5, context_after=5): """ Search for lines matching a pattern in files within the project repository. Uses rg (ripgrep), ag (the silver searcher), or grep, whichever is available. 
@@ -25,6 +25,9 @@ def _execute_grep(coder, pattern, file_pattern="*", directory=".", use_regex=Fal file_pattern (str, optional): Glob pattern to filter files. Defaults to "*". directory (str, optional): Directory to search within relative to repo root. Defaults to ".". use_regex (bool, optional): Whether the pattern is a regular expression. Defaults to False. + case_insensitive (bool, optional): Whether the search should be case-insensitive. Defaults to False. + context_before (int, optional): Number of context lines to show before matches. Defaults to 5. + context_after (int, optional): Number of context lines to show after matches. Defaults to 5. Returns: str: Formatted result indicating success or failure, including matching lines or error message. @@ -53,6 +56,14 @@ def _execute_grep(coder, pattern, file_pattern="*", directory=".", use_regex=Fal cmd_args.append("-n") # Line numbers for rg and grep # ag includes line numbers by default + # Context lines (Before and After) + if context_before > 0: + # All tools use -B for lines before + cmd_args.extend(["-B", str(context_before)]) + if context_after > 0: + # All tools use -A for lines after + cmd_args.extend(["-A", str(context_after)]) + # Case sensitivity if case_insensitive: cmd_args.append("-i") # Add case-insensitivity flag for all tools From da63acde22a332bf2883bd1f8d55052cc54bccf2 Mon Sep 17 00:00:00 2001 From: "Amar Sood (tekacs)" Date: Sat, 12 Apr 2025 14:01:53 -0400 Subject: [PATCH 54/63] Fix missing argument in ViewFilesMatching --- aider/coders/navigator_coder.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/aider/coders/navigator_coder.py b/aider/coders/navigator_coder.py index 48f280864..14a87e2b8 100644 --- a/aider/coders/navigator_coder.py +++ b/aider/coders/navigator_coder.py @@ -706,8 +706,9 @@ class NavigatorCoder(Coder): elif norm_tool_name == 'viewfilesmatching': pattern = params.get('pattern') file_pattern = params.get('file_pattern') # Optional + regex = 
params.get('regex', False) # Default to False if not provided if pattern is not None: - result_message = execute_view_files_matching(self, pattern, file_pattern) + result_message = execute_view_files_matching(self, pattern, file_pattern, regex) else: result_message = "Error: Missing 'pattern' parameter for ViewFilesMatching" elif norm_tool_name == 'ls': From 6b901b84ba949ca7d89b5aa32fcb8d4e2d2eccab Mon Sep 17 00:00:00 2001 From: "Amar Sood (tekacs)" Date: Sat, 12 Apr 2025 14:33:24 -0400 Subject: [PATCH 55/63] feat: enhance context summary with additional context blocks and token counts --- aider/coders/navigator_coder.py | 86 +++++++++++++++++++-------------- 1 file changed, 49 insertions(+), 37 deletions(-) diff --git a/aider/coders/navigator_coder.py b/aider/coders/navigator_coder.py index 14a87e2b8..2a21bdedb 100644 --- a/aider/coders/navigator_coder.py +++ b/aider/coders/navigator_coder.py @@ -234,84 +234,96 @@ class NavigatorCoder(Coder): def get_context_summary(self): """ - Generate a summary of the current file context, including editable and read-only files, - along with token counts to encourage proactive context management. + Generate a summary of the current context, including file content tokens and additional context blocks, + with an accurate total token count. 
""" if not self.use_enhanced_context: return None - try: result = "\n" result += "## Current Context Overview\n\n" - - # Get model context limits max_input_tokens = self.main_model.info.get("max_input_tokens") or 0 max_output_tokens = self.main_model.info.get("max_output_tokens") or 0 if max_input_tokens: result += f"Model context limit: {max_input_tokens:,} tokens\n\n" - - # Calculate total tokens in context - total_tokens = 0 + + total_file_tokens = 0 editable_tokens = 0 readonly_tokens = 0 - - # Track editable files + editable_files = [] + readonly_files = [] + + # Editable files if self.abs_fnames: result += "### Editable Files\n\n" - editable_files = [] - for fname in sorted(self.abs_fnames): rel_fname = self.get_rel_fname(fname) content = self.io.read_text(fname) if content is not None: - token_count = self.main_model.token_count(content) - total_tokens += token_count - editable_tokens += token_count - size_indicator = "🔴 Large" if token_count > 5000 else ("🟡 Medium" if token_count > 1000 else "🟢 Small") - editable_files.append(f"- {rel_fname}: {token_count:,} tokens ({size_indicator})") - + tokens = self.main_model.token_count(content) + total_file_tokens += tokens + editable_tokens += tokens + size_indicator = "🔴 Large" if tokens > 5000 else ("🟡 Medium" if tokens > 1000 else "🟢 Small") + editable_files.append(f"- {rel_fname}: {tokens:,} tokens ({size_indicator})") if editable_files: result += "\n".join(editable_files) + "\n\n" result += f"**Total editable: {len(editable_files)} files, {editable_tokens:,} tokens**\n\n" else: result += "No editable files in context\n\n" - - # Track read-only files + + # Read-only files if self.abs_read_only_fnames: result += "### Read-Only Files\n\n" - readonly_files = [] - for fname in sorted(self.abs_read_only_fnames): rel_fname = self.get_rel_fname(fname) content = self.io.read_text(fname) if content is not None: - token_count = self.main_model.token_count(content) - total_tokens += token_count - readonly_tokens += 
token_count - size_indicator = "🔴 Large" if token_count > 5000 else ("🟡 Medium" if token_count > 1000 else "🟢 Small") - readonly_files.append(f"- {rel_fname}: {token_count:,} tokens ({size_indicator})") - + tokens = self.main_model.token_count(content) + total_file_tokens += tokens + readonly_tokens += tokens + size_indicator = "🔴 Large" if tokens > 5000 else ("🟡 Medium" if tokens > 1000 else "🟢 Small") + readonly_files.append(f"- {rel_fname}: {tokens:,} tokens ({size_indicator})") if readonly_files: result += "\n".join(readonly_files) + "\n\n" result += f"**Total read-only: {len(readonly_files)} files, {readonly_tokens:,} tokens**\n\n" else: result += "No read-only files in context\n\n" - - # Summary and recommendations + + # Additional enhanced context blocks + env_info = self.get_environment_info() + dir_structure = self.get_directory_structure() + git_status = self.get_git_status() + symbol_outline = self.get_context_symbol_outline() + + extra_context = "" + extra_tokens = 0 + if env_info: + extra_context += env_info + "\n\n" + extra_tokens += self.main_model.token_count(env_info) + if dir_structure: + extra_context += dir_structure + "\n\n" + extra_tokens += self.main_model.token_count(dir_structure) + if git_status: + extra_context += git_status + "\n\n" + extra_tokens += self.main_model.token_count(git_status) + if symbol_outline: + extra_context += symbol_outline + "\n\n" + extra_tokens += self.main_model.token_count(symbol_outline) + + total_tokens = total_file_tokens + extra_tokens + + result += f"**Total files usage: {total_file_tokens:,} tokens**\n\n" + result += f"**Additional context usage: {extra_tokens:,} tokens**\n\n" result += f"**Total context usage: {total_tokens:,} tokens**" - if max_input_tokens: percentage = (total_tokens / max_input_tokens) * 100 result += f" ({percentage:.1f}% of limit)" - if percentage > 80: - result += "\n\n⚠️ **Context is getting full!** Consider removing files with:\n" - result += "- `[tool_call(Remove, 
file_path=\"path/to/large_file.ext\")]` for files no longer needed\n" - result += "- Focus on keeping only essential files in context for best results" - + result += "\n\n⚠️ **Context is getting full!** Remove non-essential files via:\n" + result += "- `[tool_call(Remove, file_path=\"path/to/large_file.ext\")]`\n" + result += "- Keep only essential files in context for best performance" result += "\n" return result - except Exception as e: self.io.tool_error(f"Error generating context summary: {str(e)}") return None From 209518b73fc69dcea6e26f288bd229a425f73857 Mon Sep 17 00:00:00 2001 From: "Amar Sood (tekacs)" Date: Sat, 12 Apr 2025 14:35:04 -0400 Subject: [PATCH 56/63] Remove some unnecessary code --- aider/coders/navigator_coder.py | 3 --- 1 file changed, 3 deletions(-) diff --git a/aider/coders/navigator_coder.py b/aider/coders/navigator_coder.py index 2a21bdedb..7b211d444 100644 --- a/aider/coders/navigator_coder.py +++ b/aider/coders/navigator_coder.py @@ -49,7 +49,6 @@ from aider.tools.undo_change import _execute_undo_change from aider.tools.list_changes import _execute_list_changes from aider.tools.extract_lines import _execute_extract_lines from aider.tools.show_numbered_context import execute_show_numbered_context -from aider.tools.view_files_with_symbol import _execute_view_files_with_symbol # Import the function class NavigatorCoder(Coder): @@ -1513,7 +1512,6 @@ Just reply with fixed versions of the {blocks} above that failed to match. return True - # ------------------- Helper for finding occurrences ------------------- def _find_occurrences(self, content, pattern, near_context=None): """Find all occurrences of pattern, optionally filtered by near_context.""" @@ -1540,7 +1538,6 @@ Just reply with fixed versions of the {blocks} above that failed to match. 
# ------------------- Helper for finding occurrences ------------------- - # ------------------- Granular Editing Tools ------------------- From b4141c65378ab84b9f4a06a6722dd582d453d85a Mon Sep 17 00:00:00 2001 From: "Amar Sood (tekacs)" Date: Sun, 13 Apr 2025 13:56:22 -0400 Subject: [PATCH 57/63] Move enhanced context to the end, to help prompt caching --- aider/coders/navigator_coder.py | 34 ++++++++++++++++++++++----------- 1 file changed, 23 insertions(+), 11 deletions(-) diff --git a/aider/coders/navigator_coder.py b/aider/coders/navigator_coder.py index 7b211d444..b9a3d0119 100644 --- a/aider/coders/navigator_coder.py +++ b/aider/coders/navigator_coder.py @@ -182,11 +182,16 @@ class NavigatorCoder(Coder): """ Override parent's format_chat_chunks to include enhanced context blocks with a cleaner, more hierarchical structure for better organization. + + Optimized for prompt caching: enhanced context blocks are inserted after static + chat elements (system, examples, repo, readonly_files, done) but before variable + elements (chat_files, cur, reminder) to preserve prefix caching while providing + fresh context information. """ # First get the normal chat chunks from the parent method chunks = super().format_chat_chunks() # Calls BaseCoder's format_chat_chunks - # If enhanced context blocks are enabled, prepend them to the current messages + # If enhanced context blocks are enabled, insert them in a strategic position if self.use_enhanced_context: # Create environment info context block env_context = self.get_environment_info() @@ -216,18 +221,25 @@ class NavigatorCoder(Coder): if symbol_outline: # Add the new block if it was generated context_blocks.append(symbol_outline) - # If we have any context blocks, prepend them to the system message - if context_blocks: + # Insert a fresh context update as a separate message before current messages + # This preserves cacheable prefix portions (system, examples, repo, etc.) 
+ # while still providing fresh context information + if context_blocks and chunks.cur: context_message = "\n\n".join(context_blocks) - # Prepend to system context but don't overwrite existing system content - if chunks.system: - # If we already have system messages, append our context to the first one - original_content = chunks.system[0]["content"] - # Ensure there's separation between our blocks and the original prompt - chunks.system[0]["content"] = context_message + "\n\n" + original_content + # Insert fresh context as a system message right before the first user message in cur + for i, msg in enumerate(chunks.cur): + if msg["role"] == "user": + # Insert context message right before the first user message + chunks.cur.insert(i, dict(role="system", content=context_message)) + break else: - # Otherwise, create a new system message - chunks.system = [dict(role="system", content=context_message)] + # If no user message found, append to the end of chat_files + # (just before any existing cur messages) + chunks.chat_files.append(dict(role="system", content=context_message)) + elif context_blocks: + # If there are context blocks but no cur messages, append to chat_files + context_message = "\n\n".join(context_blocks) + chunks.chat_files.append(dict(role="system", content=context_message)) return chunks From b36aaa6f6227162b6ebbe36d7fd057ad2d644103 Mon Sep 17 00:00:00 2001 From: "Amar Sood (tekacs)" Date: Sun, 13 Apr 2025 14:53:28 -0400 Subject: [PATCH 58/63] feat: make search/replace blocks trigger another turn in navigator mode --- aider/coders/navigator_coder.py | 25 +++++++++++++++++++++++++ 1 file changed, 25 insertions(+) diff --git a/aider/coders/navigator_coder.py b/aider/coders/navigator_coder.py index b9a3d0119..92142b6b4 100644 --- a/aider/coders/navigator_coder.py +++ b/aider/coders/navigator_coder.py @@ -442,6 +442,31 @@ class NavigatorCoder(Coder): # return False to trigger a reflection loop. 
if self.reflected_message: return False + + # If edits were successfully applied and we haven't exceeded reflection limits, + # set up for another iteration (similar to tool calls) + if edited_files and self.num_reflections < self.max_reflections: + # Get the original user question from the most recent user message + if self.cur_messages and len(self.cur_messages) >= 1: + for msg in reversed(self.cur_messages): + if msg["role"] == "user": + original_question = msg["content"] + break + else: + # Default if no user message found + original_question = "Please continue your exploration and provide a final answer." + + # Construct the message for the next turn + next_prompt = ( + "I have applied the edits you suggested. " + f"The following files were modified: {', '.join(edited_files)}. " + "Let me continue working on your request.\n\n" + f"Your original question was: {original_question}" + ) + + self.reflected_message = next_prompt + self.io.tool_output("Continuing after applying edits...") + return False # Indicate that we need another iteration # If any tool calls were found and we haven't exceeded reflection limits, set up for another iteration # This is implicit continuation when any tool calls are present, rather than requiring Continue explicitly From 930880151ef5be211c67fc6806093966c0dc136c Mon Sep 17 00:00:00 2001 From: "Amar Sood (tekacs)" Date: Sun, 13 Apr 2025 15:42:09 -0400 Subject: [PATCH 59/63] Try to improve InsertBlock --- aider/coders/navigator_prompts.py | 12 ++- aider/tools/insert_block.py | 141 +++++++++++++++++++++++------- aider/tools/tool_utils.py | 17 ++-- 3 files changed, 124 insertions(+), 46 deletions(-) diff --git a/aider/coders/navigator_prompts.py b/aider/coders/navigator_prompts.py index 335fd3ec6..52f87324a 100644 --- a/aider/coders/navigator_prompts.py +++ b/aider/coders/navigator_prompts.py @@ -84,8 +84,16 @@ Act as an expert software engineer with the ability to autonomously navigate and Replace ALL occurrences of text. 
Use with caution. `dry_run=True` simulates the change. *Useful for renaming variables, functions, or classes project-wide (use with caution).* -- **InsertBlock**: `[tool_call(InsertBlock, file_path="...", content="...", after_pattern="...", near_context="...", occurrence=1, dry_run=False)]` - Insert a block after (`after_pattern`) or before (`before_pattern`) a pattern line. Use `near_context` and `occurrence` (optional, default 1, -1 for last) to specify which pattern match. `dry_run=True` simulates. +- **InsertBlock**: `[tool_call(InsertBlock, file_path="...", content="...", after_pattern="...", before_pattern="...", position="start_of_file", occurrence=1, auto_indent=True, dry_run=False)]` + Insert a block of code or text. Specify *exactly one* location: + - `after_pattern`: Insert after lines matching this pattern (use multi-line patterns for uniqueness) + - `before_pattern`: Insert before lines matching this pattern (use multi-line patterns for uniqueness) + - `position`: Use "start_of_file" or "end_of_file" + + Optional parameters: + - `occurrence`: Which match to use (1-based indexing: 1 for first match, 2 for second, -1 for last match) + - `auto_indent`: Automatically adjust indentation to match surrounding code (default True) + - `dry_run`: Simulate the change without applying it (default False) *Useful for adding new functions, methods, or blocks of configuration.* - **DeleteBlock**: `[tool_call(DeleteBlock, file_path="...", start_pattern="...", end_pattern="...", near_context="...", occurrence=1, dry_run=False)]` diff --git a/aider/tools/insert_block.py b/aider/tools/insert_block.py index 75443e987..85e24dd4e 100644 --- a/aider/tools/insert_block.py +++ b/aider/tools/insert_block.py @@ -1,4 +1,5 @@ import os +import re import traceback from .tool_utils import ( ToolError, @@ -11,39 +12,108 @@ from .tool_utils import ( generate_unified_diff_snippet, ) -def _execute_insert_block(coder, file_path, content, after_pattern=None, before_pattern=None, 
near_context=None, occurrence=1, change_id=None, dry_run=False): +def _execute_insert_block(coder, file_path, content, after_pattern=None, before_pattern=None, + occurrence=1, change_id=None, dry_run=False, + position=None, auto_indent=True, use_regex=False): """ Insert a block of text after or before a specified pattern using utility functions. + + Args: + coder: The coder instance + file_path: Path to the file to modify + content: The content to insert + after_pattern: Pattern to insert after (mutually exclusive with before_pattern and position) + before_pattern: Pattern to insert before (mutually exclusive with after_pattern and position) + occurrence: Which occurrence of the pattern to use (1-based, or -1 for last) + change_id: Optional ID for tracking changes + dry_run: If True, only simulate the change + position: Special position like "start_of_file" or "end_of_file" + auto_indent: If True, automatically adjust indentation of inserted content + use_regex: If True, treat patterns as regular expressions """ tool_name = "InsertBlock" try: # 1. Validate parameters - if after_pattern and before_pattern: - raise ToolError("Cannot specify both after_pattern and before_pattern") - if not after_pattern and not before_pattern: - raise ToolError("Must specify either after_pattern or before_pattern") + if sum(x is not None for x in [after_pattern, before_pattern, position]) != 1: + raise ToolError("Must specify exactly one of: after_pattern, before_pattern, or position") # 2. Validate file and get content abs_path, rel_path, original_content = validate_file_for_edit(coder, file_path) lines = original_content.splitlines() + + # Handle empty files + if not lines: + lines = [""] - # 3. Find the target line index - pattern = after_pattern if after_pattern else before_pattern - pattern_type = "after" if after_pattern else "before" - pattern_desc = f"Pattern '{pattern}'" - if near_context: - pattern_desc += f" near context '{near_context}'" + # 3. 
Determine insertion point + insertion_line_idx = 0 + pattern_type = "" + pattern_desc = "" + occurrence_str = "" + + if position: + # Handle special positions + if position == "start_of_file": + insertion_line_idx = 0 + pattern_type = "at start of" + elif position == "end_of_file": + insertion_line_idx = len(lines) + pattern_type = "at end of" + else: + raise ToolError(f"Invalid position: '{position}'. Valid values are 'start_of_file' or 'end_of_file'") + else: + # Handle pattern-based insertion + pattern = after_pattern if after_pattern else before_pattern + pattern_type = "after" if after_pattern else "before" + pattern_desc = f"Pattern '{pattern}'" + + # Find pattern matches + pattern_line_indices = find_pattern_indices(lines, pattern, + use_regex=use_regex) + + # Select the target occurrence + target_line_idx = select_occurrence_index(pattern_line_indices, occurrence, pattern_desc) + + # Determine insertion point + insertion_line_idx = target_line_idx + if pattern_type == "after": + insertion_line_idx += 1 # Insert on the line *after* the matched line + + # Format occurrence info for output + num_occurrences = len(pattern_line_indices) + occurrence_str = f"occurrence {occurrence} of " if num_occurrences > 1 else "" - pattern_line_indices = find_pattern_indices(lines, pattern, near_context) - target_line_idx = select_occurrence_index(pattern_line_indices, occurrence, pattern_desc) - - # Determine the final insertion line index - insertion_line_idx = target_line_idx - if pattern_type == "after": - insertion_line_idx += 1 # Insert on the line *after* the matched line - - # 4. Prepare the insertion + # 4. 
Handle indentation if requested content_lines = content.splitlines() + + if auto_indent and content_lines: + # Determine base indentation level + base_indent = "" + if insertion_line_idx > 0 and lines: + # Use indentation from the line before insertion point + reference_line_idx = min(insertion_line_idx - 1, len(lines) - 1) + reference_line = lines[reference_line_idx] + base_indent = re.match(r'^(\s*)', reference_line).group(1) + + # Apply indentation to content lines, preserving relative indentation + if content_lines: + # Find minimum indentation in content to preserve relative indentation + content_indents = [len(re.match(r'^(\s*)', line).group(1)) for line in content_lines if line.strip()] + min_content_indent = min(content_indents) if content_indents else 0 + + # Apply base indentation while preserving relative indentation + indented_content_lines = [] + for line in content_lines: + if not line.strip(): # Empty or whitespace-only line + indented_content_lines.append("") + else: + # Remove existing indentation and add new base indentation + stripped_line = line[min_content_indent:] if min_content_indent <= len(line) else line + indented_content_lines.append(base_indent + stripped_line) + + content_lines = indented_content_lines + + # 5. Prepare the insertion new_lines = lines[:insertion_line_idx] + content_lines + lines[insertion_line_idx:] new_content = '\n'.join(new_lines) @@ -51,31 +121,38 @@ def _execute_insert_block(coder, file_path, content, after_pattern=None, before_ coder.io.tool_warning(f"No changes made: insertion would not change file") return f"Warning: No changes made (insertion would not change file)" - # 5. Generate diff for feedback + # 6. Generate diff for feedback diff_snippet = generate_unified_diff_snippet(original_content, new_content, rel_path) - num_occurrences = len(pattern_line_indices) - occurrence_str = f"occurrence {occurrence} of " if num_occurrences > 1 else "" - - # 6. Handle dry run + + # 7. 
Handle dry run if dry_run: - dry_run_message = f"Dry run: Would insert block {pattern_type} {occurrence_str}pattern '{pattern}' in {file_path} at line {insertion_line_idx + 1}." + if position: + dry_run_message = f"Dry run: Would insert block {pattern_type} {file_path}." + else: + dry_run_message = f"Dry run: Would insert block {pattern_type} {occurrence_str}pattern '{pattern}' in {file_path} at line {insertion_line_idx + 1}." return format_tool_result(coder, tool_name, "", dry_run=True, dry_run_message=dry_run_message, diff_snippet=diff_snippet) - # 7. Apply Change (Not dry run) + # 8. Apply Change (Not dry run) metadata = { 'insertion_line_idx': insertion_line_idx, 'after_pattern': after_pattern, 'before_pattern': before_pattern, - 'near_context': near_context, + 'position': position, 'occurrence': occurrence, - 'content': content + 'content': content, + 'auto_indent': auto_indent, + 'use_regex': use_regex } final_change_id = apply_change( coder, abs_path, rel_path, original_content, new_content, 'insertblock', metadata, change_id ) - # 8. Format and return result - success_message = f"Inserted block {pattern_type} {occurrence_str}pattern in {file_path} at line {insertion_line_idx + 1}" + # 9. 
Format and return result + if position: + success_message = f"Inserted block {pattern_type} {file_path}" + else: + success_message = f"Inserted block {pattern_type} {occurrence_str}pattern in {file_path} at line {insertion_line_idx + 1}" + return format_tool_result( coder, tool_name, success_message, change_id=final_change_id, diff_snippet=diff_snippet ) @@ -86,4 +163,4 @@ def _execute_insert_block(coder, file_path, content, after_pattern=None, before_ except Exception as e: coder.io.tool_error(f"Error in InsertBlock: {str(e)}\n{traceback.format_exc()}") # Add traceback - return f"Error: {str(e)}" \ No newline at end of file + return f"Error: {str(e)}" diff --git a/aider/tools/tool_utils.py b/aider/tools/tool_utils.py index 19ff1b4f1..8c43ca5bb 100644 --- a/aider/tools/tool_utils.py +++ b/aider/tools/tool_utils.py @@ -1,5 +1,6 @@ import difflib import os +import re import traceback class ToolError(Exception): @@ -46,20 +47,12 @@ def validate_file_for_edit(coder, file_path): return abs_path, rel_path, content -def find_pattern_indices(lines, pattern, near_context=None): - """Finds all line indices matching a pattern, optionally filtered by context.""" +def find_pattern_indices(lines, pattern, use_regex=False): + """Finds all line indices matching a pattern.""" indices = [] for i, line in enumerate(lines): - if pattern in line: - if near_context: - # Check if near_context is within a window around the match - context_window_start = max(0, i - 5) # Check 5 lines before/after - context_window_end = min(len(lines), i + 6) - context_block = "\n".join(lines[context_window_start:context_window_end]) - if near_context in context_block: - indices.append(i) - else: - indices.append(i) + if (use_regex and re.search(pattern, line)) or (not use_regex and pattern in line): + indices.append(i) return indices def select_occurrence_index(indices, occurrence, pattern_desc="Pattern"): From 02c092afff936d70bf03d2800553f635db8a21e9 Mon Sep 17 00:00:00 2001 From: "Amar Sood (tekacs)" 
Date: Sun, 13 Apr 2025 15:42:09 -0400 Subject: [PATCH 60/63] Try for more cache-friendly prompt ordering --- aider/coders/navigator_coder.py | 213 +++++++++++++++++++++++--------- 1 file changed, 158 insertions(+), 55 deletions(-) diff --git a/aider/coders/navigator_coder.py b/aider/coders/navigator_coder.py index 92142b6b4..9940a7147 100644 --- a/aider/coders/navigator_coder.py +++ b/aider/coders/navigator_coder.py @@ -15,7 +15,7 @@ from xml.etree.ElementTree import ParseError # Add necessary imports if not already present from collections import defaultdict -from .base_coder import Coder +from .base_coder import Coder, ChatChunks from .editblock_coder import find_original_update_blocks, do_replace, find_similar_lines from .navigator_prompts import NavigatorPrompts from .navigator_legacy_prompts import NavigatorLegacyPrompts @@ -183,63 +183,164 @@ class NavigatorCoder(Coder): Override parent's format_chat_chunks to include enhanced context blocks with a cleaner, more hierarchical structure for better organization. - Optimized for prompt caching: enhanced context blocks are inserted after static - chat elements (system, examples, repo, readonly_files, done) but before variable - elements (chat_files, cur, reminder) to preserve prefix caching while providing - fresh context information. + Optimized for prompt caching by placing context blocks strategically: + 1. Relatively static blocks (directory structure, environment info) before done_messages + 2. Dynamic blocks (context summary, symbol outline, git status) after chat_files + + This approach preserves prefix caching while providing fresh context information. 
""" - # First get the normal chat chunks from the parent method - chunks = super().format_chat_chunks() # Calls BaseCoder's format_chat_chunks + # First get the normal chat chunks from the parent method without calling super + # We'll manually build the chunks to control placement of context blocks + chunks = self.format_chat_chunks_base() + + # If enhanced context blocks are not enabled, just return the base chunks + if not self.use_enhanced_context: + return chunks + + # Generate all context blocks + env_context = self.get_environment_info() + context_summary = self.get_context_summary() + dir_structure = self.get_directory_structure() + git_status = self.get_git_status() + symbol_outline = self.get_context_symbol_outline() + + # 1. Add relatively static blocks BEFORE done_messages + # These blocks change less frequently and can be part of the cacheable prefix + static_blocks = [] + if dir_structure: + static_blocks.append(dir_structure) + if env_context: + static_blocks.append(env_context) + + if static_blocks: + static_message = "\n\n".join(static_blocks) + # Insert as a system message right before done_messages + chunks.done.insert(0, dict(role="system", content=static_message)) + + # 2. Add dynamic blocks AFTER chat_files + # These blocks change with the current files in context + dynamic_blocks = [] + if context_summary: + dynamic_blocks.append(context_summary) + if symbol_outline: + dynamic_blocks.append(symbol_outline) + if git_status: + dynamic_blocks.append(git_status) + + if dynamic_blocks: + dynamic_message = "\n\n".join(dynamic_blocks) + # Append as a system message after chat_files + chunks.chat_files.append(dict(role="system", content=dynamic_message)) + + return chunks + + def format_chat_chunks_base(self): + """ + Create base chat chunks without enhanced context blocks. + This is a copy of the parent's format_chat_chunks method to avoid + calling super() which would create a recursive loop. 
+ """ + self.choose_fence() + main_sys = self.fmt_system_prompt(self.gpt_prompts.main_system) - # If enhanced context blocks are enabled, insert them in a strategic position - if self.use_enhanced_context: - # Create environment info context block - env_context = self.get_environment_info() + example_messages = [] + if self.main_model.examples_as_sys_msg: + if self.gpt_prompts.example_messages: + main_sys += "\n# Example conversations:\n\n" + for msg in self.gpt_prompts.example_messages: + role = msg["role"] + content = self.fmt_system_prompt(msg["content"]) + main_sys += f"## {role.upper()}: {content}\n\n" + main_sys = main_sys.strip() + else: + for msg in self.gpt_prompts.example_messages: + example_messages.append( + dict( + role=msg["role"], + content=self.fmt_system_prompt(msg["content"]), + ) + ) + if self.gpt_prompts.example_messages: + example_messages += [ + dict( + role="user", + content=( + "I switched to a new code base. Please don't consider the above files" + " or try to edit them any longer." 
+ ), + ), + dict(role="assistant", content="Ok."), + ] - # Get current context summary - context_summary = self.get_context_summary() + if self.gpt_prompts.system_reminder: + main_sys += "\n" + self.fmt_system_prompt(self.gpt_prompts.system_reminder) - # Get directory structure - dir_structure = self.get_directory_structure() + chunks = ChatChunks() - # Get git status - git_status = self.get_git_status() + if self.main_model.use_system_prompt: + chunks.system = [ + dict(role="system", content=main_sys), + ] + else: + chunks.system = [ + dict(role="user", content=main_sys), + dict(role="assistant", content="Ok."), + ] - # Get symbol outline for current context files - symbol_outline = self.get_context_symbol_outline() + chunks.examples = example_messages - # Collect all context blocks that exist - context_blocks = [] - if env_context: - context_blocks.append(env_context) - if context_summary: - context_blocks.append(context_summary) - if dir_structure: - context_blocks.append(dir_structure) - if git_status: - context_blocks.append(git_status) - if symbol_outline: # Add the new block if it was generated - context_blocks.append(symbol_outline) + self.summarize_end() + chunks.done = self.done_messages - # Insert a fresh context update as a separate message before current messages - # This preserves cacheable prefix portions (system, examples, repo, etc.) 
- # while still providing fresh context information - if context_blocks and chunks.cur: - context_message = "\n\n".join(context_blocks) - # Insert fresh context as a system message right before the first user message in cur - for i, msg in enumerate(chunks.cur): - if msg["role"] == "user": - # Insert context message right before the first user message - chunks.cur.insert(i, dict(role="system", content=context_message)) - break - else: - # If no user message found, append to the end of chat_files - # (just before any existing cur messages) - chunks.chat_files.append(dict(role="system", content=context_message)) - elif context_blocks: - # If there are context blocks but no cur messages, append to chat_files - context_message = "\n\n".join(context_blocks) - chunks.chat_files.append(dict(role="system", content=context_message)) + chunks.repo = self.get_repo_messages() + chunks.readonly_files = self.get_readonly_files_messages() + chunks.chat_files = self.get_chat_files_messages() + + if self.gpt_prompts.system_reminder: + reminder_message = [ + dict( + role="system", content=self.fmt_system_prompt(self.gpt_prompts.system_reminder) + ), + ] + else: + reminder_message = [] + + chunks.cur = list(self.cur_messages) + chunks.reminder = [] + + # TODO review impact of token count on image messages + messages_tokens = self.main_model.token_count(chunks.all_messages()) + reminder_tokens = self.main_model.token_count(reminder_message) + cur_tokens = self.main_model.token_count(chunks.cur) + + if None not in (messages_tokens, reminder_tokens, cur_tokens): + total_tokens = messages_tokens + reminder_tokens + cur_tokens + else: + # add the reminder anyway + total_tokens = 0 + + if chunks.cur: + final = chunks.cur[-1] + else: + final = None + + max_input_tokens = self.main_model.info.get("max_input_tokens") or 0 + # Add the reminder prompt if we still have room to include it. 
+ if ( + not max_input_tokens + or total_tokens < max_input_tokens + and self.gpt_prompts.system_reminder + ): + if self.main_model.reminder == "sys": + chunks.reminder = reminder_message + elif self.main_model.reminder == "user" and final and final["role"] == "user": + # stuff it into the user message + new_content = ( + final["content"] + + "\n\n" + + self.fmt_system_prompt(self.gpt_prompts.system_reminder) + ) + chunks.cur[-1] = dict(role=final["role"], content=new_content) return chunks @@ -865,14 +966,16 @@ class NavigatorCoder(Coder): content = params.get('content') after_pattern = params.get('after_pattern') before_pattern = params.get('before_pattern') - near_context = params.get('near_context') # New - occurrence = params.get('occurrence', 1) # New, default 1 + occurrence = params.get('occurrence', 1) # Default 1 change_id = params.get('change_id') - dry_run = params.get('dry_run', False) # New, default False + dry_run = params.get('dry_run', False) # Default False + position = params.get('position') + auto_indent = params.get('auto_indent', True) # Default True + use_regex = params.get('use_regex', False) # Default False - if file_path is not None and content is not None and (after_pattern is not None or before_pattern is not None): + if file_path is not None and content is not None and (after_pattern is not None or before_pattern is not None or position is not None): result_message = _execute_insert_block( - self, file_path, content, after_pattern, before_pattern, near_context, occurrence, change_id, dry_run + self, file_path, content, after_pattern, before_pattern, occurrence, change_id, dry_run, position, auto_indent, use_regex ) else: result_message = "Error: Missing required parameters for InsertBlock (file_path, content, and either after_pattern or before_pattern)" From b155143845772981553e4d1219c6a88c02b28e9b Mon Sep 17 00:00:00 2001 From: "Amar Sood (tekacs)" Date: Mon, 14 Apr 2025 07:13:23 -0400 Subject: [PATCH 61/63] Try to have legacy navigator 
mode use SEARCH/REPLACE blocks more cleanly --- aider/coders/navigator_legacy_prompts.py | 14 ++++++++++---- 1 file changed, 10 insertions(+), 4 deletions(-) diff --git a/aider/coders/navigator_legacy_prompts.py b/aider/coders/navigator_legacy_prompts.py index 2c5b60a46..538d03b43 100644 --- a/aider/coders/navigator_legacy_prompts.py +++ b/aider/coders/navigator_legacy_prompts.py @@ -118,7 +118,7 @@ When you include any tool call, the system will automatically continue to the ne ``` Your answer to the user's question... -SEARCH/REPLACE blocks appear BEFORE the last '---' separator. +SEARCH/REPLACE blocks can ONLY appear BEFORE the last '---' separator. Any SEARCH/REPLACE blocks after the separator will be IGNORED. file.py <<<<<<< SEARCH @@ -135,14 +135,17 @@ new code ## SEARCH/REPLACE Block Format When you need to make changes to code, use the SEARCH/REPLACE block format. You can include multiple edits in one message. -``` +````python path/to/file.ext <<<<<<< SEARCH Original code lines to match exactly ======= Replacement code lines >>>>>>> REPLACE -``` +```` +NOTE that this uses four backticks as the fence and not three! + +IMPORTANT: Any SEARCH/REPLACE blocks that appear after the last '---' separator will be IGNORED. #### Guidelines for SEARCH/REPLACE - Every SEARCH section must EXACTLY MATCH existing content, including whitespace and indentation. @@ -287,8 +290,9 @@ Here are summaries of some files present in this repo: - If emitting 3 or more tool calls, OR if any tool call spans multiple lines, place each call on a new line for clarity. 
## SEARCH/REPLACE blocks -- SEARCH/REPLACE blocks MUST appear BEFORE the last '---' separator line in your response +- When using SEARCH/REPLACE blocks, they MUST ONLY appear BEFORE the last '---' separator line in your response - If there is no '---' separator, they can appear anywhere in your response +- IMPORTANT: Using SEARCH/REPLACE blocks is the standard editing method in this mode - Format example: ``` Your answer text here... @@ -303,6 +307,8 @@ Here are summaries of some files present in this repo: --- [tool_call(ToolName, param1=value1)] ``` + Note that SEARCH/REPLACE blocks should use four backticks (````) as the fence, not three +- IMPORTANT: Any SEARCH/REPLACE blocks that appear after the last '---' separator will be IGNORED ## Context Features - Use enhanced context blocks (directory structure and git status) to orient yourself From b8f775743592f414ae4d96355f3cd04b5278c933 Mon Sep 17 00:00:00 2001 From: "Amar Sood (tekacs)" Date: Mon, 14 Apr 2025 08:34:21 -0400 Subject: [PATCH 62/63] Bring accurate enhanced context token counts to /tokens --- aider/coders/navigator_coder.py | 166 ++++++++++++++++++++++++++------ aider/commands.py | 33 +++++++ 2 files changed, 170 insertions(+), 29 deletions(-) diff --git a/aider/coders/navigator_coder.py b/aider/coders/navigator_coder.py index 9940a7147..8fbcb7284 100644 --- a/aider/coders/navigator_coder.py +++ b/aider/coders/navigator_coder.py @@ -93,8 +93,100 @@ class NavigatorCoder(Coder): # Enable enhanced context blocks by default self.use_enhanced_context = True + # Initialize empty token tracking dictionary and cache structures + # but don't populate yet to avoid startup delay + self.context_block_tokens = {} + self.context_blocks_cache = {} + self.tokens_calculated = False + super().__init__(*args, **kwargs) + def _calculate_context_block_tokens(self, force=False): + """ + Calculate token counts for all enhanced context blocks. 
+ This is the central method for calculating token counts, + ensuring they're consistent across all parts of the code. + + This method populates the cache for context blocks and calculates tokens. + + Args: + force: If True, recalculate tokens even if already calculated + """ + # Skip if already calculated and not forced + if hasattr(self, 'tokens_calculated') and self.tokens_calculated and not force: + return + + # Clear existing token counts + self.context_block_tokens = {} + + # Initialize the cache for context blocks if needed + if not hasattr(self, 'context_blocks_cache'): + self.context_blocks_cache = {} + + if not self.use_enhanced_context: + return + + try: + # First, clear the cache to force regeneration of all blocks + self.context_blocks_cache = {} + + # Generate all context blocks and calculate token counts + block_types = ["environment_info", "directory_structure", "git_status", "symbol_outline"] + + for block_type in block_types: + block_content = self._generate_context_block(block_type) + if block_content: + self.context_block_tokens[block_type] = self.main_model.token_count(block_content) + + # Mark as calculated + self.tokens_calculated = True + except Exception as e: + # Silently handle errors during calculation + # This prevents errors in token counting from breaking the main functionality + pass + + def _generate_context_block(self, block_name): + """ + Generate a specific context block and cache it. + This is a helper method for get_cached_context_block. 
+ """ + content = None + + if block_name == "environment_info": + content = self.get_environment_info() + elif block_name == "directory_structure": + content = self.get_directory_structure() + elif block_name == "git_status": + content = self.get_git_status() + elif block_name == "symbol_outline": + content = self.get_context_symbol_outline() + elif block_name == "context_summary": + content = self.get_context_summary() + + # Cache the result if it's not None + if content is not None: + self.context_blocks_cache[block_name] = content + + return content + + def get_cached_context_block(self, block_name): + """ + Get a context block from the cache, or generate it if not available. + This should be used by format_chat_chunks to avoid regenerating blocks. + + This will ensure tokens are calculated if they haven't been yet. + """ + # Make sure tokens have been calculated at least once + if not hasattr(self, 'tokens_calculated') or not self.tokens_calculated: + self._calculate_context_block_tokens() + + # Return from cache if available + if hasattr(self, 'context_blocks_cache') and block_name in self.context_blocks_cache: + return self.context_blocks_cache[block_name] + + # Otherwise generate and cache the block + return self._generate_context_block(block_name) + def set_granular_editing(self, enabled): """ Switch between granular editing tools and legacy search/replace. 
@@ -196,13 +288,19 @@ class NavigatorCoder(Coder): # If enhanced context blocks are not enabled, just return the base chunks if not self.use_enhanced_context: return chunks + + # Make sure token counts are updated - using centralized method + # This also populates the context block cache + self._calculate_context_block_tokens() - # Generate all context blocks - env_context = self.get_environment_info() + # Get blocks from cache to avoid regenerating them + env_context = self.get_cached_context_block("environment_info") + dir_structure = self.get_cached_context_block("directory_structure") + git_status = self.get_cached_context_block("git_status") + symbol_outline = self.get_cached_context_block("symbol_outline") + + # Context summary needs special handling because it depends on other blocks context_summary = self.get_context_summary() - dir_structure = self.get_directory_structure() - git_status = self.get_git_status() - symbol_outline = self.get_context_symbol_outline() # 1. Add relatively static blocks BEFORE done_messages # These blocks change less frequently and can be part of the cacheable prefix @@ -308,13 +406,20 @@ class NavigatorCoder(Coder): chunks.cur = list(self.cur_messages) chunks.reminder = [] - # TODO review impact of token count on image messages - messages_tokens = self.main_model.token_count(chunks.all_messages()) + # Use accurate token counting method that considers enhanced context blocks + base_messages = chunks.all_messages() + messages_tokens = self.main_model.token_count(base_messages) reminder_tokens = self.main_model.token_count(reminder_message) cur_tokens = self.main_model.token_count(chunks.cur) if None not in (messages_tokens, reminder_tokens, cur_tokens): - total_tokens = messages_tokens + reminder_tokens + cur_tokens + total_tokens = messages_tokens + # Only add tokens for reminder and cur if they're not already included + # in the messages_tokens calculation + if not chunks.reminder: + total_tokens += reminder_tokens + if not 
chunks.cur: + total_tokens += cur_tokens else: # add the reminder anyway total_tokens = 0 @@ -351,7 +456,16 @@ class NavigatorCoder(Coder): """ if not self.use_enhanced_context: return None + + # If context_summary is already in the cache, return it + if hasattr(self, 'context_blocks_cache') and "context_summary" in self.context_blocks_cache: + return self.context_blocks_cache["context_summary"] + try: + # Make sure token counts are updated before generating the summary + if not hasattr(self, 'context_block_tokens') or not self.context_block_tokens: + self._calculate_context_block_tokens() + result = "\n" result += "## Current Context Overview\n\n" max_input_tokens = self.main_model.info.get("max_input_tokens") or 0 @@ -401,27 +515,8 @@ class NavigatorCoder(Coder): else: result += "No read-only files in context\n\n" - # Additional enhanced context blocks - env_info = self.get_environment_info() - dir_structure = self.get_directory_structure() - git_status = self.get_git_status() - symbol_outline = self.get_context_symbol_outline() - - extra_context = "" - extra_tokens = 0 - if env_info: - extra_context += env_info + "\n\n" - extra_tokens += self.main_model.token_count(env_info) - if dir_structure: - extra_context += dir_structure + "\n\n" - extra_tokens += self.main_model.token_count(dir_structure) - if git_status: - extra_context += git_status + "\n\n" - extra_tokens += self.main_model.token_count(git_status) - if symbol_outline: - extra_context += symbol_outline + "\n\n" - extra_tokens += self.main_model.token_count(symbol_outline) - + # Use the pre-calculated context block tokens + extra_tokens = sum(self.context_block_tokens.values()) total_tokens = total_file_tokens + extra_tokens result += f"**Total files usage: {total_file_tokens:,} tokens**\n\n" @@ -435,6 +530,12 @@ class NavigatorCoder(Coder): result += "- `[tool_call(Remove, file_path=\"path/to/large_file.ext\")]`\n" result += "- Keep only essential files in context for best performance" result += "\n" + 
+ # Cache the result + if not hasattr(self, 'context_blocks_cache'): + self.context_blocks_cache = {} + self.context_blocks_cache["context_summary"] = result + return result except Exception as e: self.io.tool_error(f"Error generating context summary: {str(e)}") @@ -1647,8 +1748,15 @@ Just reply with fixed versions of the {blocks} above that failed to match. if self.use_enhanced_context: self.io.tool_output("Enhanced context blocks are now ON - directory structure and git status will be included.") + # Mark tokens as needing calculation, but don't calculate yet (lazy calculation) + self.tokens_calculated = False + self.context_blocks_cache = {} else: self.io.tool_output("Enhanced context blocks are now OFF - directory structure and git status will not be included.") + # Clear token counts and cache when disabled + self.context_block_tokens = {} + self.context_blocks_cache = {} + self.tokens_calculated = False return True diff --git a/aider/commands.py b/aider/commands.py index 7bba50a68..1e2ba8879 100644 --- a/aider/commands.py +++ b/aider/commands.py @@ -464,6 +464,20 @@ class Commands: tokens = self.coder.main_model.token_count(repo_content) res.append((tokens, "repository map", "use --map-tokens to resize")) + # Enhanced context blocks (only for navigator mode) + if hasattr(self.coder, 'use_enhanced_context') and self.coder.use_enhanced_context: + # Force token calculation if it hasn't been done yet + if hasattr(self.coder, '_calculate_context_block_tokens'): + if not hasattr(self.coder, 'tokens_calculated') or not self.coder.tokens_calculated: + self.coder._calculate_context_block_tokens() + + # Add enhanced context blocks to the display + if hasattr(self.coder, 'context_block_tokens') and self.coder.context_block_tokens: + for block_name, tokens in self.coder.context_block_tokens.items(): + # Format the block name more nicely + display_name = block_name.replace('_', ' ').title() + res.append((tokens, f"{display_name} context block", "/context-blocks to 
toggle")) + fence = "`" * 3 file_res = [] @@ -872,6 +886,11 @@ class Commands: fname = self.coder.get_rel_fname(abs_file_path) self.io.tool_output(f"Added {fname} to the chat") self.coder.check_added_files() + + # Recalculate context block tokens if using navigator mode + if hasattr(self.coder, 'use_enhanced_context') and self.coder.use_enhanced_context: + if hasattr(self.coder, '_calculate_context_block_tokens'): + self.coder._calculate_context_block_tokens() def completions_drop(self): files = self.coder.get_inchat_relative_files() @@ -891,9 +910,16 @@ class Commands: else: self.io.tool_output("Dropping all files from the chat session.") self._drop_all_files() + + # Recalculate context block tokens after dropping all files + if hasattr(self.coder, 'use_enhanced_context') and self.coder.use_enhanced_context: + if hasattr(self.coder, '_calculate_context_block_tokens'): + self.coder._calculate_context_block_tokens() return filenames = parse_quoted_filenames(args) + files_changed = False + for word in filenames: # Expand tilde in the path expanded_word = os.path.expanduser(word) @@ -916,6 +942,7 @@ class Commands: for matched_file in read_only_matched: self.coder.abs_read_only_fnames.remove(matched_file) self.io.tool_output(f"Removed read-only file {matched_file} from the chat") + files_changed = True # For editable files, use glob if word contains glob chars, otherwise use substring if any(c in expanded_word for c in "*?[]"): @@ -934,6 +961,12 @@ class Commands: if abs_fname in self.coder.abs_fnames: self.coder.abs_fnames.remove(abs_fname) self.io.tool_output(f"Removed {matched_file} from the chat") + files_changed = True + + # Recalculate context block tokens if any files were changed and using navigator mode + if files_changed and hasattr(self.coder, 'use_enhanced_context') and self.coder.use_enhanced_context: + if hasattr(self.coder, '_calculate_context_block_tokens'): + self.coder._calculate_context_block_tokens() def cmd_git(self, args): "Run a git command 
(output excluded from chat)" From 4bb786c1e08996900c2b73994c4182743a5af2f6 Mon Sep 17 00:00:00 2001 From: "Amar Sood (tekacs)" Date: Mon, 14 Apr 2025 09:12:03 -0400 Subject: [PATCH 63/63] Allow printing enhanced context blocks --- aider/commands.py | 63 +++++++++++++++++++++++++++++++++++++++++++++-- 1 file changed, 61 insertions(+), 2 deletions(-) diff --git a/aider/commands.py b/aider/commands.py index 1e2ba8879..c8c4ef647 100644 --- a/aider/commands.py +++ b/aider/commands.py @@ -898,6 +898,28 @@ class Commands: all_files = files + read_only_files all_files = [self.quote_fname(fn) for fn in all_files] return all_files + + def completions_context_blocks(self): + """Return available context block names for auto-completion.""" + if not hasattr(self.coder, 'use_enhanced_context') or not self.coder.use_enhanced_context: + return [] + + # If the coder has context blocks available + if hasattr(self.coder, 'context_block_tokens') and self.coder.context_block_tokens: + # Get all block names from the tokens dictionary + block_names = list(self.coder.context_block_tokens.keys()) + # Format them for display (convert snake_case to Title Case) + formatted_blocks = [name.replace('_', ' ').title() for name in block_names] + return formatted_blocks + + # Standard blocks that are typically available + return [ + "Context Summary", + "Directory Structure", + "Environment Info", + "Git Status", + "Symbol Outline" + ] def cmd_drop(self, args=""): "Remove files from the chat session to free up context space" @@ -1081,17 +1103,54 @@ class Commands: self.io.tool_output("Context management is now OFF - files will not be truncated.") def cmd_context_blocks(self, args=""): - "Toggle enhanced context blocks (directory structure and git status)" + "Toggle enhanced context blocks or print a specific block" if not hasattr(self.coder, 'use_enhanced_context'): self.io.tool_error("Enhanced context blocks are only available in navigator mode.") return + + # If an argument is provided, try to 
print that specific context block + if args.strip(): + # Format block name to match internal naming conventions + block_name = args.strip().lower().replace(" ", "_") - # Toggle the setting + # Check if the coder has the necessary method to get context blocks + if hasattr(self.coder, '_generate_context_block'): + # Force token recalculation to ensure blocks are fresh + if hasattr(self.coder, '_calculate_context_block_tokens'): + self.coder._calculate_context_block_tokens(force=True) + + # Try to get the requested block + block_content = self.coder._generate_context_block(block_name) + + if block_content: + # Calculate token count + tokens = self.coder.main_model.token_count(block_content) + self.io.tool_output(f"Context block '{args.strip()}' ({tokens} tokens):") + self.io.tool_output(block_content) + return + else: + # List available blocks if the requested one wasn't found + self.io.tool_error(f"Context block '{args.strip()}' not found or empty.") + if hasattr(self.coder, 'context_block_tokens'): + available_blocks = list(self.coder.context_block_tokens.keys()) + formatted_blocks = [name.replace('_', ' ').title() for name in available_blocks] + self.io.tool_output(f"Available blocks: {', '.join(formatted_blocks)}") + return + else: + self.io.tool_error("This coder doesn't support generating context blocks.") + return + + # If no argument, toggle the enhanced context setting self.coder.use_enhanced_context = not self.coder.use_enhanced_context # Report the new state if self.coder.use_enhanced_context: self.io.tool_output("Enhanced context blocks are now ON - directory structure and git status will be included.") + if hasattr(self.coder, 'context_block_tokens'): + available_blocks = list(self.coder.context_block_tokens.keys()) + formatted_blocks = [name.replace('_', ' ').title() for name in available_blocks] + self.io.tool_output(f"Available blocks: {', '.join(formatted_blocks)}") + self.io.tool_output("Use '/context-blocks [block name]' to view a specific block.") 
else: self.io.tool_output("Enhanced context blocks are now OFF - directory structure and git status will not be included.")