From 09b2d49f1140353a0a24180462423ebf5dfd3838 Mon Sep 17 00:00:00 2001
From: Luke Reeves
Date: Tue, 3 Jun 2025 09:51:49 -0400
Subject: [PATCH] Use system prompt prefix for commit messages

I've been using Qwen3 with reasoning disabled via a /no_think directive
in the system prompt prefix. I found that commit message generation was
ignoring this setting. This change updates the commit message generation
loop to prepend the prefix to the system prompt when it is defined.
---
 aider/repo.py            | 18 +++++++++++++-----
 tests/basic/test_repo.py | 31 +++++++++++++++++++++++++++++++
 2 files changed, 44 insertions(+), 5 deletions(-)

diff --git a/aider/repo.py b/aider/repo.py
index bd4937303..f14f17403 100644
--- a/aider/repo.py
+++ b/aider/repo.py
@@ -334,24 +334,32 @@ class GitRepo:
         content += diffs
 
         system_content = self.commit_prompt or prompts.commit_system
+
         language_instruction = ""
         if user_language:
             language_instruction = f"\n- Is written in {user_language}."
         system_content = system_content.format(language_instruction=language_instruction)
 
-        messages = [
-            dict(role="system", content=system_content),
-            dict(role="user", content=content),
-        ]
-
         commit_message = None
         for model in self.models:
             spinner_text = f"Generating commit message with {model.name}"
             with WaitingSpinner(spinner_text):
+                if model.system_prompt_prefix:
+                    current_system_content = model.system_prompt_prefix + "\n" + system_content
+                else:
+                    current_system_content = system_content
+
+                messages = [
+                    dict(role="system", content=current_system_content),
+                    dict(role="user", content=content),
+                ]
+
                 num_tokens = model.token_count(messages)
                 max_tokens = model.info.get("max_input_tokens") or 0
+
                 if max_tokens and num_tokens > max_tokens:
                     continue
+
                 commit_message = model.simple_send_with_retries(messages)
                 if commit_message:
                     break  # Found a model that could generate the message
diff --git a/tests/basic/test_repo.py b/tests/basic/test_repo.py
index 5c366f87f..0f72c6763 100644
--- a/tests/basic/test_repo.py
+++ b/tests/basic/test_repo.py
@@ -683,3 +683,34 @@ class TestRepo(unittest.TestCase):
         # Verify the commit was actually made
         latest_commit_msg = raw_repo.head.commit.message
         self.assertEqual(latest_commit_msg.strip(), "Should succeed")
+
+    @patch("aider.models.Model.simple_send_with_retries")
+    def test_get_commit_message_uses_system_prompt_prefix(self, mock_send):
+        """
+        Verify that GitRepo.get_commit_message() prepends the model.system_prompt_prefix
+        to the system prompt sent to the LLM.
+        """
+        mock_send.return_value = "good commit message"
+
+        prefix = "MY-CUSTOM-PREFIX"
+        model = Model("gpt-3.5-turbo")
+        model.system_prompt_prefix = prefix
+
+        with GitTemporaryDirectory():
+            repo = GitRepo(InputOutput(), None, None, models=[model])
+
+            # Call the function under test
+            repo.get_commit_message("dummy diff", "dummy context")
+
+            # Ensure the LLM was invoked once
+            mock_send.assert_called_once()
+
+            # Grab the system message sent to the model
+            messages = mock_send.call_args[0][0]
+            system_msg_content = messages[0]["content"]
+
+            # Verify the prefix is at the start of the system message
+            self.assertTrue(
+                system_msg_content.startswith(prefix),
+                "system_prompt_prefix should be prepended to the system prompt",
+            )
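
Usage note (not part of the patch): the sketch below is one way to exercise the
fixed path end to end against a real model instead of the mock used in the new
test. It assumes it is run from inside an existing git checkout; the model name
and the /no_think prefix value are illustrative, not defined by this patch.

# Minimal sketch, assuming a locally served Qwen3 model.
from aider.io import InputOutput
from aider.models import Model
from aider.repo import GitRepo

model = Model("ollama_chat/qwen3:32b")    # assumed model name
model.system_prompt_prefix = "/no_think"  # prefix that get_commit_message should now honor

# fnames=None, git_dname=None: let GitRepo locate the repo from the current directory
repo = GitRepo(InputOutput(), None, None, models=[model])

# With the patch applied, the system message sent here starts with "/no_think"
print(repo.get_commit_message("dummy diff", "dummy context"))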