mirror of
https://github.com/Aider-AI/aider.git
synced 2025-06-09 14:14:59 +00:00
Use system prompt prefix for commit messages
I've been using Qwen3 with reasoning disabled via a `/no_think` tag in the system prompt prefix. I found that commit message generation was ignoring this setting. This change updates the commit message generation loop to incorporate the system prompt prefix, if one is defined.
This commit is contained in:
parent
295122fc97
commit
09b2d49f11
2 changed files with 44 additions and 5 deletions
|
@ -334,24 +334,32 @@ class GitRepo:
|
||||||
content += diffs
|
content += diffs
|
||||||
|
|
||||||
system_content = self.commit_prompt or prompts.commit_system
|
system_content = self.commit_prompt or prompts.commit_system
|
||||||
|
|
||||||
language_instruction = ""
|
language_instruction = ""
|
||||||
if user_language:
|
if user_language:
|
||||||
language_instruction = f"\n- Is written in {user_language}."
|
language_instruction = f"\n- Is written in {user_language}."
|
||||||
system_content = system_content.format(language_instruction=language_instruction)
|
system_content = system_content.format(language_instruction=language_instruction)
|
||||||
|
|
||||||
messages = [
|
|
||||||
dict(role="system", content=system_content),
|
|
||||||
dict(role="user", content=content),
|
|
||||||
]
|
|
||||||
|
|
||||||
commit_message = None
|
commit_message = None
|
||||||
for model in self.models:
|
for model in self.models:
|
||||||
spinner_text = f"Generating commit message with {model.name}"
|
spinner_text = f"Generating commit message with {model.name}"
|
||||||
with WaitingSpinner(spinner_text):
|
with WaitingSpinner(spinner_text):
|
||||||
|
if model.system_prompt_prefix:
|
||||||
|
current_system_content = model.system_prompt_prefix + "\n" + system_content
|
||||||
|
else:
|
||||||
|
current_system_content = system_content
|
||||||
|
|
||||||
|
messages = [
|
||||||
|
dict(role="system", content=current_system_content),
|
||||||
|
dict(role="user", content=content),
|
||||||
|
]
|
||||||
|
|
||||||
num_tokens = model.token_count(messages)
|
num_tokens = model.token_count(messages)
|
||||||
max_tokens = model.info.get("max_input_tokens") or 0
|
max_tokens = model.info.get("max_input_tokens") or 0
|
||||||
|
|
||||||
if max_tokens and num_tokens > max_tokens:
|
if max_tokens and num_tokens > max_tokens:
|
||||||
continue
|
continue
|
||||||
|
|
||||||
commit_message = model.simple_send_with_retries(messages)
|
commit_message = model.simple_send_with_retries(messages)
|
||||||
if commit_message:
|
if commit_message:
|
||||||
break # Found a model that could generate the message
|
break # Found a model that could generate the message
|
||||||
|
|
|
@ -683,3 +683,34 @@ class TestRepo(unittest.TestCase):
|
||||||
# Verify the commit was actually made
|
# Verify the commit was actually made
|
||||||
latest_commit_msg = raw_repo.head.commit.message
|
latest_commit_msg = raw_repo.head.commit.message
|
||||||
self.assertEqual(latest_commit_msg.strip(), "Should succeed")
|
self.assertEqual(latest_commit_msg.strip(), "Should succeed")
|
||||||
|
|
||||||
|
@patch("aider.models.Model.simple_send_with_retries")
def test_get_commit_message_uses_system_prompt_prefix(self, mock_send):
    """
    Verify that GitRepo.get_commit_message() prepends the model.system_prompt_prefix
    to the system prompt sent to the LLM.
    """
    mock_send.return_value = "good commit message"

    # Configure a model that carries a custom system prompt prefix.
    prefix = "MY-CUSTOM-PREFIX"
    model = Model("gpt-3.5-turbo")
    model.system_prompt_prefix = prefix

    with GitTemporaryDirectory():
        repo = GitRepo(InputOutput(), None, None, models=[model])

        # Call the function under test
        repo.get_commit_message("dummy diff", "dummy context")

        # Ensure the LLM was invoked once
        mock_send.assert_called_once()

        # Inspect the system message that was sent to the model.
        sent_messages = mock_send.call_args[0][0]
        system_msg_content = sent_messages[0]["content"]

        # The prefix must appear at the very start of the system message.
        self.assertTrue(
            system_msg_content.startswith(prefix),
            "system_prompt_prefix should be prepended to the system prompt",
        )
|
Loading…
Add table
Add a link
Reference in a new issue