feat: Add test for streaming reasoning content with proper formatting and output

Paul Gauthier (aider) 2025-03-07 17:03:16 -08:00
parent 3b9c2b9729
commit 16d7cf7a52

@@ -65,6 +65,85 @@ class TestReasoning(unittest.TestCase):
reasoning_pos, main_pos, "Reasoning content should appear before main content"
)
def test_send_with_reasoning_content_stream(self):
"""Test that streaming reasoning content is properly formatted and output."""
# Setup IO with pretty output for streaming
io = InputOutput(pretty=True)
mock_mdstream = MagicMock()
io.get_assistant_mdstream = MagicMock(return_value=mock_mdstream)
# Setup model and coder
model = Model("gpt-3.5-turbo")
coder = Coder.create(model, None, io=io, stream=True)
# Ensure the coder shows pretty output
coder.show_pretty = MagicMock(return_value=True)
# Mock streaming response chunks
class MockStreamingChunk:
def __init__(self, content=None, reasoning_content=None, finish_reason=None):
self.choices = [MagicMock()]
self.choices[0].delta = MagicMock()
self.choices[0].finish_reason = finish_reason
# Set content if provided
if content is not None:
self.choices[0].delta.content = content
else:
# Need to handle attribute access that would raise AttributeError
delattr(self.choices[0].delta, 'content')
# Set reasoning_content if provided
if reasoning_content is not None:
self.choices[0].delta.reasoning_content = reasoning_content
else:
# Need to handle attribute access that would raise AttributeError
delattr(self.choices[0].delta, 'reasoning_content')
# Create chunks to simulate streaming
chunks = [
# First chunk with reasoning content starts the tag
MockStreamingChunk(reasoning_content="My step-by-step "),
# Additional reasoning content
MockStreamingChunk(reasoning_content="reasoning process"),
# Switch to main content - this will automatically end the reasoning tag
MockStreamingChunk(content="Final "),
# More main content
MockStreamingChunk(content="answer "),
MockStreamingChunk(content="after reasoning"),
# End the response
MockStreamingChunk(finish_reason="stop")
]
# Create a mock hash object
mock_hash = MagicMock()
mock_hash.hexdigest.return_value = "mock_hash_digest"
# Mock the model's send_completion to return the hash and completion
with patch.object(model, "send_completion", return_value=(mock_hash, chunks)):
# Call send with a simple message
messages = [{"role": "user", "content": "test prompt"}]
list(coder.send(messages))
# Verify mdstream.update was called
mock_mdstream.update.assert_called()
# Check the arguments of the final call to update
final_call = mock_mdstream.update.call_args_list[-1]
self.assertTrue(final_call[1]['final'])
final_text = final_call[0][0]
# The final text should include both reasoning and main content with proper formatting
self.assertIn("> Thinking ...", final_text)
self.assertIn("My step-by-step reasoning process", final_text)
self.assertIn("> ... done thinking", final_text)
self.assertIn("Final answer after reasoning", final_text)
# Ensure proper order: reasoning first, then main content
reasoning_pos = final_text.find("My step-by-step reasoning process")
main_pos = final_text.find("Final answer after reasoning")
self.assertLess(reasoning_pos, main_pos, "Reasoning content should appear before main content")
def test_send_with_think_tags(self):
"""Test that <think> tags are properly processed and formatted."""
# Setup IO with no pretty