mirror of https://github.com/Aider-AI/aider.git
move tests to test_sendchat.py
commit 28578217da
parent d0be8d4cbd
3 changed files with 43 additions and 46 deletions
aider/coders/base_coder.py
@@ -196,7 +196,8 @@ class Coder:
         for fname in self.get_inchat_relative_files():
             self.io.tool_output(f"Added {fname} to the chat.")
 
-        self.repo.add_new_files(fnames)
+        if self.repo:
+            self.repo.add_new_files(fnames)
 
         # validate the functions jsonschema
         if self.functions:
tests/test_coder.py
@@ -6,7 +6,6 @@ from unittest.mock import MagicMock, patch
 
 import git
 import openai
-import requests
 
 from aider import models
 from aider.coders import Coder
@@ -215,50 +214,6 @@ class TestCoder(unittest.TestCase):
         # Assert that the returned message is the expected one
         self.assertEqual(result, 'a good "commit message"')
 
-    @patch("aider.coders.base_coder.openai.ChatCompletion.create")
-    @patch("builtins.print")
-    def test_send_with_retries_rate_limit_error(self, mock_print, mock_chat_completion_create):
-        # Mock the IO object
-        mock_io = MagicMock()
-
-        # Initialize the Coder object with the mocked IO and mocked repo
-        coder = Coder.create(models.GPT4, None, mock_io)
-
-        # Set up the mock to raise RateLimitError on
-        # the first call and return None on the second call
-        mock_chat_completion_create.side_effect = [
-            openai.error.RateLimitError("Rate limit exceeded"),
-            None,
-        ]
-
-        # Call the send_with_retries method
-        coder.send_with_retries("model", ["message"], None)
-
-        # Assert that print was called once
-        mock_print.assert_called_once()
-
-    @patch("aider.coders.base_coder.openai.ChatCompletion.create")
-    @patch("builtins.print")
-    def test_send_with_retries_connection_error(self, mock_print, mock_chat_completion_create):
-        # Mock the IO object
-        mock_io = MagicMock()
-
-        # Initialize the Coder object with the mocked IO and mocked repo
-        coder = Coder.create(models.GPT4, None, mock_io)
-
-        # Set up the mock to raise ConnectionError on the first call
-        # and return None on the second call
-        mock_chat_completion_create.side_effect = [
-            requests.exceptions.ConnectionError("Connection error"),
-            None,
-        ]
-
-        # Call the send_with_retries method
-        coder.send_with_retries("model", ["message"], None)
-
-        # Assert that print was called once
-        mock_print.assert_called_once()
-
     def test_run_with_file_deletion(self):
         # Create a few temporary files
tests/test_sendchat.py (new file, 41 lines)
@@ -0,0 +1,41 @@
+import unittest
+from unittest.mock import patch
+
+import openai
+import requests
+
+from aider.sendchat import send_with_retries
+
+
+class TestSendChat(unittest.TestCase):
+    @patch("aider.sendchat.openai.ChatCompletion.create")
+    @patch("builtins.print")
+    def test_send_with_retries_rate_limit_error(self, mock_print, mock_chat_completion_create):
+        # Set up the mock to raise RateLimitError on
+        # the first call and return None on the second call
+        mock_chat_completion_create.side_effect = [
+            openai.error.RateLimitError("Rate limit exceeded"),
+            None,
+        ]
+
+        # Call the send_with_retries method
+        send_with_retries("model", ["message"], None, False)
+
+        # Assert that print was called once
+        mock_print.assert_called_once()
+
+    @patch("aider.sendchat.openai.ChatCompletion.create")
+    @patch("builtins.print")
+    def test_send_with_retries_connection_error(self, mock_print, mock_chat_completion_create):
+        # Set up the mock to raise ConnectionError on the first call
+        # and return None on the second call
+        mock_chat_completion_create.side_effect = [
+            requests.exceptions.ConnectionError("Connection error"),
+            None,
+        ]
+
+        # Call the send_with_retries method
+        send_with_retries("model", ["message"], None, False)
+
+        # Assert that print was called once
+        mock_print.assert_called_once()
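For reference, both new tests exercise send_with_retries from aider/sendchat.py, a module this diff does not touch. The sketch below is only a guess at what such a helper could look like, inferred from how the tests call it; the four-argument signature, the parameter names, and the retry-then-print behavior are assumptions, not the actual aider implementation.

# Hypothetical sketch of a send_with_retries helper, NOT the real aider/sendchat.py.
# Signature and behavior are inferred from the tests above: it is called as
# send_with_retries(model, messages, functions, stream), retries on
# RateLimitError / ConnectionError, and calls print() once per retried error.
import time

import openai
import requests


def send_with_retries(model_name, messages, functions, stream):
    kwargs = dict(model=model_name, messages=messages, stream=stream)
    if functions is not None:
        kwargs["functions"] = functions

    while True:
        try:
            # In the tests the mocked create() raises once, then returns None
            return openai.ChatCompletion.create(**kwargs)
        except (openai.error.RateLimitError, requests.exceptions.ConnectionError) as err:
            # One print per retried error, matching mock_print.assert_called_once()
            print(f"Retrying after error: {err}")
            time.sleep(1)

Structuring this as a module-level function is what lets the tests patch aider.sendchat.openai.ChatCompletion.create directly; the deleted versions in test_coder.py had to build a Coder with a mocked IO object (Coder.create(models.GPT4, None, mock_io)) just to reach the same retry logic.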