Mirror of https://github.com/Aider-AI/aider.git (synced 2025-05-30 09:14:59 +00:00)
roughed in edit-formats
parent 99cbd10b56
commit 21672d5231
2 changed files with 14 additions and 5 deletions
@@ -90,6 +90,10 @@ class Coder:
            self.io.tool_output(
                f"Using {main_model.name} (experimental): disabling ctags/repo-maps.",
            )

        self.edit_format = self.main_model.edit_format

        if self.edit_format == "whole":
            self.gpt_prompts = prompts.GPT35()
        else:
            self.gpt_prompts = prompts.GPT4()
@@ -343,8 +347,9 @@ class Coder:
            if edit_error:
                return edit_error

            if self.main_model in models.GPT4_models or not edited:
                # Don't add 3.5 assistant messages to the history if they contain "edits"
            if not (self.edit_format == "whole" and edited):
                # Don't add assistant messages to the history if they contain "edits"
                # from the "whole" edit format.
                # Because those edits are actually fully copies of the file!
                # That wastes too much context window.
                self.cur_messages += [dict(role="assistant", content=content)]
@@ -478,7 +483,7 @@ class Coder:

        if self.pretty:
            show_resp = self.resp
            if self.main_model in models.GPT35_models:
            if self.edit_format == "whole":
                try:
                    show_resp = self.update_files_gpt35(self.resp, mode="diff")
                except ValueError:
@@ -782,9 +787,9 @@ class Coder:
        return set(self.get_all_relative_files()) - set(self.get_inchat_relative_files())

    def apply_updates(self, content):
        if self.main_model in models.GPT4_models:
        if self.edit_format == "diff":
            method = self.update_files_gpt4
        elif self.main_model in models.GPT35_models:
        elif self.edit_format == "whole":
            method = self.update_files_gpt35
        else:
            raise ValueError(f"apply_updates() doesn't support {self.main_model.name}")

@@ -1,6 +1,7 @@
class Model_GPT4_32k:
    name = "gpt-4-32k"
    max_context_tokens = 32 * 1024
    edit_format = "diff"


GPT4_32k = Model_GPT4_32k()
@@ -9,6 +10,7 @@ GPT4_32k = Model_GPT4_32k()
class Model_GPT4:
    name = "gpt-4"
    max_context_tokens = 8 * 1024
    edit_format = "diff"


GPT4 = Model_GPT4()
@@ -17,6 +19,7 @@ GPT4 = Model_GPT4()
class Model_GPT35:
    name = "gpt-3.5-turbo"
    max_context_tokens = 4 * 1024
    edit_format = "whole"


GPT35 = Model_GPT35()
@@ -25,6 +28,7 @@ GPT35 = Model_GPT35()
class Model_GPT35_16k:
    name = "gpt-3.5-turbo-16k"
    max_context_tokens = 16 * 1024
    edit_format = "whole"


GPT35_16k = Model_GPT35_16k()
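Each model class now carries its own edit_format ("diff" for the GPT-4 variants, "whole" for the GPT-3.5 variants), so callers can read the format off the model instead of comparing against model lists. A small hypothetical sketch of that lookup, mirroring the prompt selection in the first Coder hunk (illustrative names, not the real module):

    # Hypothetical sketch: per-model edit_format attributes and a lookup helper.
    class GPT4Like:
        name = "gpt-4"
        max_context_tokens = 8 * 1024
        edit_format = "diff"    # model returns targeted edits

    class GPT35Like:
        name = "gpt-3.5-turbo"
        max_context_tokens = 4 * 1024
        edit_format = "whole"   # model rewrites whole files

    def prompts_for(model):
        # "whole" maps to the GPT-3.5 prompt set, everything else to GPT-4's,
        # as in the edit_format check in the first Coder hunk.
        return "prompts.GPT35" if model.edit_format == "whole" else "prompts.GPT4"

    print(prompts_for(GPT4Like()))   # -> prompts.GPT4
    print(prompts_for(GPT35Like()))  # -> prompts.GPT35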