Updated gpt-4 turbo model switches

Paul Gauthier 2024-02-03 08:39:19 -08:00
parent bca265abfc
commit 174702babe
4 changed files with 6 additions and 6 deletions

@@ -2,7 +2,7 @@
 ### v0.23.0
-- Added support for the new `gpt-4-0125-preview`, use `--model gpt-4-0125-preview`.
+- Added support for `--model gpt-4-0125-preview` and OpenAI's alias `--model gpt-4-turbo-preview`. The `--4turbo` switch remains an alias for `--model gpt-4-1106-preview` at this time.
 - New `/test` command that runs a command and adds the output to the chat on non-zero exit status.
 - Improved streaming of markdown to the terminal.
 - Added `/quit` as alias for `/exit`.

@@ -157,15 +157,15 @@ def main(argv=None, input=None, output=None, force_git_root=None):
         default=False,
         help="Override to skip model availability check (default: False)",
     )
-    default_4_turbo_model = models.GPT4_1106_PREVIEW
+    default_4_turbo_model = "gpt-4-1106-preview"
     core_group.add_argument(
         "--4-turbo",
         "--4turbo",
         "--4",
         action="store_const",
         dest="model",
-        const=default_4_turbo_model.name,
-        help=f"Use {default_4_turbo_model.name} model for the main chat (gpt-4 is better)",
+        const=default_4_turbo_model,
+        help=f"Use {default_4_turbo_model} model for the main chat (gpt-4 is better)",
     )
     default_3_model = models.GPT35_1106
     core_group.add_argument(
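All three switches share a single store_const action, so the sketch below (illustrative only, not aider's actual CLI wiring) shows how argparse resolves the aliases to the plain model-name string that replaces the old models.GPT4_1106_PREVIEW constant:

    import argparse

    # Illustrative reconstruction of the switch wiring above; names mirror the diff.
    parser = argparse.ArgumentParser()
    default_4_turbo_model = "gpt-4-1106-preview"
    parser.add_argument(
        "--4-turbo",
        "--4turbo",
        "--4",
        action="store_const",
        dest="model",
        const=default_4_turbo_model,
        help=f"Use {default_4_turbo_model} model for the main chat (gpt-4 is better)",
    )

    args = parser.parse_args(["--4turbo"])
    print(args.model)  # gpt-4-1106-preview

Because const is now the string itself rather than a Model object, main.py no longer needs the GPT4_1106_PREVIEW constant, which is why the next file drops it.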

@@ -4,7 +4,6 @@ from .openrouter import OpenRouterModel
 GPT4 = Model.create("gpt-4")
 GPT4_0613 = Model.create("gpt-4-0613")
-GPT4_1106_PREVIEW = Model.create("gpt-4-1106-preview")
 GPT35 = Model.create("gpt-3.5-turbo")
 GPT35_1106 = Model.create("gpt-3.5-turbo-1106")
 GPT35_16k = Model.create("gpt-3.5-turbo-16k")

@@ -9,6 +9,7 @@ known_tokens = {
     "gpt-4": 8,
     "gpt-4-1106-preview": 128,
     "gpt-4-0125-preview": 128,
+    "gpt-4-turbo-preview": 128,
     "gpt-3.5-turbo-1106": 16,
 }
@@ -34,7 +35,7 @@ class OpenAIModel(Model):
         self.tokenizer = tiktoken.encoding_for_model(name)
         if self.is_gpt4():
-            if name in ("gpt-4-1106-preview", "gpt-4-0125-preview"):
+            if name in ("gpt-4-1106-preview", "gpt-4-0125-preview", "gpt-4-turbo-preview"):
                 self.edit_format = "udiff"
             else:
                 self.edit_format = "diff"
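Taken together, these two hunks register the new alias in the token table and route it to the udiff edit format. A minimal sketch of that mapping (hypothetical helper, not aider's API; the table and tuple are copied from the diff, and the values appear to be context sizes in thousands of tokens):

    # Hypothetical helper, not aider's API; data mirrors the diff above.
    known_tokens = {
        "gpt-4": 8,
        "gpt-4-1106-preview": 128,
        "gpt-4-0125-preview": 128,
        "gpt-4-turbo-preview": 128,
        "gpt-3.5-turbo-1106": 16,
    }

    UDIFF_MODELS = ("gpt-4-1106-preview", "gpt-4-0125-preview", "gpt-4-turbo-preview")

    def describe(name):
        # known_tokens values look like context sizes in thousands of tokens.
        k_tokens = known_tokens.get(name, "?")
        edit_format = "udiff" if name in UDIFF_MODELS else "diff"
        return f"{name}: {k_tokens}k context, {edit_format} edit format"

    print(describe("gpt-4-turbo-preview"))  # gpt-4-turbo-preview: 128k context, udiff edit format
    print(describe("gpt-4-0125-preview"))   # gpt-4-0125-preview: 128k context, udiff edit format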