Fixed up images in chat

Paul Gauthier 2024-04-18 14:39:32 -07:00
parent f1c09ececf
commit 0da1b59901
5 changed files with 14 additions and 29 deletions
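
The commit replaces the name-prefix heuristic is_gpt4_with_openai_base_url() with a per-model accepts_images flag on ModelSettings, so callers ask the selected model whether it can take image input instead of guessing from its name. A minimal sketch of that pattern, using simplified stand-in classes rather than aider's actual ones:

    from dataclasses import dataclass

    @dataclass
    class ModelSettings:
        # Image support is declared per model; False is the safe default.
        name: str
        accepts_images: bool = False

    MODEL_SETTINGS = [
        ModelSettings("gpt-4-turbo", accepts_images=True),
        ModelSettings("gpt-4-0613"),  # text-only model keeps the False default
    ]

    def model_accepts_images(name: str) -> bool:
        # Look the model up in the registry instead of matching a name prefix.
        return any(ms.name == name and ms.accepts_images for ms in MODEL_SETTINGS)

    print(model_accepts_images("gpt-4-turbo"))  # True
    print(model_accepts_images("gpt-4-0613"))   # False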

View file

@@ -362,7 +362,7 @@ class Coder:
         return files_messages

     def get_images_message(self):
-        if not utils.is_gpt4_with_openai_base_url(self.main_model.name):
+        if not self.main_model.accepts_images:
             return None

         image_messages = []

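get_images_message() (not shown beyond its guard) builds image_url content parts for the in-chat image files; the guard above is now a capability check rather than a name check. A hedged sketch of how a local image can be packed into such a part, using standard data-URI encoding rather than aider's exact code:

    import base64
    import mimetypes

    def image_url_part(path: str) -> dict:
        # Encode a local image as a data: URI inside an image_url content part.
        mime, _ = mimetypes.guess_type(path)
        with open(path, "rb") as f:
            encoded = base64.b64encode(f.read()).decode("utf-8")
        return {"type": "image_url", "image_url": {"url": f"data:{mime};base64,{encoded}"}}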
View file

@@ -10,7 +10,7 @@ from prompt_toolkit.completion import Completion
 from aider import prompts, voice
 from aider.scrape import Scraper
-from aider.utils import is_gpt4_with_openai_base_url, is_image_file
+from aider.utils import is_image_file

 from .dump import dump  # noqa: F401
@@ -200,7 +200,7 @@ class Commands:
         # only switch to image model token count if gpt4 and openai and image in files
         image_in_chat = False
-        if is_gpt4_with_openai_base_url(self.coder.main_model.name, self.coder.client):
+        if self.coder.main_model.accepts_images:
             image_in_chat = any(
                 is_image_file(relative_fname)
                 for relative_fname in self.coder.get_inchat_relative_files()
@@ -378,9 +378,7 @@ class Commands:
             if abs_file_path in self.coder.abs_fnames:
                 self.io.tool_error(f"{matched_file} is already in the chat")
             else:
-                if is_image_file(matched_file) and not is_gpt4_with_openai_base_url(
-                    self.coder.main_model.name, self.coder.client
-                ):
+                if is_image_file(matched_file) and not self.coder.main_model.accepts_images:
                     self.io.tool_error(
                         f"Cannot add image file {matched_file} as the model does not support image"
                         " files"

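Both Commands call sites now read the flag off the active model object instead of passing the model name and client to the helper. A hedged sketch of the same /add guard in isolation (is_image_file here is a simplified stand-in, not aider's implementation):

    from pathlib import Path

    IMAGE_EXTENSIONS = {".png", ".jpg", ".jpeg", ".gif", ".webp", ".bmp"}

    def is_image_file(fname: str) -> bool:
        # Simplified stand-in for aider.utils.is_image_file (extension check only).
        return Path(fname).suffix.lower() in IMAGE_EXTENSIONS

    def can_add_to_chat(fname: str, model_accepts_images: bool) -> bool:
        # Image files are only allowed when the active model accepts images.
        return not is_image_file(fname) or model_accepts_images

    print(can_add_to_chat("diagram.png", model_accepts_images=False))  # False
    print(can_add_to_chat("main.py", model_accepts_images=False))      # True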
View file

@@ -27,6 +27,7 @@ class ModelSettings:
     weak_model_name: str = "gpt-3.5-turbo-0125"
     use_repo_map: bool = False
     send_undo_reply: bool = False
+    accepts_images: bool = False


 # https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo
@@ -57,6 +58,14 @@ MODEL_SETTINGS = [
         "udiff",
         use_repo_map=True,
         send_undo_reply=True,
+        accepts_images=True,
+    ),
+    ModelSettings(
+        "gpt-4-turbo",
+        "udiff",
+        use_repo_map=True,
+        send_undo_reply=True,
+        accepts_images=True,
     ),
     ModelSettings(
         "gpt-4-0125-preview",
@@ -75,6 +84,7 @@ MODEL_SETTINGS = [
         "diff",
         use_repo_map=True,
         send_undo_reply=True,
+        accepts_images=True,
     ),
     ModelSettings(
         "gpt-4-0613",

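Because the new dataclass field defaults to False, existing MODEL_SETTINGS entries stay valid without edits and only image-capable models need to opt in. A small illustration of that design choice with a hypothetical settings class, not aider's real one:

    from dataclasses import dataclass

    @dataclass
    class Settings:
        edit_format: str = "diff"
        accepts_images: bool = False  # new field; False preserves old behavior

    legacy = Settings()                     # existing entry, unchanged
    vision = Settings(accepts_images=True)  # entry that opts in to image input
    assert not legacy.accepts_images
    assert vision.accepts_images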
View file

@@ -10,7 +10,6 @@ import openai
 from openai import APIConnectionError, InternalServerError, RateLimitError

 from aider.dump import dump  # noqa: F401
-from aider.utils import is_gpt4_with_openai_base_url

 CACHE_PATH = "~/.aider.send.cache.v1"
 CACHE = None
@@ -40,17 +39,6 @@ def send_with_retries(model_name, messages, functions, stream):
     if functions is not None:
         kwargs["functions"] = functions

-    # Check conditions to switch to gpt-4-vision-preview or strip out image_url messages
-    if is_gpt4_with_openai_base_url(model_name):
-        if any(
-            isinstance(msg.get("content"), list)
-            and any("image_url" in item for item in msg.get("content") if isinstance(item, dict))
-            for msg in messages
-        ):
-            kwargs["model"] = "gpt-4-vision-preview"
-            # gpt-4-vision is limited to max tokens of 4096
-            kwargs["max_tokens"] = 4096

     key = json.dumps(kwargs, sort_keys=True).encode()

     # Generate SHA1 hash of kwargs and append it to chat_completion_call_hashes
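
The deleted block inspected outgoing messages for image_url content parts and silently rerouted such requests to gpt-4-vision-preview with max_tokens=4096. With accepts_images settled when the model is chosen, send_with_retries no longer needs to reroute anything. For reference, this is the kind of multimodal message the removed check was scanning for (standard OpenAI chat format; the URL is a placeholder):

    messages = [
        {
            "role": "user",
            "content": [
                {"type": "text", "text": "What is in this image?"},
                # Placeholder URL; a real request would point at an actual image.
                {"type": "image_url", "image_url": {"url": "https://example.com/photo.png"}},
            ],
        }
    ]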

View file

@@ -104,14 +104,3 @@ def show_messages(messages, title=None, functions=None):
     if functions:
         dump(functions)

-# TODO: fix this
-def is_gpt4_with_openai_base_url(model_name):
-    """
-    Check if the model_name starts with 'gpt-4' and the client base URL includes 'api.openai.com'.
-
-    :param model_name: The name of the model to check.
-    :return: True if conditions are met, False otherwise.
-    """
-    return model_name.startswith("gpt-4")
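
Despite its docstring, the removed helper only checked the model-name prefix (hence the TODO), so every gpt-4* model was treated as image-capable. A quick illustration of the false positive that the per-model accepts_images flag eliminates:

    def is_gpt4_with_openai_base_url(model_name: str) -> bool:
        # Behavior of the deleted helper: a bare name-prefix check.
        return model_name.startswith("gpt-4")

    # gpt-4-0613 matches the prefix but is a text-only model.
    print(is_gpt4_with_openai_base_url("gpt-4-0613"))  # True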