mirror of https://github.com/Aider-AI/aider.git
synced 2025-05-24 22:34:59 +00:00

commit 0da1b59901 (parent f1c09ececf)
Fixed up images in chat

5 changed files with 14 additions and 29 deletions
aider/coders/base_coder.py
@@ -362,7 +362,7 @@ class Coder:
         return files_messages
 
     def get_images_message(self):
-        if not utils.is_gpt4_with_openai_base_url(self.main_model.name):
+        if not self.main_model.accepts_images:
             return None
 
         image_messages = []
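The hunk above swaps a provider-specific name check for the per-model capability flag this commit introduces. For orientation, here is a minimal free-standing sketch of what a get_images_message like the one gated above might do; the base64 data-URL message shape follows the OpenAI vision chat format and is an assumption, not code from this diff:

    import base64
    import mimetypes

    def get_images_message(main_model, image_fnames):
        # Gate on the per-model capability flag introduced in this commit.
        if not main_model.accepts_images:
            return None

        content = []
        for fname in image_fnames:
            mime_type, _ = mimetypes.guess_type(fname)
            if not mime_type or not mime_type.startswith("image/"):
                continue
            with open(fname, "rb") as f:
                encoded = base64.b64encode(f.read()).decode("utf-8")
            # OpenAI-style image_url content part (assumed shape, not from this diff).
            content.append(
                {"type": "image_url", "image_url": {"url": f"data:{mime_type};base64,{encoded}"}}
            )

        return {"role": "user", "content": content} if content else None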
aider/commands.py
@@ -10,7 +10,7 @@ from prompt_toolkit.completion import Completion
 
 from aider import prompts, voice
 from aider.scrape import Scraper
-from aider.utils import is_gpt4_with_openai_base_url, is_image_file
+from aider.utils import is_image_file
 
 from .dump import dump  # noqa: F401
 
@@ -200,7 +200,7 @@ class Commands:
 
         # only switch to image model token count if gpt4 and openai and image in files
        image_in_chat = False
-        if is_gpt4_with_openai_base_url(self.coder.main_model.name, self.coder.client):
+        if self.coder.main_model.accepts_images:
             image_in_chat = any(
                 is_image_file(relative_fname)
                 for relative_fname in self.coder.get_inchat_relative_files()
@@ -378,9 +378,7 @@ class Commands:
             if abs_file_path in self.coder.abs_fnames:
                 self.io.tool_error(f"{matched_file} is already in the chat")
             else:
-                if is_image_file(matched_file) and not is_gpt4_with_openai_base_url(
-                    self.coder.main_model.name, self.coder.client
-                ):
+                if is_image_file(matched_file) and not self.coder.main_model.accepts_images:
                     self.io.tool_error(
                         f"Cannot add image file {matched_file} as the model does not support image"
                         " files"
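Both commands.py call sites now consult main_model.accepts_images instead of parsing the model name. The is_image_file helper itself is not shown in this diff; a plausible extension-based sketch, offered purely as an assumption about its behavior:

    from pathlib import Path

    # Assumed extension set; aider's real helper may accept more or fewer types.
    IMAGE_EXTENSIONS = {".png", ".jpg", ".jpeg", ".gif", ".webp", ".bmp"}

    def is_image_file(file_name):
        # str() tolerates both plain strings and Path objects.
        return Path(str(file_name)).suffix.lower() in IMAGE_EXTENSIONS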
aider/models.py
@@ -27,6 +27,7 @@ class ModelSettings:
     weak_model_name: str = "gpt-3.5-turbo-0125"
     use_repo_map: bool = False
     send_undo_reply: bool = False
+    accepts_images: bool = False
 
 
 # https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo
@@ -57,6 +58,14 @@ MODEL_SETTINGS = [
         "udiff",
         use_repo_map=True,
         send_undo_reply=True,
+        accepts_images=True,
+    ),
+    ModelSettings(
+        "gpt-4-turbo",
+        "udiff",
+        use_repo_map=True,
+        send_undo_reply=True,
+        accepts_images=True,
     ),
     ModelSettings(
         "gpt-4-0125-preview",
@@ -75,6 +84,7 @@ MODEL_SETTINGS = [
         "diff",
         use_repo_map=True,
         send_undo_reply=True,
+        accepts_images=True,
     ),
     ModelSettings(
         "gpt-4-0613",
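Taken together, the models.py hunks add a capability flag that defaults to off and is switched on per entry. A condensed sketch of the resulting structure; the leading name and edit_format fields are inferred from the positional arguments in the diff:

    from dataclasses import dataclass

    @dataclass
    class ModelSettings:
        name: str          # inferred: first positional argument
        edit_format: str   # inferred: second positional argument
        weak_model_name: str = "gpt-3.5-turbo-0125"
        use_repo_map: bool = False
        send_undo_reply: bool = False
        accepts_images: bool = False  # new in this commit

    MODEL_SETTINGS = [
        ModelSettings(
            "gpt-4-turbo",
            "udiff",
            use_repo_map=True,
            send_undo_reply=True,
            accepts_images=True,
        ),
        # ...other entries elided; only vision-capable models set accepts_images...
    ]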
aider/sendchat.py
@@ -10,7 +10,6 @@ import openai
 from openai import APIConnectionError, InternalServerError, RateLimitError
 
 from aider.dump import dump  # noqa: F401
-from aider.utils import is_gpt4_with_openai_base_url
 
 CACHE_PATH = "~/.aider.send.cache.v1"
 CACHE = None
@@ -40,17 +39,6 @@ def send_with_retries(model_name, messages, functions, stream):
     if functions is not None:
         kwargs["functions"] = functions
 
-    # Check conditions to switch to gpt-4-vision-preview or strip out image_url messages
-    if is_gpt4_with_openai_base_url(model_name):
-        if any(
-            isinstance(msg.get("content"), list)
-            and any("image_url" in item for item in msg.get("content") if isinstance(item, dict))
-            for msg in messages
-        ):
-            kwargs["model"] = "gpt-4-vision-preview"
-            # gpt-4-vision is limited to max tokens of 4096
-            kwargs["max_tokens"] = 4096
-
     key = json.dumps(kwargs, sort_keys=True).encode()
 
     # Generate SHA1 hash of kwargs and append it to chat_completion_call_hashes
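With the gpt-4-vision-preview rewrite removed, send_with_retries simply caches and sends the kwargs it is given. For reference, a self-contained sketch of the hashing step named in the surviving context lines, using only the standard library:

    import hashlib
    import json

    kwargs = {
        "model": "gpt-4-0125-preview",
        "messages": [{"role": "user", "content": "hello"}],
        "stream": False,
    }

    # Sorting keys makes identical requests serialize identically,
    # so the SHA1 digest can serve as a stable cache key.
    key = json.dumps(kwargs, sort_keys=True).encode()
    print(hashlib.sha1(key).hexdigest())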
aider/utils.py
@@ -104,14 +104,3 @@ def show_messages(messages, title=None, functions=None):
 
     if functions:
         dump(functions)
-
-
-# TODO: fix this
-def is_gpt4_with_openai_base_url(model_name):
-    """
-    Check if the model_name starts with 'gpt-4' and the client base URL includes 'api.openai.com'.
-
-    :param model_name: The name of the model to check.
-    :return: True if conditions are met, False otherwise.
-    """
-    return model_name.startswith("gpt-4")
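Note that the deleted helper never implemented the base-URL check its docstring promised (hence the "# TODO: fix this"); it only tested the model-name prefix. The commit swaps that heuristic for an explicit per-model flag, roughly this before/after shape:

    # Before: image support inferred from the model name alone.
    def supports_images_before(model_name):
        return model_name.startswith("gpt-4")

    # After: image support declared explicitly on the model's settings.
    def supports_images_after(main_model):
        return main_model.accepts_images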