Mirror of https://github.com/Aider-AI/aider.git, synced 2025-05-29 00:35:00 +00:00.
This page shows commit b744bc03a6, which merges d2dc533de7 into 3caab85931.
3 changed files with 91 additions and 7 deletions.
|
@ -218,6 +218,10 @@ class Coder:
|
|||
|
||||
output = f"{prefix}: {main_model.name} with {self.edit_format} edit format"
|
||||
|
||||
# Check for copy paste mode instead of api
|
||||
if main_model.copy_paste_instead_of_api:
|
||||
output += ", copy paste mode"
|
||||
|
||||
# Check for thinking token budget
|
||||
thinking_tokens = main_model.get_thinking_tokens()
|
||||
if thinking_tokens:
|
||||
|
@ -240,10 +244,18 @@ class Coder:
|
|||
f"Editor model: {main_model.editor_model.name} with"
|
||||
f" {main_model.editor_edit_format} edit format"
|
||||
)
|
||||
|
||||
if main_model.editor_model.copy_paste_instead_of_api:
|
||||
output += ", copy paste mode"
|
||||
|
||||
lines.append(output)
|
||||
|
||||
if weak_model is not main_model:
|
||||
output = f"Weak model: {weak_model.name}"
|
||||
|
||||
if main_model.weak_model.copy_paste_instead_of_api:
|
||||
output += ", copy paste mode"
|
||||
|
||||
lines.append(output)
|
||||
|
||||
# Repo
|
||||
|
@ -416,7 +428,7 @@ class Coder:
|
|||
self.main_model.reasoning_tag if self.main_model.reasoning_tag else REASONING_TAG
|
||||
)
|
||||
|
||||
self.stream = stream and main_model.streaming
|
||||
self.stream = stream and main_model.streaming and not main_model.copy_paste_instead_of_api
|
||||
|
||||
if cache_prompts and self.main_model.cache_control:
|
||||
self.add_cache_headers = True
|
||||
|
|
|
@ -820,6 +820,7 @@ def main(argv=None, input=None, output=None, force_git_root=None, return_coder=F
|
|||
editor_model=args.editor_model,
|
||||
editor_edit_format=args.editor_edit_format,
|
||||
verbose=args.verbose,
|
||||
io=io,
|
||||
)
|
||||
|
||||
# Check if deprecated remove_reasoning is set
|
||||
|
@ -947,7 +948,7 @@ def main(argv=None, input=None, output=None, force_git_root=None, return_coder=F
|
|||
|
||||
if args.cache_prompts and args.map_refresh == "auto":
|
||||
args.map_refresh = "files"
|
||||
|
||||
|
||||
if not main_model.streaming:
|
||||
if args.stream:
|
||||
io.tool_warning(
|
||||
|
|
|
@ -7,11 +7,13 @@ import os
|
|||
import platform
|
||||
import sys
|
||||
import time
|
||||
import uuid
|
||||
from dataclasses import dataclass, fields
|
||||
from pathlib import Path
|
||||
from typing import Optional, Union
|
||||
|
||||
import json5
|
||||
import pyperclip
|
||||
import yaml
|
||||
from PIL import Image
|
||||
|
||||
|
@ -304,9 +306,13 @@ model_info_manager = ModelInfoManager()
|
|||
|
||||
|
||||
class Model(ModelSettings):
|
||||
def __init__(
|
||||
self, model, weak_model=None, editor_model=None, editor_edit_format=None, verbose=False
|
||||
):
|
||||
COPY_PASTE_PREFIX = "cp:"
|
||||
|
||||
def __init__(self, model, weak_model=None, editor_model=None, editor_edit_format=None, verbose=False, io=None):
|
||||
self.io = io
|
||||
self.copy_paste_instead_of_api = model.startswith(self.COPY_PASTE_PREFIX)
|
||||
model = model.removeprefix(self.COPY_PASTE_PREFIX)
|
||||
|
||||
# Map any alias to its canonical name
|
||||
model = MODEL_ALIASES.get(model, model)
|
||||
|
||||
|
@ -378,7 +384,7 @@ class Model(ModelSettings):
|
|||
# If no exact match, try generic settings
|
||||
if not exact_match:
|
||||
self.apply_generic_model_settings(model)
|
||||
|
||||
|
||||
# Apply override settings last if they exist
|
||||
if (
|
||||
self.extra_model_settings
|
||||
|
@ -555,6 +561,9 @@ class Model(ModelSettings):
|
|||
# If weak_model_name is provided, override the model settings
|
||||
if provided_weak_model_name:
|
||||
self.weak_model_name = provided_weak_model_name
|
||||
elif self.copy_paste_instead_of_api:
|
||||
self.weak_model = self
|
||||
return
|
||||
|
||||
if not self.weak_model_name:
|
||||
self.weak_model = self
|
||||
|
@ -568,7 +577,7 @@ class Model(ModelSettings):
|
|||
self.weak_model_name,
|
||||
weak_model=False,
|
||||
)
|
||||
return self.weak_model
|
||||
return
|
||||
|
||||
def commit_message_models(self):
|
||||
return [self.weak_model, self]
|
||||
|
@ -577,6 +586,9 @@ class Model(ModelSettings):
|
|||
# If editor_model_name is provided, override the model settings
|
||||
if provided_editor_model_name:
|
||||
self.editor_model_name = provided_editor_model_name
|
||||
elif self.copy_paste_instead_of_api:
|
||||
self.editor_model_name = self.name
|
||||
|
||||
if editor_edit_format:
|
||||
self.editor_edit_format = editor_edit_format
|
||||
|
||||
|
@ -874,6 +886,9 @@ class Model(ModelSettings):
|
|||
return self.name.startswith("ollama/") or self.name.startswith("ollama_chat/")
|
||||
|
||||
def send_completion(self, messages, functions, stream, temperature=None):
|
||||
if self.copy_paste_instead_of_api:
|
||||
return self.copy_paste_completion(messages)
|
||||
|
||||
if os.environ.get("AIDER_SANITY_CHECK_TURNS"):
|
||||
sanity_check_messages(messages)
|
||||
|
||||
|
@ -917,6 +932,58 @@ class Model(ModelSettings):
|
|||
res = litellm.completion(**kwargs)
|
||||
return hash_object, res
|
||||
|
||||
def copy_paste_completion(self, messages):
    """Route a chat request through the system clipboard instead of an API.

    Copies the concatenated message contents to the clipboard, waits for the
    user to paste the LLM's reply back (detected by polling the clipboard for
    a change), then wraps that reply in a litellm ModelResponse so callers
    receive the usual (hash_object, completion) pair from send_completion.
    """
    # Flatten the conversation into one text blob of every non-empty body.
    prompt_parts = [f"{msg['content']}" for msg in messages if msg.get("content")]
    prompt_text = "\n".join(prompt_parts)

    pyperclip.copy(prompt_text)

    if self.io is not None:
        self.io.tool_output(
            """✓ Request copied to clipboard
→ Paste into LLM web UI
← Copy response back to clipboard

Monitoring clipboard for changes (press Ctrl+C to cancel)..."""
        )

    # Busy-wait (0.5s granularity) until the clipboard differs from what
    # we just observed — that change is taken as the user's pasted reply.
    baseline = pyperclip.paste()
    while pyperclip.paste() == baseline:
        time.sleep(0.5)

    reply = pyperclip.paste()

    # Synthesize a response object shaped like a real API completion;
    # token usage is zeroed since no API call was made.
    completion = litellm.ModelResponse(
        id=f"chatcmpl-{uuid.uuid4()}",
        choices=[
            {
                "message": {
                    "role": "assistant",
                    "content": reply,
                    "function_call": None,
                },
                "finish_reason": "stop",
                "index": 0,
            }
        ],
        created=int(time.time()),
        model=self.name,
        usage={"prompt_tokens": 0, "completion_tokens": 0, "total_tokens": 0},
        object="chat.completion",
    )

    # Hash the request parameters the same way the API path does, so the
    # returned hash_object stays interchangeable with send_completion's.
    request = dict(model=self.name, messages=messages, stream=False)
    digest = hashlib.sha1(json.dumps(request, sort_keys=True).encode())

    return digest, completion
|
||||
|
||||
def simple_send_with_retries(self, messages):
|
||||
from aider.exceptions import LiteLLMExceptions
|
||||
|
||||
|
@ -1047,6 +1114,10 @@ def sanity_check_models(io, main_model):
|
|||
def sanity_check_model(io, model):
|
||||
show = False
|
||||
|
||||
# Skip sanity check if using copy paste mode instead of api
|
||||
if model.copy_paste_instead_of_api:
|
||||
return show
|
||||
|
||||
if model.missing_keys:
|
||||
show = True
|
||||
io.tool_warning(f"Warning: {model} expects these environment variables")
|
||||
|
|
Loading…
Add table
Add a link
Reference in a new issue