This commit is contained in:
ther0bster 2025-05-13 16:48:46 -07:00 committed by GitHub
commit b744bc03a6
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
3 changed files with 91 additions and 7 deletions

View file

@ -218,6 +218,10 @@ class Coder:
output = f"{prefix}: {main_model.name} with {self.edit_format} edit format" output = f"{prefix}: {main_model.name} with {self.edit_format} edit format"
# Check for copy paste mode instead of api
if main_model.copy_paste_instead_of_api:
output += ", copy paste mode"
# Check for thinking token budget # Check for thinking token budget
thinking_tokens = main_model.get_thinking_tokens() thinking_tokens = main_model.get_thinking_tokens()
if thinking_tokens: if thinking_tokens:
@ -240,10 +244,18 @@ class Coder:
f"Editor model: {main_model.editor_model.name} with" f"Editor model: {main_model.editor_model.name} with"
f" {main_model.editor_edit_format} edit format" f" {main_model.editor_edit_format} edit format"
) )
if main_model.editor_model.copy_paste_instead_of_api:
output += ", copy paste mode"
lines.append(output) lines.append(output)
if weak_model is not main_model: if weak_model is not main_model:
output = f"Weak model: {weak_model.name}" output = f"Weak model: {weak_model.name}"
if main_model.weak_model.copy_paste_instead_of_api:
output += ", copy paste mode"
lines.append(output) lines.append(output)
# Repo # Repo
@ -416,7 +428,7 @@ class Coder:
self.main_model.reasoning_tag if self.main_model.reasoning_tag else REASONING_TAG self.main_model.reasoning_tag if self.main_model.reasoning_tag else REASONING_TAG
) )
self.stream = stream and main_model.streaming self.stream = stream and main_model.streaming and not main_model.copy_paste_instead_of_api
if cache_prompts and self.main_model.cache_control: if cache_prompts and self.main_model.cache_control:
self.add_cache_headers = True self.add_cache_headers = True

View file

@ -820,6 +820,7 @@ def main(argv=None, input=None, output=None, force_git_root=None, return_coder=F
editor_model=args.editor_model, editor_model=args.editor_model,
editor_edit_format=args.editor_edit_format, editor_edit_format=args.editor_edit_format,
verbose=args.verbose, verbose=args.verbose,
io=io,
) )
# Check if deprecated remove_reasoning is set # Check if deprecated remove_reasoning is set
@ -947,7 +948,7 @@ def main(argv=None, input=None, output=None, force_git_root=None, return_coder=F
if args.cache_prompts and args.map_refresh == "auto": if args.cache_prompts and args.map_refresh == "auto":
args.map_refresh = "files" args.map_refresh = "files"
if not main_model.streaming: if not main_model.streaming:
if args.stream: if args.stream:
io.tool_warning( io.tool_warning(

View file

@ -7,11 +7,13 @@ import os
import platform import platform
import sys import sys
import time import time
import uuid
from dataclasses import dataclass, fields from dataclasses import dataclass, fields
from pathlib import Path from pathlib import Path
from typing import Optional, Union from typing import Optional, Union
import json5 import json5
import pyperclip
import yaml import yaml
from PIL import Image from PIL import Image
@ -304,9 +306,13 @@ model_info_manager = ModelInfoManager()
class Model(ModelSettings): class Model(ModelSettings):
def __init__( COPY_PASTE_PREFIX = "cp:"
self, model, weak_model=None, editor_model=None, editor_edit_format=None, verbose=False
): def __init__(self, model, weak_model=None, editor_model=None, editor_edit_format=None, verbose=False, io=None):
self.io = io
self.copy_paste_instead_of_api = model.startswith(self.COPY_PASTE_PREFIX)
model = model.removeprefix(self.COPY_PASTE_PREFIX)
# Map any alias to its canonical name # Map any alias to its canonical name
model = MODEL_ALIASES.get(model, model) model = MODEL_ALIASES.get(model, model)
@ -378,7 +384,7 @@ class Model(ModelSettings):
# If no exact match, try generic settings # If no exact match, try generic settings
if not exact_match: if not exact_match:
self.apply_generic_model_settings(model) self.apply_generic_model_settings(model)
# Apply override settings last if they exist # Apply override settings last if they exist
if ( if (
self.extra_model_settings self.extra_model_settings
@ -555,6 +561,9 @@ class Model(ModelSettings):
# If weak_model_name is provided, override the model settings # If weak_model_name is provided, override the model settings
if provided_weak_model_name: if provided_weak_model_name:
self.weak_model_name = provided_weak_model_name self.weak_model_name = provided_weak_model_name
elif self.copy_paste_instead_of_api:
self.weak_model = self
return
if not self.weak_model_name: if not self.weak_model_name:
self.weak_model = self self.weak_model = self
@ -568,7 +577,7 @@ class Model(ModelSettings):
self.weak_model_name, self.weak_model_name,
weak_model=False, weak_model=False,
) )
return self.weak_model return
def commit_message_models(self): def commit_message_models(self):
return [self.weak_model, self] return [self.weak_model, self]
@ -577,6 +586,9 @@ class Model(ModelSettings):
# If editor_model_name is provided, override the model settings # If editor_model_name is provided, override the model settings
if provided_editor_model_name: if provided_editor_model_name:
self.editor_model_name = provided_editor_model_name self.editor_model_name = provided_editor_model_name
elif self.copy_paste_instead_of_api:
self.editor_model_name = self.name
if editor_edit_format: if editor_edit_format:
self.editor_edit_format = editor_edit_format self.editor_edit_format = editor_edit_format
@ -874,6 +886,9 @@ class Model(ModelSettings):
return self.name.startswith("ollama/") or self.name.startswith("ollama_chat/") return self.name.startswith("ollama/") or self.name.startswith("ollama_chat/")
def send_completion(self, messages, functions, stream, temperature=None): def send_completion(self, messages, functions, stream, temperature=None):
if self.copy_paste_instead_of_api:
return self.copy_paste_completion(messages)
if os.environ.get("AIDER_SANITY_CHECK_TURNS"): if os.environ.get("AIDER_SANITY_CHECK_TURNS"):
sanity_check_messages(messages) sanity_check_messages(messages)
@ -917,6 +932,58 @@ class Model(ModelSettings):
res = litellm.completion(**kwargs) res = litellm.completion(**kwargs)
return hash_object, res return hash_object, res
def copy_paste_completion(self, messages):
    """Run one completion turn via the system clipboard instead of an API call.

    Copies the concatenated chat messages to the clipboard for the user to
    paste into an LLM web UI, then polls the clipboard until its contents
    change and treats the new contents as the assistant's reply.

    :param messages: list of chat message dicts with ``role``/``content`` keys.
    :return: ``(hash_object, completion)`` — a sha1 hash of the canonicalized
        request kwargs and a ``litellm.ModelResponse`` wrapping the pasted
        reply, matching the shape returned by ``send_completion``.
    """
    # Join all non-empty message contents into one prompt blob.
    # str() preserves the original f-string's coercion of non-string content.
    formatted_messages = "\n".join(
        str(msg["content"]) for msg in messages if msg.get("content")
    )
    pyperclip.copy(formatted_messages)

    if self.io is not None:
        self.io.tool_output(
            """✓ Request copied to clipboard
Paste into LLM web UI
Copy response back to clipboard
Monitoring clipboard for changes (press Ctrl+C to cancel)..."""
        )

    # Poll until the clipboard differs from what we just copied.
    # Read the clipboard once per iteration and reuse that value as the
    # response, so a second change between "loop exit" and "final read"
    # cannot hand us different text than the one that ended the loop.
    # Ctrl+C (KeyboardInterrupt) propagates to the caller to cancel.
    last_clipboard = pyperclip.paste()
    while True:
        response = pyperclip.paste()
        if response != last_clipboard:
            break
        time.sleep(0.5)

    completion = litellm.ModelResponse(
        id=f"chatcmpl-{uuid.uuid4()}",
        choices=[
            {
                "message": {
                    "role": "assistant",
                    "content": response,
                    "function_call": None,
                },
                "finish_reason": "stop",
                "index": 0,
            }
        ],
        created=int(time.time()),
        model=self.name,
        # Token counts are unknowable in copy-paste mode; report zeros.
        usage={"prompt_tokens": 0, "completion_tokens": 0, "total_tokens": 0},
        object="chat.completion",
    )

    # Mirror send_completion's hash: sha1 over the canonicalized kwargs.
    kwargs = dict(
        model=self.name,
        messages=messages,
        stream=False,
    )
    key = json.dumps(kwargs, sort_keys=True).encode()
    hash_object = hashlib.sha1(key)

    return hash_object, completion
def simple_send_with_retries(self, messages): def simple_send_with_retries(self, messages):
from aider.exceptions import LiteLLMExceptions from aider.exceptions import LiteLLMExceptions
@ -1047,6 +1114,10 @@ def sanity_check_models(io, main_model):
def sanity_check_model(io, model): def sanity_check_model(io, model):
show = False show = False
# Skip sanity check if using copy paste mode instead of api
if model.copy_paste_instead_of_api:
return show
if model.missing_keys: if model.missing_keys:
show = True show = True
io.tool_warning(f"Warning: {model} expects these environment variables") io.tool_warning(f"Warning: {model} expects these environment variables")