fix: Use temperature setting from model configuration

Paul Gauthier 2024-09-12 14:24:21 -07:00 committed by Paul Gauthier (aider)
parent 8aee4d25ed
commit 1755d2e0f4
3 changed files with 11 additions and 2 deletions

aider/coders/base_coder.py

@@ -1381,6 +1381,11 @@ class Coder:
             self.io.log_llm_history("TO LLM", format_messages(messages))
 
+        if self.main_model.use_temperature:
+            temp = self.temperature
+        else:
+            temp = None
+
         completion = None
         try:
             hash_object, completion = send_completion(
@@ -1388,7 +1393,7 @@ class Coder:
                 messages,
                 functions,
                 self.stream,
-                self.temperature,
+                temp,
                 extra_headers=model.extra_headers,
                 max_tokens=model.max_tokens,
             )
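
Taken together, these two hunks stop passing self.temperature unconditionally and instead pass None whenever the model's settings say it cannot take a temperature. A minimal sketch of the resulting control flow (send_completion is stubbed here for illustration; only the attribute names come from the diff):

def send_completion_stub(model_name, messages, functions, stream, temperature=None, **kwargs):
    # Hypothetical stand-in for aider's send_completion: a None temperature
    # means "do not send a temperature parameter at all".
    print("temperature:", temperature)

class CoderSketch:
    def __init__(self, main_model, temperature=0):
        self.main_model = main_model
        self.temperature = temperature

    def send(self, messages):
        # Mirrors the change: only use the configured temperature when
        # the model settings allow one.
        if self.main_model.use_temperature:
            temp = self.temperature
        else:
            temp = None
        send_completion_stub("some-model", messages, None, stream=False, temperature=temp)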

aider/models.py

@@ -78,6 +78,7 @@ class ModelSettings:
     cache_control: bool = False
     caches_by_default: bool = False
     use_system_prompt: bool = True
+    use_temperature: bool = True
 
 
 # https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo
@@ -433,6 +434,7 @@ MODEL_SETTINGS = [
         use_repo_map=True,
         reminder="user",
         use_system_prompt=False,
+        use_temperature=False,
     ),
     ModelSettings(
         "o1-mini",
@@ -441,6 +443,7 @@ MODEL_SETTINGS = [
         use_repo_map=True,
         reminder="user",
         use_system_prompt=False,
+        use_temperature=False,
     ),
 ]
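
The new flag defaults to True, so every existing entry keeps its current behavior; only the two o1 entries opt out, alongside use_system_prompt, since the o1 API rejects both parameters. A reduced sketch of how the dataclass and its overrides fit together (field list trimmed to the ones touched here):

from dataclasses import dataclass

@dataclass
class ModelSettingsSketch:
    name: str
    use_system_prompt: bool = True
    use_temperature: bool = True

# The o1 entries flip both flags off; everything else keeps the defaults.
o1_mini = ModelSettingsSketch("o1-mini", use_system_prompt=False, use_temperature=False)
gpt_4o = ModelSettingsSketch("gpt-4o")
assert gpt_4o.use_temperature and not o1_mini.use_temperature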

aider/sendchat.py

@@ -60,9 +60,10 @@ def send_completion(
     kwargs = dict(
         model=model_name,
         messages=messages,
-        # temperature=temperature,
         stream=stream,
     )
+    if temperature is not None:
+        kwargs["temperature"] = temperature
 
     if functions is not None:
         function = functions[0]
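
The pattern here is to build the keyword dict without a temperature and attach one only when the caller supplied a value, so the None produced by the Coder change above means the parameter never reaches the API. A self-contained sketch of that pattern (the function name is illustrative):

def build_kwargs(model_name, messages, stream, temperature=None):
    kwargs = dict(model=model_name, messages=messages, stream=stream)
    if temperature is not None:
        # The key is only present when a temperature was actually requested;
        # "is not None" keeps a legitimate temperature of 0 working.
        kwargs["temperature"] = temperature
    return kwargs

assert "temperature" not in build_kwargs("o1-mini", [], stream=False)
assert build_kwargs("gpt-4o", [], stream=False, temperature=0)["temperature"] == 0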