fix: Simplify temperature handling in model completion call

Paul Gauthier authored 2025-02-04 11:45:04 -08:00, committed by Paul Gauthier (aider)
parent b1852526f5
commit ddec8325e7


@@ -1612,19 +1612,13 @@ See https://aider.chat/docs/llms/ollama.html#setting-the-context-window-size
         self.io.log_llm_history("TO LLM", format_messages(messages))
 
-        if self.main_model.use_temperature:
-            temp = self.temperature
-        else:
-            temp = None
-
         completion = None
         try:
             hash_object, completion = model.send_completion(
                 messages,
                 functions,
                 self.stream,
-                temp,
-                extra_params=model.extra_params,
+                self.temperature,
             )
             self.chat_completion_call_hashes.append(hash_object.hexdigest())
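
After this change the call site always passes self.temperature and no longer forwards extra_params, so the per-model decision about whether temperature is supported presumably moves inside model.send_completion. Below is a minimal sketch of what that method might look like under that assumption; the Model class body, the sha1-of-kwargs hashing, and the litellm call are illustrative guesses at the receiving side, not this commit's actual implementation.

# Hypothetical sketch, not aider's actual code: a model that absorbs the
# use_temperature decision this commit removes from the caller.
import hashlib
import json

import litellm  # aider routes chat completions through litellm


class Model:
    def __init__(self, name, use_temperature=True, extra_params=None):
        self.name = name
        # False for backends that reject a temperature argument entirely.
        self.use_temperature = use_temperature
        self.extra_params = extra_params or {}

    def send_completion(self, messages, functions, stream, temperature):
        kwargs = dict(model=self.name, messages=messages, stream=stream)
        if functions:
            kwargs["functions"] = functions

        # The caller now always passes its temperature; the model decides
        # whether to forward it, replacing the deleted if/else in the diff.
        if self.use_temperature:
            kwargs["temperature"] = temperature

        # extra_params also stops traveling through the call site; the
        # model merges its own overrides last.
        kwargs.update(self.extra_params)

        # Hash the outgoing request so the caller can record it, matching
        # the hash_object.hexdigest() appended to chat_completion_call_hashes.
        key = json.dumps(kwargs, sort_keys=True, default=repr).encode()
        hash_object = hashlib.sha1(key)

        completion = litellm.completion(**kwargs)
        return hash_object, completion

With the branching pushed into the model, the call site in the diff collapses to a single unconditional call, which is the whole point of the simplification.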