mirror of
https://github.com/Aider-AI/aider.git
synced 2025-05-28 00:05:01 +00:00
fix: Simplify temperature handling in model completion call
This commit is contained in:
parent
b1852526f5
commit
ddec8325e7
1 changed file with 1 addition and 7 deletions
|
@@ -1612,19 +1612,13 @@ See https://aider.chat/docs/llms/ollama.html#setting-the-context-window-size
|
|||
|
||||
self.io.log_llm_history("TO LLM", format_messages(messages))
|
||||
|
||||
if self.main_model.use_temperature:
|
||||
temp = self.temperature
|
||||
else:
|
||||
temp = None
|
||||
|
||||
completion = None
|
||||
try:
|
||||
hash_object, completion = model.send_completion(
|
||||
messages,
|
||||
functions,
|
||||
self.stream,
|
||||
temp,
|
||||
extra_params=model.extra_params,
|
||||
self.temperature,
|
||||
)
|
||||
self.chat_completion_call_hashes.append(hash_object.hexdigest())
|
||||
|
||||
|
|
Loading…
Add table
Add a link
Reference in a new issue