Mirror of https://github.com/Aider-AI/aider.git (synced 2025-05-30 17:24:59 +00:00)
fix: Simplify temperature handling in model completion call

commit ddec8325e7 (parent b1852526f5)
1 changed file with 1 addition and 7 deletions
@@ -1612,19 +1612,13 @@ See https://aider.chat/docs/llms/ollama.html#setting-the-context-window-size
         self.io.log_llm_history("TO LLM", format_messages(messages))

-        if self.main_model.use_temperature:
-            temp = self.temperature
-        else:
-            temp = None
-
         completion = None
         try:
             hash_object, completion = model.send_completion(
                 messages,
                 functions,
                 self.stream,
-                temp,
-                extra_params=model.extra_params,
+                self.temperature,
             )
             self.chat_completion_call_hashes.append(hash_object.hexdigest())
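The deleted lines show the coder deciding whether to send a temperature at all (`temp = self.temperature` vs. `temp = None`) and forwarding `extra_params` by hand; after the change it passes `self.temperature` straight through, which only makes sense if `model.send_completion` applies the model's own settings internally. Below is a minimal sketch of a `send_completion` that owns that logic. It is illustrative only, not the actual aider implementation: the `Model` class, its defaults, and the `tools` conversion are assumptions, and the only real external call is `litellm.completion`.

# Illustrative sketch only -- not the actual aider code. It assumes a Model
# class with `use_temperature` and `extra_params` attributes and shows why the
# caller-side branch and explicit extra_params argument become redundant once
# send_completion resolves both itself.
import hashlib
import json

import litellm


class Model:
    def __init__(self, name, use_temperature=True, extra_params=None):
        self.name = name
        self.use_temperature = use_temperature  # False means "never send temperature"
        self.extra_params = extra_params or {}

    def send_completion(self, messages, functions, stream, temperature=None):
        kwargs = dict(model=self.name, messages=messages, stream=stream)

        # The temperature decision lives with the model, not the coder.
        if self.use_temperature is not False:
            kwargs["temperature"] = 0 if temperature is None else temperature

        # Per-model overrides are merged here, so callers stop passing them.
        kwargs.update(self.extra_params)

        if functions:
            kwargs["tools"] = [{"type": "function", "function": f} for f in functions]

        # Hash the request so the caller can record it, mirroring the
        # hash_object.hexdigest() appended to chat_completion_call_hashes above.
        key = json.dumps(kwargs, sort_keys=True, default=str).encode()
        hash_object = hashlib.sha1(key)

        return hash_object, litellm.completion(**kwargs)

With something shaped like this on the model side, the coder's call reduces to the four positional arguments seen in the diff, with `self.temperature,` as the only added line.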