ensure the LLM response is logged even if token limits are reached #705

This commit is contained in:
Paul Gauthier 2024-06-23 16:22:05 -07:00
parent ecb3d81055
commit 716e4d99a0

View file

@ -1041,7 +1041,7 @@ class Coder:
except KeyboardInterrupt:
self.keyboard_interrupt()
interrupted = True
finally:
if self.partial_response_content:
self.io.ai_output(self.partial_response_content)
elif self.partial_response_function_call: