ensure the LLM response is logged even if token limits reached #705

This commit is contained in:
Paul Gauthier 2024-06-23 16:22:05 -07:00
parent ecb3d81055
commit 716e4d99a0

View file

@@ -1041,14 +1041,14 @@ class Coder:
         except KeyboardInterrupt:
             self.keyboard_interrupt()
             interrupted = True
+        finally:
             if self.partial_response_content:
                 self.io.ai_output(self.partial_response_content)
             elif self.partial_response_function_call:
                 # TODO: push this into subclasses
                 args = self.parse_partial_args()
                 if args:
                     self.io.ai_output(json.dumps(args, indent=4))

         if interrupted:
             raise KeyboardInterrupt