From 716e4d99a0a74913de2efdcb175a45fa9aacad31 Mon Sep 17 00:00:00 2001
From: Paul Gauthier
Date: Sun, 23 Jun 2024 16:22:05 -0700
Subject: [PATCH] ensure the LLM response is logged even if token limits
 reached #705

---
 aider/coders/base_coder.py | 16 ++++++++--------
 1 file changed, 8 insertions(+), 8 deletions(-)

diff --git a/aider/coders/base_coder.py b/aider/coders/base_coder.py
index c5d86c38c..477446eb9 100755
--- a/aider/coders/base_coder.py
+++ b/aider/coders/base_coder.py
@@ -1041,14 +1041,14 @@ class Coder:
         except KeyboardInterrupt:
             self.keyboard_interrupt()
             interrupted = True
-
-        if self.partial_response_content:
-            self.io.ai_output(self.partial_response_content)
-        elif self.partial_response_function_call:
-            # TODO: push this into subclasses
-            args = self.parse_partial_args()
-            if args:
-                self.io.ai_output(json.dumps(args, indent=4))
+        finally:
+            if self.partial_response_content:
+                self.io.ai_output(self.partial_response_content)
+            elif self.partial_response_function_call:
+                # TODO: push this into subclasses
+                args = self.parse_partial_args()
+                if args:
+                    self.io.ai_output(json.dumps(args, indent=4))
 
         if interrupted:
             raise KeyboardInterrupt