refactor: conditionally output cached tokens message based on verbosity

This commit is contained in:
Paul Gauthier 2024-08-29 06:36:22 -07:00 committed by Paul Gauthier (aider)
parent cbc59d919b
commit a47cb9cdea

View file

@@ -1040,8 +1040,8 @@ class Coder:
completion.usage, "prompt_cache_hit_tokens", 0
) or getattr(completion.usage, "cache_read_input_tokens", 0)
-        # if self.verbose:
-        self.io.tool_output(f"Warmed {format_tokens(cache_hit_tokens)} cached tokens.")
+        if self.verbose:
+            self.io.tool_output(f"Warmed {format_tokens(cache_hit_tokens)} cached tokens.")
self.cache_warming_thread = threading.Timer(0, warm_cache_worker)
self.cache_warming_thread.daemon = True