From b70f8ea7bcb77885c84e8f1ef3f5f6019f8b8a1c Mon Sep 17 00:00:00 2001
From: Paul Gauthier
Date: Wed, 7 Jun 2023 12:37:10 -0700
Subject: [PATCH] Use models...max_context_tokens

---
 aider/commands.py | 16 +++++++++-------
 1 file changed, 9 insertions(+), 7 deletions(-)

diff --git a/aider/commands.py b/aider/commands.py
index 4a0450504..dd42c2e85 100644
--- a/aider/commands.py
+++ b/aider/commands.py
@@ -121,22 +121,24 @@ class Commands:
             tokens = len(self.tokenizer.encode(quoted))
             res.append((tokens, relative_fname))
 
-        print("Context window usage, in k-tokens:")
+        print("Context window usage, in tokens:")
         print()
 
+        def fmt(v):
+            return format(int(v), ",").rjust(6)
+
         total = 0
         for tk, msg in res:
-            tk /= 1024
             total += tk
-            print(f"{tk:6.3f} {msg}")
+            print(f"{fmt(tk)} {msg}")
 
         print()
-        print(f"{total:6.3f} total")
+        print(f"{fmt(total)} total")
 
-        limit = 8 if self.coder.main_model == models.GPT4 else 4
+        limit = self.coder.main_model.max_context_tokens
         remaining = limit - total
-        print(f"{remaining:6.3f} remaining")
-        print(f"{limit:6.3f} max context window")
+        print(f"{fmt(remaining)} remaining")
+        print(f"{fmt(limit)} max context window")
 
     def cmd_undo(self, args):
         "Undo the last git commit if it was done by aider"
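
Note on the change: the patch replaces the hardcoded 8k/4k GPT-4 limit with the model's own max_context_tokens attribute and reports usage in whole tokens (comma-grouped, right-justified) instead of fractional k-tokens. Below is a minimal standalone sketch of the new report format; the Model dataclass and the sample file/token values are hypothetical stand-ins for illustration, only fmt and the print layout mirror the patch.

# Minimal sketch (not part of the patch): demonstrates the new token report.
# "Model" here is a hypothetical stand-in; in aider the limit comes from
# self.coder.main_model.max_context_tokens.
from dataclasses import dataclass


@dataclass
class Model:
    name: str
    max_context_tokens: int


def fmt(v):
    # Same formatting as the patch: integer tokens, comma-grouped,
    # right-justified to 6 characters.
    return format(int(v), ",").rjust(6)


main_model = Model("gpt-4", 8 * 1024)

# (tokens, filename) pairs; the values below are made up for illustration.
res = [(1789, "aider/commands.py"), (912, "aider/models.py")]

print("Context window usage, in tokens:")
print()

total = 0
for tk, msg in res:
    total += tk
    print(f"{fmt(tk)} {msg}")

print()
print(f"{fmt(total)} total")

limit = main_model.max_context_tokens
remaining = limit - total
print(f"{fmt(remaining)} remaining")
print(f"{fmt(limit)} max context window")

With the sample values above, the sketch prints:

Context window usage, in tokens:

 1,789 aider/commands.py
   912 aider/models.py

 2,701 total
 5,491 remaining
 8,192 max context window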