From 1af38e943d3052088ce7735423eb9128ed41770e Mon Sep 17 00:00:00 2001
From: Paul Gauthier
Date: Thu, 3 Aug 2023 11:45:51 -0300
Subject: [PATCH 1/4] copy

---
 docs/faq.md | 50 +++++++++++++++++++++++++++++++-------------------
 1 file changed, 31 insertions(+), 19 deletions(-)

diff --git a/docs/faq.md b/docs/faq.md
index 0bacf5dd3..67c52b9ae 100644
--- a/docs/faq.md
+++ b/docs/faq.md
@@ -109,14 +109,41 @@ In these cases, here are some things you might try:
 
 Aider does not officially support use with LLMs other than OpenAI's
 gpt-3.5-turbo and gpt-4 and their variants.
+This is because while it's "easy" to connect aider to a new LLM, it's "hard"
+to actually teach new LLMs to *edit* code.
 
-It seems to require model-specific tuning to get prompts and
-editing formats working well with a new model. For example, GPT-3.5 and GPT-4 use very
-different prompts and editing formats in aider right now.
+GPT-3.5 is just barely able to understand how to modify existing source code files,
+and GPT-4 is quite good at it.
+Getting them working that well was a significant undertaking, involving
+[specific code editing prompts and backends for each model and extensive benchmarking](https://aider.chat/docs/benchmarks.html).
 Adopting new LLMs will probably require a similar effort to tailor the
-prompting and edit formats.
+prompts and editing backends.
 
 That said, aider does provide some features to experiment with other models.
+Numerous users have already done experiments with numerous models.
+So far, no one has reported much success in working with them the way aider
+can work with GPT-3.5 and GPT-4.
+
+Once we see signs that a *particular* model is capable of code editing,
+it would be reasonable for aider to attempt to officially support such a model.
+Until then, aider will simply maintain experimental support for using alternative models
+as describe below.
+
+### OpenAI API compatible LLMs
+
+If you can make the model accessible via an OpenAI compatible API,
+you can use `--openai-api-base` to connect to a different API endpoint.
+
+Here are some
+[GitHub issues which may contain relevant information](https://github.com/paul-gauthier/aider/issues?q=is%3Aissue+%22openai-api-base%22+).
+
+### Local LLMs
+
+[LocalAI](https://github.com/go-skynet/LocalAI)
+and
+[SimpleAI](https://github.com/lhenault/simpleAI)
+look like relevant tools to serve local models via a compatible API:
+
 
 ### Azure
 
@@ -149,21 +176,6 @@ See the
 [official Azure documentation on using OpenAI models](https://learn.microsoft.com/en-us/azure/cognitive-services/openai/chatgpt-quickstart?tabs=command-line&pivots=programming-language-python)
 for more information on how to populate the above configuration values.
 
-### Other LLMs
-
-If you can make the model accessible via an OpenAI compatible API,
-you can use `--openai-api-base` to connect to a different API endpoint.
-
-Here are some
-[GitHub issues which may contain relevant information](https://github.com/paul-gauthier/aider/issues?q=is%3Aissue+%22openai-api-base%22+).
-
-### Local LLMs
-
-[LocalAI](https://github.com/go-skynet/LocalAI)
-and
-[SimpleAI](https://github.com/lhenault/simpleAI)
-look like relevant tools to serve local models via a compatible API:
-
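
Note: the `--openai-api-base` route described in the patch above assumes the alternative model sits behind an OpenAI compatible API. As a rough sketch only (not part of the patch; it assumes the pre-1.0 `openai` Python package, and the URL, key, and model name are placeholders), such a server is expected to accept standard chat completion requests:

# Hypothetical sketch: point the pre-1.0 openai client at an
# OpenAI compatible server (e.g. a LocalAI instance). Placeholder values.
import openai

openai.api_base = "http://localhost:8080/v1"  # assumed local server address
openai.api_key = "not-used-by-most-local-servers"

resp = openai.ChatCompletion.create(
    model="gpt-3.5-turbo",  # whatever model name the local server exposes
    messages=[{"role": "user", "content": "Say hello"}],
)
print(resp["choices"][0]["message"]["content"])
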
From 9ef730fd97643ec8e0dbb65cd2090b91c261cb01 Mon Sep 17 00:00:00 2001
From: Paul Gauthier
Date: Thu, 3 Aug 2023 11:48:24 -0300
Subject: [PATCH 2/4] copy

---
 docs/faq.md | 5 ++---
 1 file changed, 2 insertions(+), 3 deletions(-)

diff --git a/docs/faq.md b/docs/faq.md
index 67c52b9ae..51560f120 100644
--- a/docs/faq.md
+++ b/docs/faq.md
@@ -116,7 +116,7 @@ GPT-3.5 is just barely able to understand how to modify existing source code fil
 and GPT-4 is quite good at it.
 Getting them working that well was a significant undertaking, involving
 [specific code editing prompts and backends for each model and extensive benchmarking](https://aider.chat/docs/benchmarks.html).
-Adopting new LLMs will probably require a similar effort to tailor the
+Officially supporting new LLMs will probably require a similar effort to tailor the
 prompts and editing backends.
 
 That said, aider does provide some features to experiment with other models.
@@ -126,8 +126,7 @@ can work with GPT-3.5 and GPT-4.
 
 Once we see signs that a *particular* model is capable of code editing,
 it would be reasonable for aider to attempt to officially support such a model.
-Until then, aider will simply maintain experimental support for using alternative models
-as describe below.
+Until then, aider will simply maintain experimental support for using alternative models.
 
 ### OpenAI API compatible LLMs
 

From 3ef16827fc9794888123494268dc55f6782d6d34 Mon Sep 17 00:00:00 2001
From: Paul Gauthier
Date: Thu, 3 Aug 2023 12:07:53 -0300
Subject: [PATCH 3/4] show a progress bar if there are no tags/idents caches

---
 aider/repomap.py | 21 ++++++++++++++++++---
 1 file changed, 18 insertions(+), 3 deletions(-)

diff --git a/aider/repomap.py b/aider/repomap.py
index e8f6e6434..defc4ed4b 100644
--- a/aider/repomap.py
+++ b/aider/repomap.py
@@ -14,6 +14,7 @@ from diskcache import Cache
 from pygments.lexers import guess_lexer_for_filename
 from pygments.token import Token
 from pygments.util import ClassNotFound
+from tqdm import tqdm
 
 from aider import models
 
@@ -74,6 +75,8 @@ class RepoMap:
 
     ctags_disabled_reason = "ctags not initialized"
 
+    cache_missing = False
+
     def __init__(
         self,
         map_tokens=1024,
@@ -232,13 +235,19 @@ class RepoMap:
         return True
 
     def load_tags_cache(self):
-        self.TAGS_CACHE = Cache(Path(self.root) / self.TAGS_CACHE_DIR)
+        path = Path(self.root) / self.TAGS_CACHE_DIR
+        if not path.exists():
+            self.cache_missing = True
+        self.TAGS_CACHE = Cache(path)
 
     def save_tags_cache(self):
         pass
 
     def load_ident_cache(self):
-        self.IDENT_CACHE = Cache(Path(self.root) / self.IDENT_CACHE_DIR)
+        path = Path(self.root) / self.IDENT_CACHE_DIR
+        if not path.exists():
+            self.cache_missing = True
+        self.IDENT_CACHE = Cache(path)
 
     def save_ident_cache(self):
         pass
@@ -291,7 +300,13 @@ class RepoMap:
         fnames = set(chat_fnames).union(set(other_fnames))
         chat_rel_fnames = set()
 
-        for fname in sorted(fnames):
+        fnames = sorted(fnames)
+
+        if self.cache_missing:
+            fnames = tqdm(fnames)
+            self.cache_missing = False
+
+        for fname in fnames:
             # dump(fname)
 
             rel_fname = os.path.relpath(fname, self.root)
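
Note: the repomap change above shows a progress bar only on the first run, when the on-disk tags/idents caches do not exist yet and the per-file scan is slow. A standalone sketch of the same pattern (illustrative names only, not aider's actual classes or cache layout):

# Hypothetical, self-contained version of the pattern in the patch above:
# detect a missing diskcache directory and wrap the work loop in tqdm
# only on that first, slow pass.
from pathlib import Path

from diskcache import Cache
from tqdm import tqdm


def summarize_files(root, fnames, cache_dir=".example.tags.cache"):
    cache_path = Path(root) / cache_dir
    cache_missing = not cache_path.exists()  # True only before the cache is built
    cache = Cache(cache_path)

    fnames = sorted(fnames)
    if cache_missing:
        # tqdm wraps the iterable and prints a progress bar as the loop runs.
        fnames = tqdm(fnames)

    for fname in fnames:
        if fname not in cache:
            # Stand-in for the expensive per-file work (ctags/lexing in aider).
            cache[fname] = len(Path(fname).read_text(errors="ignore"))
    return cache
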
From 9dfc23c6608977cd255abe6f52ab270ef74558fd Mon Sep 17 00:00:00 2001
From: Paul Gauthier
Date: Thu, 3 Aug 2023 12:43:08 -0300
Subject: [PATCH 4/4] copy

---
 docs/faq.md | 21 ++++++++++++++++++---
 1 file changed, 18 insertions(+), 3 deletions(-)

diff --git a/docs/faq.md b/docs/faq.md
index 51560f120..8ca60e4ee 100644
--- a/docs/faq.md
+++ b/docs/faq.md
@@ -186,14 +186,29 @@ see there's a base coder with base prompts, and then there are a number of
 different specific coder implementations.
 
-While it's not yet documented how to add new coder subsystems, you may be able
-to modify an existing implementation or use it as a template to add another.
-
 If you're thinking about experimenting with system prompts
 this document about
 [benchmarking GPT-3.5 and GPT-4 on code editing](https://aider.chat/docs/benchmarks.html)
 might be useful background.
 
+While it's not well documented how to add new coder subsystems, you may be able
+to modify an existing implementation or use it as a template to add another.
+
+To get started, try looking at and modifying these files.
+
+The wholefile coder is currently used by GPT-3.5 by default. You can manually select it with `--edit-format whole`.
+
+- wholefile_coder.py
+- wholefile_prompts.py
+
+The editblock coder is currently used by GPT-4 by default. You can manually select it with `--edit-format diff`.
+
+- editblock_coder.py
+- editblock_prompts.py
+
+When experimenting with coder backends, it helps to run aider with `--verbose --no-pretty` so you can see
+all the raw information being sent to/from GPT in the conversation.
+
 ## Can I run aider in Google Colab?
 
 User [imabutahersiddik](https://github.com/imabutahersiddik)
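
Note: the `--edit-format whole` and `--edit-format diff` backends mentioned in the patch above differ in what the model is asked to return. A very rough sketch of the idea (illustrative only, not aider's actual coder classes; it assumes the model's reply has already been parsed into strings):

# Illustrative contrast between the two edit formats referenced in the patch.
# "Whole file": the model returns the entire updated file.
# "Edit block" (diff): the model returns original/updated text pairs.
from pathlib import Path


def apply_whole_file_edit(path: str, new_content: str) -> None:
    # Whole-file format: overwrite the file with the model's full reply.
    Path(path).write_text(new_content)


def apply_edit_block(path: str, original: str, updated: str) -> None:
    # Edit-block format: swap one original/updated pair into the file.
    text = Path(path).read_text()
    if original not in text:
        raise ValueError(f"original block not found in {path}")
    Path(path).write_text(text.replace(original, updated, 1))
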