Merge pull request #797 from paul-gauthier/help
Get interactive help with /help <question>
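For example, from inside an aider chat session you can now ask something like this (the question below is just an illustration):

```
> /help How do I add all the files in my repo to the chat?
```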
.github/workflows/pages.yml | 8 (vendored)

@@ -11,7 +11,7 @@ on:
     branches:
       - "main"
     paths:
-      - "website/**"
+      - "aider/website/**"

   # Allows you to run this workflow manually from the Actions tab
   workflow_dispatch:
@@ -33,7 +33,7 @@ jobs:
     runs-on: ubuntu-latest
     defaults:
       run:
-        working-directory: website
+        working-directory: aider/website
     steps:
       - name: Checkout
         uses: actions/checkout@v3
@@ -43,7 +43,7 @@ jobs:
           ruby-version: '3.3' # Not needed with a .ruby-version file
           bundler-cache: true # runs 'bundle install' and caches installed gems automatically
           cache-version: 0 # Increment this number if you need to re-download cached gems
-          working-directory: '${{ github.workspace }}/website'
+          working-directory: '${{ github.workspace }}/aider/website'
       - name: Setup Pages
         id: pages
         uses: actions/configure-pages@v3
@@ -56,7 +56,7 @@ jobs:
         # Automatically uploads an artifact from the './_site' directory by default
         uses: actions/upload-pages-artifact@v1
         with:
-          path: "website/_site/"
+          path: "aider/website/_site/"

   # Deployment job
   deploy:
@@ -29,7 +29,7 @@ and works best with GPT-4o, Claude 3.5 Sonnet, Claude 3 Opus and DeepSeek Coder
 <!--[[[cog
 # We can't do this here: {% include get-started.md %}
 # Because this page is rendered by GitHub as the repo README
-cog.out(open("website/_includes/get-started.md").read())
+cog.out(open("aider/website/_includes/get-started.md").read())
 ]]]-->

 You can get started quickly like this:
@@ -96,6 +96,10 @@ class YamlHelpFormatter(argparse.HelpFormatter):
 # Place in your home dir, or at the root of your git repo.
 ##########################################################

+# Note: You can only put OpenAI and Anthropic API keys in the yaml
+# config file. Keys for all APIs can be stored in a .env file
+# https://aider.chat/docs/config/dotenv.html
+
 """

     def _format_action(self, action):
@@ -2,6 +2,7 @@ from .base_coder import Coder
 from .editblock_coder import EditBlockCoder
 from .editblock_fenced_coder import EditBlockFencedCoder
 from .editblock_func_coder import EditBlockFunctionCoder
+from .help_coder import HelpCoder
 from .single_wholefile_func_coder import SingleWholeFileFunctionCoder
 from .udiff_coder import UnifiedDiffCoder
 from .wholefile_coder import WholeFileCoder
@@ -16,4 +17,5 @@ __all__ = [
     EditBlockFunctionCoder,
     SingleWholeFileFunctionCoder,
     UnifiedDiffCoder,
+    HelpCoder,
 ]
@@ -4,6 +4,7 @@ import hashlib
 import json
 import mimetypes
 import os
+import platform
 import re
 import sys
 import threading
@@ -76,11 +77,13 @@ class Coder:
         edit_format=None,
         io=None,
         from_coder=None,
+        summarize_from_coder=True,
         **kwargs,
     ):
         from . import (
             EditBlockCoder,
             EditBlockFencedCoder,
+            HelpCoder,
             UnifiedDiffCoder,
             WholeFileCoder,
         )
@@ -108,7 +111,7 @@ class Coder:
             # confused the new LLM. It may try and imitate it, disobeying
             # the system prompt.
             done_messages = from_coder.done_messages
-            if edit_format != from_coder.edit_format and done_messages:
+            if edit_format != from_coder.edit_format and done_messages and summarize_from_coder:
                 done_messages = from_coder.summarizer.summarize_all(done_messages)

             # Bring along context from the old Coder
@@ -132,6 +135,8 @@ class Coder:
             res = WholeFileCoder(main_model, io, **kwargs)
         elif edit_format == "udiff":
             res = UnifiedDiffCoder(main_model, io, **kwargs)
+        elif edit_format == "help":
+            res = HelpCoder(main_model, io, **kwargs)
         else:
             raise ValueError(f"Unknown edit format {edit_format}")

@@ -555,18 +560,16 @@ class Coder:
             files_reply = "Ok, any changes I propose will be to those files."
         elif repo_content:
             files_content = self.gpt_prompts.files_no_full_files_with_repo_map
-            files_reply = (
-                "Ok, based on your requests I will suggest which files need to be edited and then"
-                " stop and wait for your approval."
-            )
+            files_reply = self.gpt_prompts.files_no_full_files_with_repo_map_reply
         else:
             files_content = self.gpt_prompts.files_no_full_files
             files_reply = "Ok."

-        files_messages += [
-            dict(role="user", content=files_content),
-            dict(role="assistant", content=files_reply),
-        ]
+        if files_content:
+            files_messages += [
+                dict(role="user", content=files_content),
+                dict(role="assistant", content=files_reply),
+            ]

         images_message = self.get_images_message()
         if images_message is not None:
@@ -730,7 +733,15 @@ class Coder:
     def fmt_system_prompt(self, prompt):
         lazy_prompt = self.gpt_prompts.lazy_prompt if self.main_model.lazy else ""

-        prompt = prompt.format(fence=self.fence, lazy_prompt=lazy_prompt)
+        platform_text = (
+            f"The user's system is `{platform.platform()}` according to python platform.platform()"
+        )
+
+        prompt = prompt.format(
+            fence=self.fence,
+            lazy_prompt=lazy_prompt,
+            platform=platform_text,
+        )
         return prompt

     def format_messages(self):
@@ -739,7 +750,8 @@ class Coder:

         example_messages = []
         if self.main_model.examples_as_sys_msg:
-            main_sys += "\n# Example conversations:\n\n"
+            if self.gpt_prompts.example_messages:
+                main_sys += "\n# Example conversations:\n\n"
             for msg in self.gpt_prompts.example_messages:
                 role = msg["role"]
                 content = self.fmt_system_prompt(msg["content"])
@@ -28,6 +28,11 @@ Only include the files that are most likely to actually need to be edited.
 Don't include files that might contain relevant context, just files that will need to be changed.
 """ # noqa: E501

+    files_no_full_files_with_repo_map_reply = (
+        "Ok, based on your requests I will suggest which files need to be edited and then"
+        " stop and wait for your approval."
+    )
+
     repo_content_prefix = """Here are summaries of some files present in my git repository.
 Do not propose changes to these files, treat them as *read-only*.
 If you need to edit any of these files, ask me to *add them to the chat* first.
aider/coders/help_coder.py | 17 (new file)

@@ -0,0 +1,17 @@
from ..dump import dump # noqa: F401
from .base_coder import Coder
from .help_prompts import HelpPrompts


class HelpCoder(Coder):
    edit_format = "help"

    def __init__(self, *args, **kwargs):
        self.gpt_prompts = HelpPrompts()
        super().__init__(*args, **kwargs)

    def get_edits(self, mode="update"):
        return []

    def apply_edits(self, edits):
        pass
aider/coders/help_prompts.py | 44 (new file)

@@ -0,0 +1,44 @@
# flake8: noqa: E501

from .base_prompts import CoderPrompts


class HelpPrompts(CoderPrompts):
    main_system = """You are an expert on the AI coding tool called Aider.
Answer the user's questions about how to use aider.

The user is currently chatting with you using aider, to write and edit code.

Use the provided aider documentation *if it is relevant to the user's question*.

Include a bulleted list of urls to the aider docs that might be relevant for the user to read.
Include *bare* urls. *Do not* make [markdown links](http://...).
For example:
- https://aider.chat/docs/usage.html
- https://aider.chat/docs/faq.html

If you don't know the answer, say so and suggest some relevant aider doc urls.

If asks for something that isn't possible with aider, be clear about that.
Don't suggest a solution that isn't supported.

Be helpful but concise.

Unless the question indicates otherwise, assume the user wants to use aider as a CLI tool.
{platform}
"""

    example_messages = []
    system_reminder = ""

    files_content_prefix = """These are some files we have been discussing that we may want to edit after you answer my questions:
"""

    files_no_full_files = "I am not sharing any files with you."

    files_no_full_files_with_repo_map = ""
    files_no_full_files_with_repo_map_reply = ""

    repo_content_prefix = """Here are summaries of some files present in my git repository.
We may look at these in more detail after you answer my questions.
"""
@@ -7,6 +7,7 @@ from pathlib import Path
 import git

 from aider import models, prompts, voice
+from aider.help import Help
 from aider.llm import litellm
 from aider.scrape import Scraper
 from aider.utils import is_image_file
@@ -32,6 +33,8 @@ class Commands:

         self.voice_language = voice_language

+        self.help = None
+
     def cmd_model(self, args):
         "Switch to a new LLM"

@@ -625,32 +628,73 @@ class Commands:
         for file in chat_files:
             self.io.tool_output(f" {file}")

-    def cmd_help(self, args):
-        "Show help about all commands"
+    def basic_help(self):
         commands = sorted(self.get_commands())
         pad = max(len(cmd) for cmd in commands)
         pad = "{cmd:" + str(pad) + "}"
         for cmd in commands:
             cmd_method_name = f"cmd_{cmd[1:]}"
             cmd_method = getattr(self, cmd_method_name, None)
             cmd = pad.format(cmd=cmd)
             if cmd_method:
                 description = cmd_method.__doc__
                 self.io.tool_output(f"{cmd} {description}")
             else:
                 self.io.tool_output(f"{cmd} No description available.")
+        self.io.tool_output()
+        self.io.tool_output("Use `/help <question>` to ask questions about how to use aider.")
+
+    def cmd_help(self, args):
+        "Ask questions about aider"
+
+        if not args.strip():
+            self.basic_help()
+            return
+
+        from aider.coders import Coder
+
+        if not self.help:
+            self.help = Help()
+
+        coder = Coder.create(
+            main_model=self.coder.main_model,
+            io=self.io,
+            from_coder=self.coder,
+            edit_format="help",
+            summarize_from_coder=False,
+        )
+        user_msg = self.help.ask(args)
+        user_msg += """
+# Announcement lines from when this session of aider was launched:
+
+"""
+        user_msg += "\n".join(self.coder.get_announcements()) + "\n"
+
+        assistant_msg = coder.run(user_msg)
+
+        self.coder.cur_messages += [
+            dict(role="user", content=user_msg),
+            dict(role="assistant", content=assistant_msg),
+        ]

     def get_help_md(self):
         "Show help about all commands in markdown"

-        res = ""
+        res = """
+|Command|Description|
+|:------|:----------|
+"""
         commands = sorted(self.get_commands())
         for cmd in commands:
             cmd_method_name = f"cmd_{cmd[1:]}"
             cmd_method = getattr(self, cmd_method_name, None)
             if cmd_method:
                 description = cmd_method.__doc__
-                res += f"- **{cmd}** {description}\n"
+                res += f"| **{cmd}** | {description} |\n"
             else:
-                res += f"- **{cmd}**\n"
+                res += f"| **{cmd}** | |\n"

         res += "\n"
         return res

     def cmd_voice(self, args):
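The new get_help_md() renders one markdown table row per command. Based on the docstrings visible in this diff, two of its rows would look like this (illustrative, not captured program output):

```
|Command|Description|
|:------|:----------|
| **/help** | Ask questions about aider |
| **/model** | Switch to a new LLM |
```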
aider/help.py | 120 (new, executable file)

@@ -0,0 +1,120 @@
#!/usr/bin/env python

import os
import warnings
from pathlib import Path

import importlib_resources

from aider import __version__
from aider.dump import dump  # noqa: F401
from aider.help_pats import exclude_website_pats

warnings.simplefilter("ignore", category=FutureWarning)


def get_package_files():
    for path in importlib_resources.files("aider.website").iterdir():
        if path.is_file():
            yield path
        elif path.is_dir():
            for subpath in path.rglob("*.md"):
                yield subpath


def fname_to_url(filepath):
    website = "website/"
    index = "/index.md"
    md = ".md"

    docid = ""
    if filepath.startswith("website/_includes/"):
        pass
    elif filepath.startswith(website):
        docid = filepath[len(website) :]

        if filepath.endswith(index):
            filepath = filepath[: -len(index)] + "/"
        elif filepath.endswith(md):
            filepath = filepath[: -len(md)] + ".html"

        docid = "https://aider.chat/" + filepath

    return docid


def get_index():
    from llama_index.core import (
        Document,
        StorageContext,
        VectorStoreIndex,
        load_index_from_storage,
    )
    from llama_index.core.node_parser import MarkdownNodeParser

    dname = Path.home() / ".aider" / "caches" / ("help." + __version__)

    if dname.exists():
        storage_context = StorageContext.from_defaults(
            persist_dir=dname,
        )
        index = load_index_from_storage(storage_context)
    else:
        parser = MarkdownNodeParser()

        nodes = []
        for fname in get_package_files():
            fname = Path(fname)
            if any(fname.match(pat) for pat in exclude_website_pats):
                continue

            doc = Document(
                text=importlib_resources.files("aider.website")
                .joinpath(fname)
                .read_text(encoding="utf-8"),
                metadata=dict(
                    filename=fname.name,
                    extension=fname.suffix,
                    url=fname_to_url(str(fname)),
                ),
            )
            nodes += parser.get_nodes_from_documents([doc])

        index = VectorStoreIndex(nodes, show_progress=True)
        dname.parent.mkdir(parents=True, exist_ok=True)
        index.storage_context.persist(dname)

    return index


class Help:
    def __init__(self):
        from llama_index.core import Settings
        from llama_index.embeddings.huggingface import HuggingFaceEmbedding

        os.environ["TOKENIZERS_PARALLELISM"] = "true"
        Settings.embed_model = HuggingFaceEmbedding(model_name="BAAI/bge-small-en-v1.5")

        index = get_index()

        self.retriever = index.as_retriever(similarity_top_k=20)

    def ask(self, question):
        nodes = self.retriever.retrieve(question)

        context = f"""# Question: {question}

# Relevant docs:

"""

        for node in nodes:
            url = node.metadata.get("url", "")
            if url:
                url = f' from_url="{url}"'

            context += f"<doc{url}>\n"
            context += node.text
            context += "\n</doc>\n\n"

        return context
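A minimal sketch of how the new Help class is used, mirroring the /help command above (the question string is only an example):

```python
from aider.help import Help

help_obj = Help()  # builds or loads the cached llama-index vector index of the aider docs
context = help_obj.ask("How do I add files to the chat?")  # example question

# `context` holds the question plus <doc ...> blocks of the most relevant
# documentation pages; cmd_help() sends it to the "help" coder as the user message.
print(context[:200])
```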
aider/help_pats.py | 10 (new file)

@@ -0,0 +1,10 @@
exclude_website_pats = [
    "examples/**",
    "_posts/**",
    "HISTORY.md",
    "docs/benchmarks*md",
    "docs/ctags.md",
    "docs/unified-diffs.md",
    "docs/leaderboards/index.md",
    "assets/**",
]
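get_index() above skips any documentation file whose path matches one of these patterns via Path.match(). A small illustration with hypothetical paths (not taken from the PR):

```python
from pathlib import Path

from aider.help_pats import exclude_website_pats

# Hypothetical doc paths, filtered the same way get_index() filters package files
for fname in ["docs/usage.md", "HISTORY.md", "_posts/2024-05-01-example.md"]:
    skipped = any(Path(fname).match(pat) for pat in exclude_website_pats)
    print(f"{fname}: {'skipped' if skipped else 'indexed'}")
```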
aider/tests/test_help.py | 31 (new file)

@@ -0,0 +1,31 @@
import unittest

from aider.help import Help


class TestHelp(unittest.TestCase):
    def test_init(self):
        help_inst = Help()
        self.assertIsNotNone(help_inst.retriever)

    def test_ask_without_mock(self):
        help_instance = Help()
        question = "What is aider?"
        result = help_instance.ask(question)

        self.assertIn(f"# Question: {question}", result)
        self.assertIn("<doc", result)
        self.assertIn("</doc>", result)
        self.assertGreater(len(result), 100)  # Ensure we got a substantial response

        # Check for some expected content (adjust based on your actual help content)
        self.assertIn("aider", result.lower())
        self.assertIn("ai", result.lower())
        self.assertIn("chat", result.lower())

        # Assert that there are more than 5 <doc> entries
        self.assertGreater(result.count("<doc"), 5)


if __name__ == "__main__":
    unittest.main()
@@ -8,7 +8,7 @@ import aider


 def check_version(print_cmd):
-    fname = Path.home() / ".aider/versioncheck"
+    fname = Path.home() / ".aider" / "caches" / "versioncheck"
     day = 60 * 60 * 24
     if fname.exists() and time.time() - fname.stat().st_mtime < day:
         return
@@ -35,8 +35,7 @@ def check_version(print_cmd):
         else:
             print_cmd(f"{py} -m pip install --upgrade aider-chat")

-        if not fname.parent.exists():
-            fname.parent.mkdir()
+        fname.parent.mkdir(parents=True, exist_ok=True)
         fname.touch()
         return is_update_available
     except Exception as err:
@@ -37,3 +37,8 @@ nav_external_links:
     url: "https://discord.gg/Tv2uQnR88V"

 repository: paul-gauthier/aider
+
+callouts:
+  tip:
+    title: Tip
+    color: green
aider/website/_includes/env-keys-tip.md | 6 (new file)

@@ -0,0 +1,6 @@
{: .tip }
All API keys can be stored in a
[.env file](/docs/config/dotenv.html).
Only OpenAI and Anthropic keys can be stored in the
[YAML config file](/docs/config/aider_conf.html).
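For reference, a minimal .env consistent with the tip above (key names are the usual environment variables; values are placeholders):

```
OPENAI_API_KEY=<your-openai-key>
ANTHROPIC_API_KEY=<your-anthropic-key>
```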
aider/website/_includes/help-tip.md | 4 (new file)

@@ -0,0 +1,4 @@
{: .tip }
Use `/help <question>` to ask aider about itself.
You can ask how to customize settings, troubleshoot errors
or use different LLMs.
@@ -5,8 +5,6 @@
 Model foobar: Unknown context window size and costs, using sane defaults.
 ```

-*You can probably ignore the unknown context window size and token costs warning.*
-
 If you specify a model that aider has never heard of, you will get
 this warning.
 This means aider doesn't know the context window size and token costs
@@ -18,6 +16,9 @@ See the docs on
 [configuring advanced model settings](/docs/config/adv-model-settings.html)
 for details on how to remove this warning.

+{: .tip }
+You can probably ignore the unknown context window size and token costs warning.
+
 ## Did you mean?

 If aider isn't familiar with the model you've specified,
(37 binary image files are renamed in this PR with no content changes; the before/after sizes shown by GitHub are identical for each.)
@@ -4,6 +4,10 @@
 # Place in your home dir, or at the root of your git repo.
 ##########################################################

+# Note: You can only put OpenAI and Anthropic API keys in the yaml
+# config file. Keys for all APIs can be stored in a .env file
+# https://aider.chat/docs/config/dotenv.html
+
 ##########
 # options:

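A minimal .aider.conf.yml consistent with the note above (option names follow aider's command-line flags; values are placeholders):

```yaml
# Only OpenAI and Anthropic keys belong here; keys for other providers go in .env
openai-api-key: <your-openai-key>
anthropic-api-key: <your-anthropic-key>
```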
(9 more binary image files are renamed with no content changes; before/after sizes are identical.)