Commit

Merge pull request #591 from haesleinhuepf/provider-prefixes
Support for provider prefixes
haesleinhuepf authored Feb 16, 2025
2 parents 93984d8 + 314ba53 commit 3f15450
Showing 4 changed files with 26 additions and 12 deletions.
README.md (10 changes: 5 additions & 5 deletions)
```diff
@@ -37,13 +37,13 @@ There is a detailed [tutorial](https://github.com/haesleinhuepf/git-bob/blob/mai
 * Make sure to replace `pip install -e .` with a specific git-bob version such as `pip install git-bob==0.16.0`.
 * If your project does not contain a `requirements.txt` file, remove the line `pip install -r requirements.txt`.
 * Configure the LLM you want to use in the workflow files by specifying the `GIT_BOB_LLM_NAME` environment variable. These were tested:
-  * `claude-3-5-sonnet-20241022`
-  * `gpt-4o-2024-08-06`
+  * `anthropic:claude-3-5-sonnet-20241022`
+  * `openai:gpt-4o-2024-08-06`
   * `github_models:gpt-4o`
   * `github_models:meta-llama-3.1-405b-instruct`
-  * `gemini-1.5-pro-002`
-  * `mistral-large-2411` (uses `pixtral-12b-2409` for vision tasks)
-  * `deepseek-chat`
+  * `google:gemini-1.5-pro-002`
+  * `mistral:mistral-large-2411` (uses `pixtral-12b-2409` for vision tasks)
+  * `mistral:deepseek-chat`
 * configure a GitHub secret with the corresponding key from the LLM provider depending on the above configured LLM:
   * `OPENAI_API_KEY`: [OpenAI (gpt)](https://openai.com/blog/openai-api)
   * `ANTHROPIC_API_KEY`: [Anthropic (claude)](https://www.anthropic.com/api)
```
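The renamed entries follow a `provider:model` scheme: everything left of the first colon names the provider. A minimal sketch of the convention (variable names here are illustrative, not git-bob's actual code):

```python
# GIT_BOB_LLM_NAME now carries an explicit provider prefix, e.g.:
llm_name = "anthropic:claude-3-5-sonnet-20241022"

# the text left of the first ":" identifies the provider; the full
# string is still handed to the handler as the model name (see _terminal.py below)
provider = llm_name.split(":")[0]
print(provider)  # anthropic
```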
setup.cfg (3 changes: 3 additions & 0 deletions)
```diff
@@ -54,10 +54,13 @@ console_scripts =
     git-bob-remote = git_bob._terminal:remote_interface
 
 git_bob.prompt_handlers =
+    openai = git_bob._endpoints:prompt_openai
     gpt-4o = git_bob._endpoints:prompt_openai
     gpt4o = git_bob._endpoints:prompt_openai
     o1 = git_bob._endpoints:prompt_openai
+    anthropic = git_bob._endpoints:prompt_anthropic
     claude = git_bob._endpoints:prompt_anthropic
+    google = git_bob._endpoints:prompt_googleai
     gemini = git_bob._endpoints:prompt_googleai
     mistral = git_bob._endpoints:prompt_mistral
     pixtral = git_bob._endpoints:prompt_mistral
```
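These `git_bob.prompt_handlers` entries are setuptools entry points that map a provider or alias name to a prompt function; the three added names make each provider prefix resolvable. As a sketch of how such a group can be enumerated with the standard library (an assumption about what `init_prompt_handlers()` does internally, not its confirmed implementation):

```python
from importlib.metadata import entry_points  # the group= keyword needs Python 3.10+

# load every function registered under the git_bob.prompt_handlers group;
# keys like "openai", "anthropic" and "google" now double as provider prefixes
handlers = {ep.name: ep.load() for ep in entry_points(group="git_bob.prompt_handlers")}

print(sorted(handlers))  # e.g. ['anthropic', 'claude', 'gemini', 'google', ...]
```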
src/git_bob/__init__.py (2 changes: 1 addition & 1 deletion)
```diff
@@ -1,4 +1,4 @@
-__version__ = "0.18.1"
+__version__ = "0.19.0"
 
 
 __all__ = (
```
src/git_bob/_terminal.py (23 changes: 17 additions & 6 deletions)
```diff
@@ -48,7 +48,7 @@ def command_line_interface():
         if value is not None:
             try:
                 signature = inspect.signature(value)
-                model_aliases[key] = signature.parameters['model'].default
+                model_aliases[key] = key + ":" + signature.parameters['model'].default
             except:
                 continue
     print("model aliases:\n", model_aliases)
```
```diff
@@ -106,7 +106,7 @@ def handler(signum, frame):
     print("text: ", text)
     print(f"{agent_name} ask in text", f"{agent_name} ask" in text)
 
-    # handle ask-llm task option
+    # handle ask-llm task option (using model names or aliases to select the LLM)
     if f"{agent_name} ask" in text:
         # example:
         # git-bob ask gpt-4o to solve this issue -> git-bob solve this issue
@@ -125,10 +125,21 @@
 
     prompt_function = None
     prompt_handlers = init_prompt_handlers() # reinitialize, because configured LLM may have changed
-    for key, value in prompt_handlers.items():
-        if key in Config.llm_name:
-            prompt_function = partial(value, model=Config.llm_name)
-            break
+
+    # search for the leading model provider (left of : )
+    if ":" in Config.llm_name:
+        provider = Config.llm_name.split(":")[0]
+        for key, value in prompt_handlers.items():
+            if key == provider:
+                Log().log(f"Selecting prompt handler by provider name ({provider}): " + value.__name__)
+                prompt_function = partial(value, model=Config.llm_name)
+                break
+    else:
+        for key, value in prompt_handlers.items():
+            if key in Config.llm_name:
+                Log().log("Selecting prompt handler by llm_name: " + value.__name__)
+                prompt_function = partial(value, model=Config.llm_name)
+                break
 
     if prompt_function is None:
         llm_name = Config.llm_name[1:]
```
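Condensed into a standalone function (the name `select_prompt_function` is invented for illustration), the new selection logic reads roughly like this: an exact match on the provider prefix wins, and the older substring matching only applies to names without a colon:

```python
from functools import partial

def select_prompt_function(llm_name, prompt_handlers):
    """Pick a handler for llm_name from a {name: function} mapping."""
    if ":" in llm_name:
        # provider-prefixed name: only an exact match on the prefix counts
        provider = llm_name.split(":")[0]
        for key, handler in prompt_handlers.items():
            if key == provider:
                return partial(handler, model=llm_name)
    else:
        # legacy plain name: first handler whose key occurs in the name wins
        for key, handler in prompt_handlers.items():
            if key in llm_name:
                return partial(handler, model=llm_name)
    return None  # caller then falls back to its own error handling
```

Note that an unknown provider prefix yields no handler at all rather than falling through to substring matching.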
