diff --git a/Cargo.toml b/Cargo.toml
index 4541029..adb9eb2 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -30,3 +30,4 @@ regex = "1.10.4"
 serde = { version = "1.0.203", features = ["derive"] }
 serde_json = "1.0.117"
 tokio = { version = "1.38.0", features = ["full"] }
+genai = "=0.1.1"
diff --git a/README.md b/README.md
index 940d1ac..2a6ca41 100644
--- a/README.md
+++ b/README.md
@@ -2,7 +2,7 @@
 
 ![](./assets/shelldon.jpeg)
 
-Shelldon is a command-line tool written in Rust. It provides a set of utilities for executing shell commands, managing prompts, and interacting with OpenAI GPT.
+Shelldon is a command-line tool written in Rust. It provides utilities for executing shell commands, managing prompts, and interacting with multiple LLMs.
 
 Yes, another CLI with GPT features. Shelldon is not intended to be a full GPT client from the terminal; there are a couple of CLIs much better for that and also a lot of applications and even the OpenAI ChatGPT apps. Shelldon is to solve some personal use cases and it is very useful for me; I hope it could be useful for you too. Also, I made it to have fun playing with Rust!
 
@@ -32,10 +32,16 @@ cargo build --release
 
 ## Usage
 
-To use Shelldon, you’ll need to set your OpenAI token. You can do this by setting an environment variable. Here’s how you can set it in your terminal:
+Shelldon supports different AI providers such as Ollama, OpenAI, Gemini, Anthropic, and Cohere. You can control which provider to use with the `--model` flag. For example, `--model claude-3-haiku-20240307` or `--model gemini-1.5-flash-latest`. By default, Shelldon uses `gpt-4o` as the model.
+
+To use Shelldon, you need to set API keys for the providers you want to use. You can do this by setting environment variables. Here’s how to set them in your terminal:
 
 ```sh
-export OPENAI_API_KEY="your-openai-api-key"
+export OPENAI_API_KEY="api-key"
+export ANTHROPIC_API_KEY="api-key"
+export COHERE_API_KEY="api-key"
+export GEMINI_API_KEY="api-key"
+
 ```
 
 Shelldon allows you to integrate GPT features into your shell commands easily. Here are some examples to get you started:
@@ -43,7 +49,7 @@ Shelldon allows you to integrate GPT features into your shell commands easily. H
 ### Running Shell Commands
 
 ```sh
-$ shelldon exec "Show all the graphics ports for the Vagrant machine using Libvirt."
+$ shelldon exec "Show all the graphics ports for the Vagrant machine using Libvirt." --model gpt-4o
 Command to execute: vagrant ssh -c "virsh list --all | grep vagrant | awk '{print \$1}' | xargs -I {} virsh domdisplay {}"
 ? [R]un, [M]odify, [C]opy, [A]bort ›
 ```
@@ -188,7 +194,7 @@ So the ability to handle dynamic prompts with args and use them makes Shelldon a
 
 - [ ] Improve error handling.
 - [ ] Add default prompts.
 - [ ] Implement OpenAI functions?
-- [ ] Implement Ollama? Maybe in the future. Do you need it?
+- [X] Implement Ollama? Maybe in the future. Do you need it?
 
 ## Contributing
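The new usage section above selects the provider purely by model name. A few illustrative invocations (the prompts are made up, and routing an unrecognized name such as `llama3` to a local Ollama instance is an assumption about genai 0.1.1's defaults, not something documented in this patch):

```sh
# Providers named in the README examples above; the matching API key must be set.
shelldon ask "Explain this error." --model claude-3-haiku-20240307
shelldon ask "Explain this error." --model gemini-1.5-flash-latest
# Assumption: a model name genai does not map to a hosted provider falls
# through to a local Ollama instance and needs no API key.
shelldon ask "Explain this error." --model llama3
```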
diff --git a/src/backend/genai.rs b/src/backend/genai.rs
new file mode 100644
index 0000000..2953c87
--- /dev/null
+++ b/src/backend/genai.rs
@@ -0,0 +1,59 @@
+use crate::processor::CompletionGenerator;
+use crate::{Error, Result};
+use async_stream::stream;
+use futures::{stream::LocalBoxStream, StreamExt};
+use genai::{
+    chat::{ChatMessage, ChatRequest, ChatStreamEvent, StreamChunk},
+    client::Client,
+};
+
+pub struct GenAI {
+    client: Client,
+}
+
+impl GenAI {
+    pub fn new() -> Self {
+        Self {
+            client: Client::default(),
+        }
+    }
+}
+
+impl CompletionGenerator for GenAI {
+    async fn generate_completion(
+        &self,
+        model: &str,
+        _temperature: f32,
+        prompt: &str,
+        input: &str,
+    ) -> crate::Result<String> {
+        let req = ChatRequest::new(vec![ChatMessage::system(prompt), ChatMessage::user(input)]);
+        let resp = self.client.exec_chat(model, req.clone(), None).await?;
+        resp.content.ok_or(Error::EmptyResponse)
+    }
+
+    async fn stream_completion(
+        &self,
+        model: &str,
+        _temperature: f32,
+        prompt: &str,
+        input: &str,
+    ) -> Result<LocalBoxStream<'static, String>> {
+        let req = ChatRequest::new(vec![ChatMessage::system(prompt), ChatMessage::user(input)]);
+        let resp = self
+            .client
+            .exec_chat_stream(model, req.clone(), None)
+            .await?;
+
+        let async_stream = stream! {
+            let mut stream = resp.stream;
+            while let Some(Ok(stream_event)) = stream.next().await {
+                if let ChatStreamEvent::Chunk(StreamChunk { content }) = stream_event {
+                    yield content;
+                }
+            };
+        };
+
+        Ok(Box::pin(async_stream))
+    }
+}
diff --git a/src/backend/mod.rs b/src/backend/mod.rs
index d8c3087..661726c 100644
--- a/src/backend/mod.rs
+++ b/src/backend/mod.rs
@@ -1 +1,2 @@
+pub mod genai;
 pub mod openai;
diff --git a/src/backend/openai.rs b/src/backend/openai.rs
index f307830..a7b990c 100644
--- a/src/backend/openai.rs
+++ b/src/backend/openai.rs
@@ -9,7 +9,7 @@ use async_openai::{
     Client,
 };
 use async_stream::stream;
-use futures::stream::BoxStream;
+use futures::stream::LocalBoxStream;
 pub struct OpenAI {
     client: Client<OpenAIConfig>,
 }
@@ -70,7 +70,7 @@ impl CompletionGenerator for OpenAI {
         temperature: f32,
         prompt: &str,
         input: &str,
-    ) -> Result<BoxStream<'static, String>> {
+    ) -> Result<LocalBoxStream<'static, String>> {
         let messages = [
             ChatCompletionRequestSystemMessageArgs::default()
                 .content(prompt)
diff --git a/src/command/ask.rs b/src/command/ask.rs
index ba1da61..85a752e 100644
--- a/src/command/ask.rs
+++ b/src/command/ask.rs
@@ -1,6 +1,6 @@
 use super::CommonArgs;
 use crate::{
-    backend::openai::OpenAI,
+    backend::genai::GenAI,
     command::{parse_prompt, read_input},
     config::Config,
     processor::CompletionProcessor,
@@ -18,7 +18,7 @@ pub struct AskArgs {
 }
 
 pub async fn handle_ask(config: Config, args: AskArgs) -> Result<()> {
-    let processor = CompletionProcessor::new(OpenAI::new()?);
+    let processor = CompletionProcessor::new(GenAI::new());
     let input = read_input(&args.common.input)?;
     let prompt = parse_prompt(config, args.common.prompt, args.common.set, "")?;
     let mut completion = processor
diff --git a/src/command/exec.rs b/src/command/exec.rs
index 4c5079b..fe72535 100644
--- a/src/command/exec.rs
+++ b/src/command/exec.rs
@@ -1,6 +1,6 @@
 use super::{parse_prompt, read_input, CommonArgs};
 use crate::{
-    backend::openai::OpenAI,
+    backend::genai::GenAI,
     config::Config,
     processor::CompletionProcessor,
     system::{self, copy_to_clipboard, run_cmd},
@@ -36,7 +36,7 @@ pub struct ExecArgs {
 }
 
 pub async fn handle_exec(config: Config, args: ExecArgs) -> Result<()> {
-    let processor = CompletionProcessor::new(OpenAI::new()?);
+    let processor = CompletionProcessor::new(GenAI::new());
     let input = read_input(&args.common.input)?;
     let default_prompt = SHELL_PROMPT
         .replace("{shell}", &system::get_current_shell())
diff --git a/src/error.rs b/src/error.rs
index 18e9362..799a8b9 100644
--- a/src/error.rs
+++ b/src/error.rs
@@ -12,6 +12,8 @@
     CommandFailed { command: String },
     #[display(fmt = "API key not set")]
    APIKeyNotSet,
+    #[display(fmt = "Empty response")]
+    EmptyResponse,
 
     #[from]
     OpenAI(async_openai::error::OpenAIError),
@@ -23,4 +25,6 @@
     Serde(serde_json::Error),
     #[from]
     Dialoguer(dialoguer::Error),
+    #[from]
+    GenAI(genai::Error),
 }
diff --git a/src/processor.rs b/src/processor.rs
index b6d2200..014e502 100644
--- a/src/processor.rs
+++ b/src/processor.rs
@@ -1,5 +1,5 @@
 use crate::Result;
-use futures::stream::BoxStream;
+use futures::stream::LocalBoxStream;
 
 pub trait CompletionGenerator {
     async fn generate_completion(
@@ -16,7 +16,7 @@
         temperature: f32,
         prompt: &str,
         input: &str,
-    ) -> Result<BoxStream<'static, String>>;
+    ) -> Result<LocalBoxStream<'static, String>>;
 }
 
 pub struct CompletionProcessor<G: CompletionGenerator> {
@@ -48,7 +48,7 @@ impl<G: CompletionGenerator> CompletionProcessor<G> {
         input: &str,
         model: &str,
         temperature: f32,
-    ) -> Result<BoxStream<'static, String>> {
+    ) -> Result<LocalBoxStream<'static, String>> {
         self.generator
             .stream_completion(model, temperature, prompt, input)
             .await
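A note on the `BoxStream` → `LocalBoxStream` change threaded through this patch: `LocalBoxStream` is the boxed stream type without the `Send` bound, which matches the non-`Send` futures that the trait's native `async fn` methods produce, so the chunk stream must be consumed on the thread that created it. A minimal sketch of draining such a stream — the helper, the stand-in stream, and the current-thread runtime below are illustrative assumptions, not code from this patch:

```rust
use futures::{stream::LocalBoxStream, StreamExt};

// Illustrative helper: drain a stream of completion chunks as they arrive.
// LocalBoxStream is !Send, so this future must stay on the thread that
// created the stream (e.g. a current-thread Tokio runtime or a LocalSet).
async fn print_chunks(mut chunks: LocalBoxStream<'static, String>) {
    while let Some(chunk) = chunks.next().await {
        print!("{chunk}");
    }
}

fn main() {
    // Stand-in for what stream_completion returns; the backends build the
    // same shape with Box::pin around an async_stream block.
    let chunks: LocalBoxStream<'static, String> =
        Box::pin(futures::stream::iter(["Hello, ".to_string(), "world!".to_string()]));

    tokio::runtime::Builder::new_current_thread()
        .build()
        .expect("failed to build runtime")
        .block_on(print_chunks(chunks));
}
```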