
Commit 2d98382 (v0.1)
1 parent: 8acd05f

10 files changed: +265 -0 lines changed

README.md (+40)
# Ask How CLI

A CLI that converts natural language to shell commands using Ollama.

## Installation

Requires:
- Python 3.7+
- Ollama

```bash
git clone https://github.com/ColabDog/how-ai-cli.git
cd how-ai-cli
pip install .
```
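The CLI queries the llama3.1 model by default (see app/config.py), so you will likely also need to pull it before the first run:

```bash
ollama pull llama3.1
```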
## Using Ask AI CLI

To use the AI CLI, simply prefix your natural language query with the `how` command:

```bash
how "List all files"
```

### Design Considerations

Why not a "# List all files" syntax?
- Many CLI programs and shells use "#" for comments, so I chose not to overload it.

Why Ollama?
- Where possible, your CLI input shouldn't be fed into non-local models. Ollama's development team is also impressive in its delivery speed and quality of shipping, making it a great tool choice.

Why not use instructor?
- Instructor did not really fit this use case, as LLMs can already reliably produce bash code snippets fenced with backticks.

Why not use online LLMs?
- There are alternative solutions that do this already.

app/__init__.py

Whitespace-only changes.

app/cli.py (+39)
1+
import subprocess
2+
import sys
3+
from app.query_ollama import query_ollama
4+
from app.extract_bash import extract_bash_commands
5+
from app.menu import row_based_rich_menu
6+
7+
8+
def process_query(query: str):
9+
response = query_ollama(query)
10+
bash_commands = extract_bash_commands(response)
11+
if len(bash_commands) == 0:
12+
print("No command could be produced")
13+
return 0
14+
selected_bash_command = row_based_rich_menu(bash_commands)
15+
print(f"Received: {query}")
16+
try:
17+
result = subprocess.run(selected_bash_command, check=True, shell=True)
18+
return result
19+
except subprocess.CalledProcessError as e:
20+
print(f"An error occurred while executing the command: {e}")
21+
return 1
22+
23+
24+
def main():
25+
if len(sys.argv) > 1:
26+
query = " ".join(sys.argv[1:])
27+
return process_query(query)
28+
else:
29+
print("Please provide a query.")
30+
sys.exit(1)
31+
32+
33+
if __name__ == "__main__":
34+
import argparse
35+
36+
parser = argparse.ArgumentParser()
37+
parser.add_argument("query")
38+
args = parser.parse_args()
39+
main(args.query)
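For a quick sanity check without installing the console script, process_query can be driven directly; a minimal sketch (the query string is just an example):

```python
from app.cli import process_query

# Streams the model's reply, shows the selection menu, then runs the chosen command.
process_query("show the current directory")
```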

app/config.py (+1)
# Default Ollama model tag; must match a model pulled locally (e.g. via `ollama pull llama3.1`).
DEFAULT_MODEL = "llama3.1"

app/extract_bash.py (+38)
1+
"""Given an LLM response, extract bash commands.
2+
"""
3+
4+
import re
5+
6+
7+
def extract_bash_commands(llm_response: str) -> list:
8+
"""
9+
Extracts a list of bash commands from a given LLM response.
10+
11+
Args:
12+
llm_response (str): The response from the LLM containing the bash commands.
13+
14+
Returns:
15+
list: A list of the extracted bash commands.
16+
"""
17+
# Start of the bash script in the response
18+
19+
commands = []
20+
pattern = re.compile(r"```(?:bash)?(.*?)```", re.DOTALL)
21+
matches = pattern.findall(llm_response)
22+
for match in matches:
23+
commands.extend(match.strip().split("\n"))
24+
25+
return commands
26+
27+
28+
if __name__ == "__main__":
29+
from query_ollama import query_ollama
30+
31+
user_input = "List files"
32+
if user_input.lower() == "exit":
33+
print("Exiting. Goodbye!")
34+
command = query_ollama(user_input)
35+
print(f"Generated command: {command}")
36+
commands = extract_bash_commands(command)
37+
print("Commands:")
38+
print(commands)
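For reference, a quick sketch of what extract_bash_commands returns for a typical fenced response (the response string here is made up):

```python
from app.extract_bash import extract_bash_commands

response = "Sure, here you go:\n```bash\nls -la\n```"
print(extract_bash_commands(response))  # ['ls -la']
```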

app/menu.py (+31)
1+
from rich.console import Console
2+
from rich.panel import Panel
3+
from rich.prompt import Prompt
4+
from rich.text import Text
5+
6+
console = Console()
7+
8+
9+
def row_based_rich_menu(options: list) -> str:
10+
11+
menu_items = []
12+
for i, option in enumerate(options, 1):
13+
item = Text.assemble((f"{i}. ", "bold magenta"), (option, "cyan"))
14+
menu_items.append(item)
15+
16+
console.print(
17+
Panel(
18+
"\n".join(str(item) for item in menu_items),
19+
title="[bold blue]Main Menu[/bold blue]",
20+
expand=False,
21+
border_style="bold green",
22+
)
23+
)
24+
25+
choice = Prompt.ask(
26+
"[bold yellow]Enter your choice[/bold yellow]",
27+
choices=[str(i) for i in range(1, len(options) + 1)],
28+
show_choices=False,
29+
)
30+
31+
return options[int(choice) - 1]
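The menu can be exercised on its own as well; a small sketch with placeholder options:

```python
from app.menu import row_based_rich_menu

# Placeholder commands purely for demonstration.
selected = row_based_rich_menu(["ls -la", "ls"])
print(f"Selected: {selected}")
```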

app/query_ollama.py (+56)
1+
"""Query Ollama model
2+
"""
3+
4+
import requests
5+
import json
6+
import subprocess
7+
8+
9+
def init_ollama():
10+
subprocess.Popen(
11+
["ollama", "start"], stdout=subprocess.PIPE, stderr=subprocess.PIPE
12+
)
13+
subprocess.Popen(
14+
["ollama", "serve"], stdout=subprocess.PIPE, stderr=subprocess.PIPE
15+
)
16+
17+
18+
def query_ollama(prompt, stream: bool = True):
19+
url = "http://localhost:11434/api/generate"
20+
data = {
21+
"model": "llama3.1",
22+
"prompt": f"Convert the following natural language command to a bash command: {prompt}",
23+
"stream": stream,
24+
}
25+
26+
if stream:
27+
full_response = ""
28+
with requests.post(url, json=data, stream=True) as response:
29+
for line in response.iter_lines():
30+
if line:
31+
json_response = json.loads(line)
32+
chunk = json_response.get("response", "")
33+
full_response += chunk
34+
print(chunk, end="", flush=True)
35+
if json_response.get("done", False):
36+
print() # Print a newline at the end
37+
break
38+
return full_response.strip()
39+
else:
40+
response = requests.post(url, json=data)
41+
if response.status_code == 200:
42+
response_text = response.json()["response"].strip()
43+
print(response_text)
44+
return response_text
45+
else:
46+
error_message = f"Error: Unable to get response from Ollama. Status code: {response.status_code}"
47+
print(error_message)
48+
return error_message
49+
50+
51+
if __name__ == "__main__":
52+
user_input = "List files"
53+
if user_input.lower() == "exit":
54+
print("Exiting. Goodbye!")
55+
command = query_ollama(user_input, True)
56+
print(f"Generated command: {command}")

requirements.txt (+2)
requests
rich

setup.py (+37)
1+
from setuptools import setup, find_packages
2+
3+
with open("README.md", "r", encoding="utf-8") as fh:
4+
long_description = fh.read()
5+
6+
with open("requirements.txt", "r") as req_file:
7+
requirements = req_file.read().splitlines()
8+
9+
10+
setup(
11+
name="how",
12+
version="0.0.1",
13+
author="Jacky W",
14+
author_email="[email protected]",
15+
description="A CLI tool that uses Ollama to convert natural language to CLI commands.",
16+
long_description=long_description,
17+
long_description_content_type="text/markdown",
18+
url="https://github.com/yourusername/ollama-cli",
19+
packages=find_packages(),
20+
classifiers=[
21+
"Development Status :: 3 - Alpha",
22+
"Intended Audience :: Developers",
23+
"License :: OSI Approved :: MIT License",
24+
"Operating System :: OS Independent",
25+
"Programming Language :: Python :: 3",
26+
"Programming Language :: Python :: 3.7",
27+
"Programming Language :: Python :: 3.8",
28+
"Programming Language :: Python :: 3.9",
29+
],
30+
python_requires=">=3.4",
31+
install_requires=requirements,
32+
entry_points={
33+
"console_scripts": [
34+
"how=app.cli:main",
35+
],
36+
},
37+
)

typescript (+21)
Script started on Sun Jul 28 22:31:45 2024
jackywong@Jackys-MBP-2 ask-ai-cli % history
 1178  ls
 1179  rm -rf temp
 1180  mkdir temp
 1181  cd temp
 1182  ls
 1183  reflex init --template chat
 1184  ls
 1185  reflex run
 1186  ls
 1187  ls
 1188  ls
 1189  ls
 1190  ls
 1191  sudo sl
 1192  sudo ls
 1193  ls
jackywong@Jackys-MBP-2 ask-ai-cli % exit

Script done on Sun Jul 28 22:32:05 2024
