| 1 | +"""Query Ollama model |
| 2 | +""" |
| 3 | + |
| 4 | +import requests |
| 5 | +import json |
| 6 | +import subprocess |
| 7 | + |
| 8 | + |
def init_ollama():
    """Start the local Ollama server in the background."""
    # `ollama serve` launches the local API server (default port 11434); the
    # process is left running in the background and its output is discarded.
    subprocess.Popen(
        ["ollama", "serve"], stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL
    )


def query_ollama(prompt: str, stream: bool = True) -> str:
    """Ask the local Ollama API to translate a natural language request into a bash command."""
    url = "http://localhost:11434/api/generate"
    data = {
        "model": "llama3.1",
        "prompt": f"Convert the following natural language command to a bash command: {prompt}",
        "stream": stream,
    }

    if stream:
        # Streamed responses arrive as one JSON object per line; accumulate the
        # text chunks and echo them as they arrive.
        full_response = ""
        with requests.post(url, json=data, stream=True) as response:
            response.raise_for_status()
            for line in response.iter_lines():
                if line:
                    json_response = json.loads(line)
                    chunk = json_response.get("response", "")
                    full_response += chunk
                    print(chunk, end="", flush=True)
                    if json_response.get("done", False):
                        print()  # end the streamed output with a newline
                        break
        return full_response.strip()
    else:
        response = requests.post(url, json=data)
        if response.status_code == 200:
            response_text = response.json()["response"].strip()
            print(response_text)
            return response_text
        else:
            error_message = f"Error: Unable to get response from Ollama. Status code: {response.status_code}"
            print(error_message)
            return error_message


if __name__ == "__main__":
    user_input = "List files"
    if user_input.lower() == "exit":
        print("Exiting. Goodbye!")
    else:
        command = query_ollama(user_input, True)
        print(f"Generated command: {command}")
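# The hardcoded "List files" prompt and the "exit" check above suggest the script
# is ultimately meant to be driven interactively. A minimal sketch of such a loop,
# assuming input() is the intended source of prompts (not part of the original file):
#
#     init_ollama()
#     while True:
#         user_input = input("> ")
#         if user_input.lower() == "exit":
#             print("Exiting. Goodbye!")
#             break
#         command = query_ollama(user_input)
#         print(f"Generated command: {command}")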