Commit 0.56
justUmen committed Nov 11, 2024
1 parent 4da1a28 commit 840e62d
Showing 3 changed files with 83 additions and 70 deletions.
5 changes: 3 additions & 2 deletions README.md
@@ -1,4 +1,4 @@
# 🔗 Comfyui : Bjornulf_custom_nodes v0.54 🔗
# 🔗 Comfyui : Bjornulf_custom_nodes v0.56 🔗

A list of 61 custom nodes for Comfyui : Display, manipulate, and edit text, images, videos, loras and more.
You can manage looping operations, generate randomized content, trigger logical conditions, pause and manually control your workflows and even work with external AI tools, like Ollama or Text To Speech.
@@ -262,8 +262,9 @@ cd /where/you/installed/ComfyUI && python main.py
- **v0.49**: New node : Loop Sequential (Integer) - Loop through a range of integer values (but once per workflow run). Audio sync is smarter and adapts the video duration to the audio duration.
- **v0.50**: Allow audio in Images to Video path (tmp video). Add three new nodes : Concat Videos, Combine video/audio and Loop Sequential (input Lines). Save text changes to write inside the Comfyui folder. Fix random line from input outputting a LIST. ❗ Breaking change to audio/video sync node, allowing different types as input.
- **v0.51**: Fix some issues with the audio/video sync node. Add two new nodes : merge images/videos vertical and horizontal. Add requirements.txt and ollama_ip.txt.
- **v0.52-53**: Rever name git to Bjornulf_custom_nodes, match registry comfy
- **v0.52-53**: Revert name git to Bjornulf_custom_nodes, match registry comfy
- **v0.54-55**: Add opencv-python to requirements.txt.
- **v0.56**: ❗ Breaking changes : ollama node simplified, no ollama_ip.txt needed any more (the Ollama URL is now a node input), waiting for the collection of ollama nodes to be ready.
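
Since ollama_ip.txt is gone, the host is simply whatever you type into the node's `ollama_url` input. As a quick sanity check that a given URL actually serves models, here is a minimal sketch using the `ollama` Python package; the 127.0.0.1 address is an assumption for a default local install, not something this commit ships:

```python
from ollama import Client  # pip install ollama

def list_models(host: str) -> list[str]:
    """Return the names of the models pulled on an Ollama server."""
    client = Client(host=host)
    return [model["name"] for model in client.list()["models"]]

# Hypothetical local server; use whatever you wire into the node's ollama_url.
print(list_models("http://127.0.0.1:11434"))  # e.g. ['llama3.2:1b']
```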

# 📝 Nodes descriptions

146 changes: 79 additions & 67 deletions ollama.py
@@ -1,51 +1,64 @@
import ollama
from ollama import Client # pip install ollama
from ollama import Client
import logging
import hashlib
import os

class ollamaLoader:
@classmethod
def read_host_from_file(cls, filename='ollama_ip.txt'):
try:
# Get the directory where the script is located
script_dir = os.path.dirname(os.path.realpath(__file__))
file_path = os.path.join(script_dir, filename)

# Print the constructed file path for verification
print(f"Looking for file at: {file_path}")
# _available_models = None # Class variable to cache models

# @classmethod
# def read_host_from_file(cls, filename='ollama_ip.txt'):
# try:
# script_dir = os.path.dirname(os.path.realpath(__file__))
# file_path = os.path.join(script_dir, filename)
# print(f"Looking for file at: {file_path}")

# with open(file_path, 'r') as f:
# host = f.read().strip()
# if host:
# logging.info(f"Using host from {file_path}: {host}")
# return host
# else:
# logging.warning(f"{file_path} is empty. Falling back to default hosts.")
# except Exception as e:
# logging.error(f"Failed to read host from {file_path}: {e}")
# return None

with open(file_path, 'r') as f:
host = f.read().strip()
if host:
logging.info(f"Using host from {file_path}: {host}")
return host
else:
logging.warning(f"{file_path} is empty. Falling back to default hosts.")
except Exception as e:
logging.error(f"Failed to read host from {file_path}: {e}")
return None # Return None if reading fails
# @classmethod
# def get_available_models(cls):
# # Return cached models if available
# if cls._available_models is not None:
# return cls._available_models

# models = ["none"] # Default fallback
# host = cls.read_host_from_file()

# def try_connect(host_url):
# try:
# client = Client(host=host_url)
# list_models = client.list()
# return [model['name'] for model in list_models['models']]
# except Exception as e:
# logging.error(f"Error fetching models from {host_url}: {e}")
# return None

@classmethod
def get_available_models(cls):
host = cls.read_host_from_file()
if host:
try:
client = Client(host=host)
list_models = client.list()
return [model['name'] for model in list_models['models']]
except Exception as e:
logging.error(f"Error fetching models from {host}: {e}")
# # Try user-specified host first
# if host:
# result = try_connect(host)
# if result:
# models = result

# Fallback to default hosts if reading from file fails
for default_host in ["http://127.0.0.1:11434", "http://0.0.0.0:11434"]:
try:
client = Client(host=default_host)
list_models = client.list()
return [model['name'] for model in list_models['models']]
except Exception as e:
logging.error(f"Error fetching models from {default_host}: {e}")
return ["none"] # Return a default model if fetching fails
# # Try default hosts if necessary
# if models == ["none"]:
# for default_host in ["http://127.0.0.1:11434", "http://0.0.0.0:11434"]:
# result = try_connect(default_host)
# if result:
# models = result
# break

# cls._available_models = models # Cache the results
# return models

@classmethod
def INPUT_TYPES(cls):
@@ -59,10 +72,12 @@ def INPUT_TYPES(cls):
"yellow pants, orange jacket and black shirt, sunglasses, very long beard, very pale skin, long white hair, "
"very large nose."
)
# Lazy load models only when the node is actually used
return {
"required": {
"user_prompt": ("STRING", {"multiline": True}),
"selected_model": (cls.get_available_models(),),
"selected_model": ("STRING", {"default": "llama3.2:1b"} ), # Default to none, will be populated when node is used
"ollama_url": ("STRING", {"default": "http://0.0.0.0:11434"}),
"system_prompt": ("STRING", {
"multiline": True,
"default": default_system_prompt
@@ -79,50 +94,47 @@ def INPUT_TYPES(cls):

def __init__(self):
self.last_content_hash = None
# # Update available models when the node is actually instantiated
# self.__class__._available_models = self.get_available_models()

def connect_2_ollama(self, user_prompt, selected_model, system_prompt, keep_1min_in_vram, seed):
# Generate a hash of the current content
def connect_2_ollama(self, user_prompt, selected_model, system_prompt, keep_1min_in_vram, ollama_url, seed):
content_hash = hashlib.md5((user_prompt + selected_model + system_prompt).encode()).hexdigest()

# Check if the content has changed
if content_hash != self.last_content_hash:
# Content has changed, use the provided seed
self.last_content_hash = content_hash
else:
# Content hasn't changed, set seed to None to prevent randomization
seed = None

keep_alive_minutes = 1 if keep_1min_in_vram else 0

host = self.read_host_from_file()
if host:
# host = self.read_host_from_file()
host = ollama_url

def try_generate(host_url):
try:
client = Client(host=host)
client = Client(host=host_url)
response = client.generate(
model=selected_model,
system=system_prompt,
prompt=user_prompt,
keep_alive=f"{keep_alive_minutes}m"
)
logging.info(f"Ollama response ({host}): {response['response']}")
return (response['response'],)
logging.info(f"Ollama response ({host_url}): {response['response']}")
return response['response']
except Exception as e:
logging.error(f"Connection to {host} failed: {e}")
logging.error(f"Connection to {host_url} failed: {e}")
return None

# Fallback to default hosts if reading from file fails
for default_host in ["http://127.0.0.1:11434", "http://0.0.0.0:11434"]:
try:
client = Client(host=default_host)
response = client.generate(
model=selected_model,
system=system_prompt,
prompt=user_prompt,
keep_alive=f"{keep_alive_minutes}m"
)
logging.info(f"Ollama response ({default_host}): {response['response']}")
return (response['response'],)
except Exception as e:
logging.error(f"Connection to {default_host} failed: {e}")
# Try user-specified host first
if host:
result = try_generate(host)
if result:
return (result,)

# Try default hosts
# for default_host in ["http://127.0.0.1:11434", "http://0.0.0.0:11434"]:
# result = try_generate(default_host)
# if result:
# return (result,)

logging.error("All connection attempts failed.")
return ("Connection to Ollama failed.",)
return ("Connection to Ollama failed.",)
2 changes: 1 addition & 1 deletion pyproject.toml
@@ -1,7 +1,7 @@
[project]
name = "bjornulf_custom_nodes"
description = "61 ComfyUI nodes : Display, manipulate, and edit text, images, videos, loras and more. Manage looping operations, generate randomized content, use logical conditions and work with external AI tools, like Ollama or Text To Speech."
version = "0.55"
version = "0.56"
license = {file = "LICENSE"}

[project.urls]
