Skip to content

Commit

Permalink
gradio logging
Browse the repository at this point in the history
  • Loading branch information
EnigmaCurry committed Oct 23, 2024
1 parent 6ab5ab8 commit 780b8e7
Show file tree
Hide file tree
Showing 2 changed files with 40 additions and 13 deletions.
23 changes: 21 additions & 2 deletions gradio/gradio/chatbot/config.py
Original file line number Diff line number Diff line change
Expand Up @@ -20,8 +20,27 @@ def get_config(key, default=None):
return value

class _PrintLogger:
    """Minimal print-based logger that flushes after every message.

    Mirrors the ``logging.Logger`` method names (``info``/``error``/
    ``warning``/``debug``) so callers written against the previous
    ``logging``-based implementation keep working, but writes straight to
    the standard streams and flushes immediately — presumably so output
    shows up promptly under buffered stdout (e.g. inside a container);
    TODO confirm that motivation with the deployment setup.
    """

    def __init__(self, name):
        # Label prepended to every message: "[LEVEL] <name>: <message>".
        self.name = name

    def info(self, message):
        print(f"[INFO] {self.name}: {message}")
        sys.stdout.flush()

    def error(self, message):
        # Errors go to stderr so they can be redirected separately.
        print(f"[ERROR] {self.name}: {message}", file=sys.stderr)
        sys.stderr.flush()

    def warning(self, message):
        print(f"[WARNING] {self.name}: {message}")
        sys.stdout.flush()

    def debug(self, message):
        print(f"[DEBUG] {self.name}: {message}")
        sys.stdout.flush()


def get_logger(name):
    """Return a flush-on-write logger labeled with *name*.

    The logger class is defined once at module level rather than inside
    this function, so repeated calls share a single class object instead
    of minting a fresh ``Logger`` type per call.
    """
    return _PrintLogger(name)

def invalid_config(variable, error_message):
"Log error for invalid configuration and quit"
Expand Down
30 changes: 19 additions & 11 deletions gradio/gradio/chatbot/main.py
Original file line number Diff line number Diff line change
Expand Up @@ -8,6 +8,9 @@

# Function to stream responses from the LLM, maintaining context
def query_llm(input_text, history):
# Log the user input
log.debug(f"User Input: {input_text}")

# Include the entire conversation history in the payload
messages = [{"role": "user", "content": user_message} if i % 2 == 0 else {"role": "assistant", "content": assistant_message}
for i, (user_message, assistant_message) in enumerate(history)]
Expand All @@ -27,35 +30,40 @@ def query_llm(input_text, history):

# Check if the response is valid
if response.status_code == 200:
partial_response = ""
full_response = ""

# Stream the response in chunks
for line in response.iter_lines():
if line:
# Decode and load the JSON from each 'data:' chunk
line_str = line.decode('utf-8')

# Skip the DONE message
if line_str.strip() == "data: [DONE]":
break

if line_str.startswith("data:"):
data_json = line_str[6:] # Remove 'data: ' prefix
data = json.loads(data_json)

# Extract and append content from the "delta"
delta_content = data["choices"][0]["delta"].get("content", "")
partial_response += delta_content
full_response += delta_content

# Yield the partial response so far
yield partial_response
yield full_response

# Log the complete response after all chunks are received
log.debug(f"Assistant Full Response: {full_response}")
else:
yield f"Error: {response.status_code}"
error_message = f"Error: {response.status_code}"
log.error(error_message)
yield error_message

# Using gr.ChatInterface to handle streaming responses from query_llm.
chatbot = gr.ChatInterface(fn=query_llm, title="LM Studio Chat")

# Single startup message (the diff residue duplicated an older lowercase
# variant of this line; only the current one is kept).
print("Launching Gradio interface ...")
sys.stdout.flush()  # make the message visible immediately under buffered stdout

# Bind to all interfaces so the app is reachable from outside a container.
chatbot.launch(server_name="0.0.0.0", server_port=7860)

0 comments on commit 780b8e7

Please sign in to comment.