
Commit 33de9aa

Create LLM Agents
1 parent 74804da commit 33de9aa

File tree

3 files changed: +51 −601 lines changed

bindings/ceylon/ceylon/llm/agent.py

+26 −169
```diff
@@ -5,15 +5,14 @@
 import asyncio
 from dataclasses import dataclass, field
 from datetime import datetime
-from typing import Optional, Dict, Any
+from typing import Dict, Any

-from loguru import logger
 from pydantic import BaseModel

 from ceylon.llm.models import Model, ModelSettings, ModelMessage
 from ceylon.llm.models.support.messages import MessageRole, TextPart
-from ceylon.task.agent import TaskExecutionAgent
-from ceylon.task.data import TaskMessage, TaskStatus
+from ceylon.processor.agent import ProcessWorker
+from ceylon.processor.data import ProcessRequest


 @dataclass
@@ -22,6 +21,7 @@ class LLMResponse:
     metadata: Dict[str, Any] = field(default_factory=dict)
     timestamp: float = field(default_factory=lambda: datetime.now().timestamp())

+
 class LLMConfig(BaseModel):
     system_prompt: str
     temperature: float = 0.7
@@ -34,25 +34,26 @@ class LLMConfig(BaseModel):
     class Config:
         arbitrary_types_allowed = True

-class LLMAgent(TaskExecutionAgent):
+
+class LLMAgent(ProcessWorker):
     """
     An agent that processes tasks using configurable LLM capabilities.
     Supports multiple LLM backends through the Model interface.
     """
+
     def __init__(
             self,
             name: str,
             llm_model: Model,
             config: LLMConfig,
-            worker_role: str = "llm_processor",
+            role: str = "llm_processor",
             max_concurrent_tasks: int = 3
     ):
         super().__init__(
             name=name,
-            worker_role=worker_role,
-            max_concurrent_tasks=max_concurrent_tasks
+            role=role
         )
-        self.llm_model = llm_model
+        self.llm_model: Model = llm_model
         self.config = config
         self.response_cache: Dict[str, LLMResponse] = {}
         self.processing_lock = asyncio.Lock()
@@ -65,169 +66,25 @@ def __init__(
             )
         )

-    async def execute_task(self, task: TaskMessage) -> None:
-        """
-        Execute an LLM task with retry logic and error handling
-        """
-        try:
-            logger.info(f"\n{'='*80}")
-            logger.info(f"Task: {task.name}")
-            logger.info(f"Description: {task.instructions}")
-            logger.info(f"{'='*80}\n")
-
-            async with self.processing_lock:
-                response = await self._execute_with_retry(task)
-
-                if response:
-                    # Cache successful response
-                    self.response_cache[task.id] = response
-
-                    # Print the response
-                    logger.info("\nGenerated Content:")
-                    logger.info(f"{'-'*80}")
-                    logger.info(response.content)
-                    logger.info(f"{'-'*80}\n")
-
-                    # Update task with completion info
-                    task.completed = True
-                    task.end_time = datetime.now().timestamp()
-
-                    # Include response in task metadata
-                    if not task.metadata:
-                        task.metadata = {}
-                    task.metadata['llm_response'] = response.content
-                    task.result = response.content
-                    task.metadata['response_timestamp'] = response.timestamp
-                    task.metadata.update(response.metadata)
-
-                    logger.info(f"{self.name}: Completed task {task.id}")
-
-                    # Remove from active tasks and broadcast completion
-                    del self.active_tasks[task.id]
-                    await self.broadcast_message(task)
-
-                    # Request new task
-                    await self.request_task("standard")
-                else:
-                    raise Exception("Failed to get valid LLM response")
-
-        except Exception as e:
-            logger.error(f"Error executing LLM task {task.id}: {e}")
-            task.status = TaskStatus.FAILED
-            task.metadata = task.metadata or {}
-            task.metadata['error'] = str(e)
-            await self.broadcast_message(task)
-
-    async def _execute_with_retry(self, task: TaskMessage) -> Optional[LLMResponse]:
-        """
-        Execute LLM call with configured retry logic
-        """
-        last_error = None
-
-        for attempt in range(self.config.retry_attempts):
-            try:
-                response = await self._call_llm(task)
-
-                if response and response.content:
-                    if await self.validate_response(response, task):
-                        return response
-                    else:
-                        raise ValueError("Response validation failed")
-
-            except Exception as e:
-                last_error = e
-                logger.warning(f"Attempt {attempt + 1} failed: {e}")
-                if attempt < self.config.retry_attempts - 1:
-                    await asyncio.sleep(self.config.retry_delay * (attempt + 1))
-
-        if last_error:
-            raise last_error
-        return None
-
-    async def _call_llm(self, task: TaskMessage) -> LLMResponse:
-        """
-        Make the actual LLM API call using the configured model
-        """
-        try:
-            async with asyncio.timeout(self.config.timeout):
-                # Construct messages for the model
-                messages = [
-                    ModelMessage(
-                        role=MessageRole.SYSTEM,
-                        parts=[TextPart(text=self.config.system_prompt)]
-                    ),
-                    ModelMessage(
-                        role=MessageRole.USER,
-                        parts=[TextPart(text=self._format_task_prompt(task))]
-                    )
+    async def _processor(self, request: ProcessRequest, time: int):
+        message_list = [
+            ModelMessage(
+                role=MessageRole.SYSTEM,
+                parts=[
+                    TextPart(text=self.config.system_prompt)
                 ]
-
-                # Make the model request
-                response, usage = await self.llm_model.request(
-                    messages=messages,
-                    context=self.model_context
-                )
-
-                # Extract text from response parts
-                response_text = ""
-                for part in response.parts:
-                    if hasattr(part, 'text'):
-                        response_text += part.text
-
-                return LLMResponse(
-                    content=response_text,
-                    metadata={
-                        'task_id': task.id,
-                        'usage': usage.__dict__,
-                        'model_name': self.llm_model.model_name
-                    }
-                )
-
-        except asyncio.TimeoutError:
-            raise TimeoutError(f"LLM call timed out after {self.config.timeout}s")
-        except Exception as e:
-            raise Exception(f"LLM call failed: {str(e)}")
-
-    def _format_task_prompt(self, task: TaskMessage) -> str:
-        """
-        Format the task into a prompt for the LLM
-        """
-        prompt_parts = [
-            f"Task: {task.name}",
-            f"Description: {task.instructions}"
+            ),
+            ModelMessage(
+                role=MessageRole.USER,
+                parts=[
+                    TextPart(text=request.data)
+                ]
+            )
         ]

-        # Add any task-specific metadata to prompt
-        if task.metadata:
-            for key, value in task.metadata.items():
-                if key in ['type', 'topic', 'style', 'target_length']:
-                    prompt_parts.append(f"{key.title()}: {value}")
-
-        return "\n".join(prompt_parts)
-
-    async def validate_response(self, response: LLMResponse, task: TaskMessage) -> bool:
-        """
-        Validate LLM response format and content
-        Override this method to implement custom validation logic
-        """
-        if not response or not response.content:
-            return False
-
-        # Basic length validation
-        if task.metadata and 'target_length' in task.metadata:
-            target_length = task.metadata['target_length']
-            actual_length = len(response.content.split())
-            if actual_length < target_length * 0.5 or actual_length > target_length * 1.5:
-                logger.warning(f"Response length {actual_length} words outside target range of {target_length}")
-                return False
-
-        # Add custom validation logic here
-        return True
+        return await self.llm_model.request(message_list, self.model_context)

-    async def close(self) -> None:
-        """
-        Clean up resources when agent is stopped
-        """
+    async def stop(self) -> None:
         if self.llm_model:
             await self.llm_model.close()
-        await super().close()
+        await super().stop()
```
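
For context, here is a minimal sketch of how the refactored agent might be driven. Only LLMAgent, LLMConfig, the new `role` keyword, ProcessRequest, and the use of `request.data` as the user message are confirmed by the diff above; the concrete Model backend, its constructor, and the ProcessRequest fields shown are assumptions for illustration.

```python
import asyncio

from ceylon.llm.agent import LLMAgent, LLMConfig
from ceylon.llm.models.ollama import OllamaModel  # assumed backend module
from ceylon.processor.data import ProcessRequest


async def main() -> None:
    config = LLMConfig(
        system_prompt="You are a concise technical writer.",
        temperature=0.7,
    )
    agent = LLMAgent(
        name="writer_1",
        llm_model=OllamaModel(model_name="llama3"),  # hypothetical constructor
        config=config,
        role="llm_processor",  # new keyword; was worker_role before this commit
    )

    # ProcessWorker hands incoming work to _processor(); request.data
    # becomes the USER message text. The ProcessRequest arguments shown
    # here are assumptions, not taken from the diff.
    request = ProcessRequest(
        task_type="text_generation",
        data="Summarize the Ceylon agent framework in two sentences.",
    )

    # In the removed _call_llm(), Model.request() returned a
    # (response, usage) pair, so _processor() presumably returns the same.
    response, usage = await agent._processor(request, time=0)
    print("".join(part.text for part in response.parts if hasattr(part, "text")))

    await agent.stop()  # stop() now also closes the underlying llm_model


if __name__ == "__main__":
    asyncio.run(main())
```

Note that `stop()` replaces `close()` from the old TaskExecutionAgent API, and that the retry, timeout, and validation logic of the removed methods is gone: `_processor()` is now a single model call, so subclasses or callers that need those behaviors have to reintroduce them.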
