@@ -5,15 +5,14 @@
 import asyncio
 from dataclasses import dataclass, field
 from datetime import datetime
-from typing import Optional, Dict, Any
+from typing import Dict, Any
 
-from loguru import logger
 from pydantic import BaseModel
 
 from ceylon.llm.models import Model, ModelSettings, ModelMessage
 from ceylon.llm.models.support.messages import MessageRole, TextPart
-from ceylon.task.agent import TaskExecutionAgent
-from ceylon.task.data import TaskMessage, TaskStatus
+from ceylon.processor.agent import ProcessWorker
+from ceylon.processor.data import ProcessRequest
 
 
 @dataclass
@@ -22,6 +21,7 @@ class LLMResponse:
     metadata: Dict[str, Any] = field(default_factory=dict)
     timestamp: float = field(default_factory=lambda: datetime.now().timestamp())
 
+
 class LLMConfig(BaseModel):
     system_prompt: str
     temperature: float = 0.7
@@ -34,25 +34,26 @@ class LLMConfig(BaseModel):
     class Config:
         arbitrary_types_allowed = True
 
-class LLMAgent(TaskExecutionAgent):
+
+class LLMAgent(ProcessWorker):
     """
     An agent that processes tasks using configurable LLM capabilities.
     Supports multiple LLM backends through the Model interface.
     """
+
     def __init__(
             self,
             name: str,
             llm_model: Model,
             config: LLMConfig,
-            worker_role: str = "llm_processor",
+            role: str = "llm_processor",
             max_concurrent_tasks: int = 3
     ):
         super().__init__(
             name=name,
-            worker_role=worker_role,
-            max_concurrent_tasks=max_concurrent_tasks
+            role=role
         )
-        self.llm_model = llm_model
+        self.llm_model: Model = llm_model
         self.config = config
         self.response_cache: Dict[str, LLMResponse] = {}
         self.processing_lock = asyncio.Lock()
@@ -65,169 +66,25 @@ def __init__(
             )
         )
 
-    async def execute_task(self, task: TaskMessage) -> None:
-        """
-        Execute an LLM task with retry logic and error handling
-        """
-        try:
-            logger.info(f"\n{'=' * 80}")
-            logger.info(f"Task: {task.name}")
-            logger.info(f"Description: {task.instructions}")
-            logger.info(f"{'=' * 80}\n")
-
-            async with self.processing_lock:
-                response = await self._execute_with_retry(task)
-
-            if response:
-                # Cache successful response
-                self.response_cache[task.id] = response
-
-                # Print the response
-                logger.info("\nGenerated Content:")
-                logger.info(f"{'-' * 80}")
-                logger.info(response.content)
-                logger.info(f"{'-' * 80}\n")
-
-                # Update task with completion info
-                task.completed = True
-                task.end_time = datetime.now().timestamp()
-
-                # Include response in task metadata
-                if not task.metadata:
-                    task.metadata = {}
-                task.metadata['llm_response'] = response.content
-                task.result = response.content
-                task.metadata['response_timestamp'] = response.timestamp
-                task.metadata.update(response.metadata)
-
-                logger.info(f"{self.name}: Completed task {task.id}")
-
-                # Remove from active tasks and broadcast completion
-                del self.active_tasks[task.id]
-                await self.broadcast_message(task)
-
-                # Request new task
-                await self.request_task("standard")
-            else:
-                raise Exception("Failed to get valid LLM response")
-
-        except Exception as e:
-            logger.error(f"Error executing LLM task {task.id}: {e}")
-            task.status = TaskStatus.FAILED
-            task.metadata = task.metadata or {}
-            task.metadata['error'] = str(e)
-            await self.broadcast_message(task)
-
-    async def _execute_with_retry(self, task: TaskMessage) -> Optional[LLMResponse]:
-        """
-        Execute LLM call with configured retry logic
-        """
-        last_error = None
-
-        for attempt in range(self.config.retry_attempts):
-            try:
-                response = await self._call_llm(task)
-
-                if response and response.content:
-                    if await self.validate_response(response, task):
-                        return response
-                    else:
-                        raise ValueError("Response validation failed")
-
-            except Exception as e:
-                last_error = e
-                logger.warning(f"Attempt {attempt + 1} failed: {e}")
-                if attempt < self.config.retry_attempts - 1:
-                    await asyncio.sleep(self.config.retry_delay * (attempt + 1))
-
-        if last_error:
-            raise last_error
-        return None
-
-    async def _call_llm(self, task: TaskMessage) -> LLMResponse:
-        """
-        Make the actual LLM API call using the configured model
-        """
-        try:
-            async with asyncio.timeout(self.config.timeout):
-                # Construct messages for the model
-                messages = [
-                    ModelMessage(
-                        role=MessageRole.SYSTEM,
-                        parts=[TextPart(text=self.config.system_prompt)]
-                    ),
-                    ModelMessage(
-                        role=MessageRole.USER,
-                        parts=[TextPart(text=self._format_task_prompt(task))]
-                    )
+    async def _processor(self, request: ProcessRequest, time: int):
+        message_list = [
+            ModelMessage(
+                role=MessageRole.SYSTEM,
+                parts=[
+                    TextPart(text=self.config.system_prompt)
                 ]
-
-                # Make the model request
-                response, usage = await self.llm_model.request(
-                    messages=messages,
-                    context=self.model_context
-                )
-
-                # Extract text from response parts
-                response_text = ""
-                for part in response.parts:
-                    if hasattr(part, 'text'):
-                        response_text += part.text
-
-                return LLMResponse(
-                    content=response_text,
-                    metadata={
-                        'task_id': task.id,
-                        'usage': usage.__dict__,
-                        'model_name': self.llm_model.model_name
-                    }
-                )
-
-        except asyncio.TimeoutError:
-            raise TimeoutError(f"LLM call timed out after {self.config.timeout}s")
-        except Exception as e:
-            raise Exception(f"LLM call failed: {str(e)}")
-
-    def _format_task_prompt(self, task: TaskMessage) -> str:
-        """
-        Format the task into a prompt for the LLM
-        """
-        prompt_parts = [
-            f"Task: {task.name}",
-            f"Description: {task.instructions}"
+            ),
+            ModelMessage(
+                role=MessageRole.USER,
+                parts=[
+                    TextPart(text=request.data)
+                ]
+            )
         ]
 
-        # Add any task-specific metadata to prompt
-        if task.metadata:
-            for key, value in task.metadata.items():
-                if key in ['type', 'topic', 'style', 'target_length']:
-                    prompt_parts.append(f"{key.title()}: {value}")
-
-        return "\n".join(prompt_parts)
-
-    async def validate_response(self, response: LLMResponse, task: TaskMessage) -> bool:
-        """
-        Validate LLM response format and content
-        Override this method to implement custom validation logic
-        """
-        if not response or not response.content:
-            return False
-
-        # Basic length validation
-        if task.metadata and 'target_length' in task.metadata:
-            target_length = task.metadata['target_length']
-            actual_length = len(response.content.split())
-            if actual_length < target_length * 0.5 or actual_length > target_length * 1.5:
-                logger.warning(f"Response length {actual_length} words outside target range of {target_length}")
-                return False
-
-        # Add custom validation logic here
-        return True
+        return await self.llm_model.request(message_list, self.model_context)
 
-    async def close(self) -> None:
-        """
-        Clean up resources when agent is stopped
-        """
+    async def stop(self) -> None:
         if self.llm_model:
             await self.llm_model.close()
-        await super().close()
+        await super().stop()
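
For quick orientation, here is a minimal smoke-test sketch of the new `_processor` contract. Everything beyond what the diff itself shows is an assumption: the `ceylon.llm.agent` module path, the `ProcessRequest(data=...)` constructor (only `request.data` is read above), and the `EchoModel` stand-in, including its `create_context` hook (whose call site in `__init__` is elided from this diff).

```python
import asyncio

from ceylon.llm.agent import LLMAgent, LLMConfig  # module path assumed
from ceylon.processor.data import ProcessRequest


class EchoModel:
    """Made-up stand-in for a ceylon Model; it implements only the surface
    this diff touches: request(), close(), and create_context() for __init__."""
    model_name = "echo"

    def create_context(self, settings=None):
        return settings

    async def request(self, messages, context):
        # A real Model returns a (response, usage) pair; echoing the user
        # part back is enough here, since _processor forwards the return value.
        return messages[-1].parts[0].text

    async def close(self):
        pass


async def main():
    agent = LLMAgent(
        name="writer_1",
        llm_model=EchoModel(),  # duck-typed; any real Model backend fits here
        config=LLMConfig(system_prompt="You are a concise technical writer."),
        role="llm_processor",
    )
    # Call the worker hook directly; in a real deployment the ceylon
    # runtime delivers ProcessRequests to the agent instead.
    print(await agent._processor(ProcessRequest(data="ping"), 0))


asyncio.run(main())
```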