Skip to content

Commit 7602f8b

Browse files
committed
Add task summary
1 parent bbde9b6 commit 7602f8b

File tree

7 files changed

+103
-22
lines changed

7 files changed

+103
-22
lines changed

FEATURES.md

+7
Original file line numberDiff line numberDiff line change
@@ -1,5 +1,12 @@
11
# Features
22

3+
# usage-limit
4+
- 完善使用情况统计:支持各家 API,支持细粒度
5+
- 时间统计
6+
- 对话次数统计
7+
8+
可以配置三种数据的最大值,超过就停止任务。
9+
310
## shiv
411
支持 shiv 打包
512

aipython/aipy/agent.py

+28-2
Original file line numberDiff line numberDiff line change
@@ -11,6 +11,7 @@
1111
from rich.live import Live
1212
from rich.panel import Panel
1313
from rich.align import Align
14+
from rich.table import Table
1415
from rich.syntax import Syntax
1516
from rich.console import Console
1617
from rich.markdown import Markdown
@@ -173,6 +174,32 @@ def box(self, title, content, align=None, lang=None):
173174
content = Align(content, align=align)
174175
self._console.print(Panel(content, title=title))
175176

177+
def print_summary(self):
    """Print a per-round usage table followed by a one-line aggregate summary.

    Reads the usage records collected in ``self.llm.history``: one table row
    per assistant round (elapsed time and token counts), then the totals
    returned by ``history.get_summary()``.
    """
    history = self.llm.history
    table = Table(title=T("Task Summary"), show_lines=True)

    table.add_column(T("Round"), justify="center", style="bold cyan", no_wrap=True)
    table.add_column(T("Time(s)"), justify="right")
    table.add_column(T("In Tokens"), justify="right")
    table.add_column(T("Out Tokens"), justify="right")
    table.add_column(T("Total Tokens"), justify="right", style="bold magenta")

    # enumerate() replaces the hand-rolled counter and avoids shadowing
    # the builtin `round`.
    for round_no, row in enumerate(history.get_usage(), start=1):
        table.add_row(
            str(round_no),
            str(row["time"]),
            str(row["input_tokens"]),
            str(row["output_tokens"]),
            str(row["total_tokens"]),
        )
    self._console.print("\n")
    self._console.print(table)
    summary = history.get_summary()
    # When no assistant round completed, the accumulated Counter is empty and
    # format(**summary) would raise KeyError — default the missing fields to 0.
    for key in ("time", "input_tokens", "output_tokens", "total_tokens"):
        summary.setdefault(key, 0)
    summary = "{rounds} | {time}s | Tokens: {input_tokens}/{output_tokens}/{total_tokens}".format(**summary)
    self._console.print(f"\n⏹ [cyan]{T('end_instruction')} | {summary}")
176203
def __call__(self, instruction, llm=None):
177204
"""
178205
执行自动处理循环,直到 LLM 不再返回代码消息
@@ -188,8 +215,7 @@ def __call__(self, instruction, llm=None):
188215
if msg['type'] != MsgType.CODE:
189216
break
190217
response = self.process_code_reply(msg, llm)
191-
total_token = self.llm.history.total_tokens
192-
self._console.print(f"\n{T('end_instruction')} | Tokens: {total_token}")
218+
self.print_summary()
193219
os.write(1, b'\a\a\a')
194220

195221
def chat(self, prompt):

aipython/aipy/i18n.py

+13-1
Original file line numberDiff line numberDiff line change
@@ -29,7 +29,7 @@
2929
'publish_disabled': "当前环境不支持发布",
3030
'auto_confirm': '自动确认',
3131
'packages_exist': '申请的第三方包已经安装',
32-
'thinking': '正在努力思考中,请稍等6-60秒',
32+
'thinking': '正在绞尽脑汁思考中,请稍等6-60秒',
3333
'no_available_llm': '没有可用的 LLM,请检查配置文件',
3434
'banner1_python': "请用 ai('任务') 输入需要 AI 处理的任务 (输入 ai.use(llm) 切换 下述 LLM:",
3535
'banner1': "请输入需要 AI 处理的任务 (输入 /use llm 切换 下述LLM)",
@@ -38,6 +38,12 @@
3838
'ai_mode_enter': '进入 AI 模式,开始处理任务,输入 Ctrl+d 或 /done 结束任务',
3939
'ai_mode_exit': "[退出 AI 模式]",
4040
'ai_mode_unknown_command': "[AI 模式] 未知命令",
41+
'Task Summary': '任务总结',
42+
'Round': '轮次',
43+
'Time(s)': '时间(秒)',
44+
'In Tokens': '输入令牌数',
45+
'Out Tokens': '输出令牌数',
46+
'Total Tokens': '总令牌数',
4147
},
4248
'en': {
4349
'start_instruction': 'Start processing instruction',
@@ -73,6 +79,12 @@
7379
'ai_mode_enter': 'Enter AI mode, start processing tasks, enter Ctrl+d or /done to end the task',
7480
'ai_mode_exit': "[Exit AI mode]",
7581
'ai_mode_unknown_command': "[AI mode] Unknown command",
82+
'Task Summary': 'Task Summary',
83+
'Round': 'Round',
84+
'Time(s)': 'Time(s)',
85+
'In Tokens': 'In Tokens',
86+
'Out Tokens': 'Out Tokens',
87+
'Total Tokens': 'Total Tokens',
7688
}
7789
}
7890

aipython/aipy/llm.py

+45-15
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,7 @@
11
#!/usr/bin/env python
22
# -*- coding: utf-8 -*-
33

4+
import time
45
from collections import Counter, defaultdict
56
from dataclasses import dataclass, field
67
from abc import ABC, abstractmethod
@@ -21,7 +22,7 @@ class ChatMessage:
2122
class ChatHistory:
2223
def __init__(self):
    """Start with an empty conversation and zeroed usage totals."""
    # Counter accumulates every usage field (tokens, time) across messages.
    self._total_tokens = Counter()
    self.messages = []
2526

2627
def __len__(self):
2728
return len(self.messages)
@@ -31,11 +32,15 @@ def add(self, role, content):
3132

3233
def add_message(self, message: ChatMessage):
    """Append *message* and merge its usage Counter into the running totals."""
    self.messages.append(message)
    # Counter addition sums every usage field at once (tokens, time).
    self._total_tokens += message.usage
3536

36-
@property
37-
def total_tokens(self):
38-
return self._total_tokens
37+
def get_usage(self):
    """Yield the per-round usage record of every assistant message."""
    return iter(msg.usage for msg in self.messages if msg.role == "assistant")

def get_summary(self):
    """Return aggregate usage totals plus the number of assistant rounds."""
    totals = dict(self._total_tokens)
    assistant_rounds = len([m for m in self.messages if m.role == "assistant"])
    totals['rounds'] = assistant_rounds
    return totals
3944

4045
def get_messages(self):
4146
return [{"role": msg.role, "content": msg.content} for msg in self.messages]
@@ -60,6 +65,10 @@ def get_completion(self, messages):
6065
def add_system_prompt(self, history, system_prompt):
6166
history.add("system", system_prompt)
6267

68+
@abstractmethod
69+
def parse_usage(self, response):
70+
pass
71+
6372
@abstractmethod
6473
def parse_response(self, response):
6574
pass
@@ -70,16 +79,23 @@ def __call__(self, history, prompt, system_prompt=None):
7079
self.add_system_prompt(history, system_prompt)
7180
history.add("user", prompt)
7281

82+
start = time.time()
7383
response = self.get_completion(history.get_messages())
84+
end = time.time()
7485
if response:
7586
msg = self.parse_response(response)
87+
usage = self.parse_usage(response)
88+
usage['time'] = round(end - start, 3)
89+
msg.usage = usage
7690
history.add_message(msg)
7791
if msg.reason:
7892
response = f"{T('think')}:\n---\n{msg.reason}\n---\n{msg.content}"
7993
else:
8094
response = msg.content
8195
return response
82-
96+
97+
# https://platform.openai.com/docs/api-reference/chat/create
98+
# https://api-docs.deepseek.com/api/create-chat-completion
8399
class OpenAIClient(BaseClient):
84100
def __init__(self, config):
85101
super().__init__(config)
@@ -88,15 +104,20 @@ def __init__(self, config):
88104
def add_system_prompt(self, history, system_prompt):
89105
history.add("system", system_prompt)
90106

107+
def parse_usage(self, response):
    """Extract token usage from an OpenAI-style chat completion response."""
    u = response.usage
    counts = {
        'total_tokens': u.total_tokens,
        'input_tokens': u.prompt_tokens,
        'output_tokens': u.completion_tokens,
    }
    return Counter(counts)
112+
91113
def parse_response(self, response):
    """Build a ChatMessage from the first choice of an OpenAI-style response."""
    choice_msg = response.choices[0].message
    # reasoning_content only exists on reasoning models (e.g. DeepSeek-R1).
    thinking = getattr(choice_msg, "reasoning_content", None)
    return ChatMessage(role=choice_msg.role, content=choice_msg.content, reason=thinking)
100121

101122
def get_completion(self, messages):
102123
try:
@@ -110,12 +131,18 @@ def get_completion(self, messages):
110131
self.console.print(f"❌ [bold red]{self.name} API {T('call_failed')}: [yellow]{str(e)}")
111132
response = None
112133
return response
113-
134+
135+
# https://github.com/ollama/ollama/blob/main/docs/api.md
114136
class OllamaClient(BaseClient):
115137
def __init__(self, config):
116138
super().__init__(config)
117139
self._session = requests.Session()
118140

141+
def parse_usage(self, response):
    """Extract token usage from an Ollama chat response dict."""
    prompt_n = response['prompt_eval_count']
    output_n = response['eval_count']
    usage = Counter(input_tokens=prompt_n, output_tokens=output_n)
    # Ollama reports no grand total; derive it from the two halves.
    usage['total_tokens'] = prompt_n + output_n
    return usage
145+
119146
def parse_response(self, response):
    """Build a ChatMessage from an Ollama chat response dict."""
    payload = response["message"]
    return ChatMessage(role=payload['role'], content=payload['content'])
@@ -139,19 +166,22 @@ def get_completion(self, messages):
139166
response = None
140167
return response
141168

169+
# https://docs.anthropic.com/en/api/messages
142170
class ClaudeClient(BaseClient):
143171
def __init__(self, config):
144172
super().__init__(config)
145173
self._client = anthropic.Anthropic(api_key=self._api_key, timeout=self._timeout)
146174

175+
def parse_usage(self, response):
    """Extract token usage from an Anthropic Messages API response."""
    in_n = response.usage.input_tokens
    out_n = response.usage.output_tokens
    # Anthropic reports no grand total; derive it from input + output.
    return Counter({'input_tokens': in_n, 'output_tokens': out_n,
                    'total_tokens': in_n + out_n})
180+
147181
def parse_response(self, response):
    """Build a ChatMessage from an Anthropic Messages API response."""
    text = response.content[0].text
    return ChatMessage(role=response.role, content=text)
155185

156186
def add_system_prompt(self, history, system_prompt):
157187
self._system_prompt = system_prompt

aipython/saas.py

+8-2
Original file line numberDiff line numberDiff line change
@@ -42,10 +42,16 @@ def input_with_possible_multiline(self, prompt_text, is_ai=False):
4242
break
4343
return "\n".join(lines)
4444

45+
def run_ai_task(self, task):
    """Run a single AI task, reporting (not raising) any failure on the console.

    Acts as the error boundary for interactive mode, so one failed task
    does not break the REPL loop.
    """
    try:
        self.ai(task)
    except Exception as exc:
        self.console.print(f"[bold red]Error: {exc}")
50+
4551
def run_ai_mode(self, initial_text):
4652
ai = self.ai
4753
self.console.print(f"{T('ai_mode_enter')}", style="cyan")
48-
ai(initial_text)
54+
self.run_ai_task(initial_text)
4955
while True:
5056
try:
5157
user_input = self.input_with_possible_multiline(">>> ", is_ai=True).strip()
@@ -63,7 +69,7 @@ def run_ai_mode(self, initial_text):
6369
else:
6470
self.console.print(f"{T('ai_mode_unknown_command')}", style="cyan")
6571
else:
66-
ai(user_input)
72+
self.run_ai_task(user_input)
6773
try:
6874
ai.publish(verbose=False)
6975
except Exception as e:

pyproject.toml

+1-1
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,6 @@
11
[project]
22
name = "aipython"
3-
version = "0.1.6"
3+
version = "0.1.7"
44
description = "AIPython: AI-Powered Python & Python-Powered AI"
55
readme = "README.md"
66
requires-python = ">=3.11"

uv.lock

+1-1
Some generated files are not rendered by default. Learn more about customizing how changed files appear on GitHub.

0 commit comments

Comments
 (0)