Skip to content

Commit 0ce456b

Browse files
authored
Merge pull request #98 from codefuse-ai/muagent_dev
add muagent sdk v0.1.0
2 parents b701a8a + 45586d8 commit 0ce456b

File tree

105 files changed

+13656
-177
lines changed

Some content is hidden

Large Commits have some content hidden by default. Use the searchbox below for content that may be hidden.

105 files changed

+13656
-177
lines changed

.github/workflows/docker-image-pull.yml

+2-2
Original file line numberDiff line numberDiff line change
@@ -11,8 +11,8 @@ jobs:
1111
architecture: [amd64, arm64]
1212
os: [linux]
1313
service:
14-
- name: runtime:0.1.0
15-
- name: muagent:0.1.0
14+
- name: runtime:0.1.1
15+
- name: muagent:0.1.1
1616
- name: ekgfrontend:0.1.0
1717

1818
steps:

.github/workflows/docker-image.yml

+2-2
Original file line numberDiff line numberDiff line change
@@ -12,7 +12,7 @@ jobs:
1212
- name: runtime
1313
context: ./runtime
1414
dockerfile: ./runtime/Dockerfile.no-package
15-
tag: ghcr.io/codefuse-ai/runtime:0.1.0
15+
tag: ghcr.io/codefuse-ai/runtime:0.1.1
1616
tag_latest: ghcr.io/codefuse-ai/runtime:latest
1717
- name: ekgfrontend
1818
context: .
@@ -22,7 +22,7 @@ jobs:
2222
- name: ekgservice
2323
context: .
2424
dockerfile: ./Dockerfile_gh
25-
tag: ghcr.io/codefuse-ai/muagent:0.1.0
25+
tag: ghcr.io/codefuse-ai/muagent:0.1.1
2626
tag_latest: ghcr.io/codefuse-ai/muagent:latest
2727

2828
steps:

docker-compose.yaml

+1-1
Original file line numberDiff line numberDiff line change
@@ -190,7 +190,7 @@ services:
190190
context: .
191191
dockerfile: Dockerfile
192192
container_name: ekgservice
193-
image: muagent:0.1.0
193+
image: muagent:0.1.1
194194
environment:
195195
USER: root
196196
TZ: "${TZ}"

docker_pull_images.sh

+4-4
Original file line numberDiff line numberDiff line change
@@ -17,11 +17,11 @@ docker pull redis/redis-stack:7.4.0-v0
1717
docker pull ollama/ollama:0.3.6
1818

1919
# pull images from github ghcr.io by nju
20-
docker pull ghcr.nju.edu.cn/runtime:0.1.0
21-
docker pull ghcr.nju.edu.cn/muagent:0.1.0
20+
docker pull ghcr.nju.edu.cn/runtime:0.1.1
21+
docker pull ghcr.nju.edu.cn/muagent:0.1.1
2222
docker pull ghcr.nju.edu.cn/ekgfrontend:0.1.0
2323

2424
# # pull images from github ghcr.io
25-
# docker pull ghcr.io/runtime:0.1.0
26-
# docker pull ghcr.io/muagent:0.1.0
25+
# docker pull ghcr.io/runtime:0.1.1
26+
# docker pull ghcr.io/muagent:0.1.1
2727
# docker pull ghcr.io/ekgfrontend:0.1.0

examples/ekg_examples/start.py

+68-35
Original file line numberDiff line numberDiff line change
@@ -37,6 +37,7 @@
3737
import test_config
3838

3939
from muagent.schemas.db import *
40+
from muagent.schemas.apis.ekg_api_schema import LLMFCRequest
4041
from muagent.db_handler import *
4142
from muagent.llm_models.llm_config import EmbedConfig, LLMConfig
4243
from muagent.service.ekg_construct.ekg_construct_base import EKGConstructService
@@ -46,7 +47,8 @@
4647

4748
from pydantic import BaseModel
4849

49-
50+
from muagent.schemas.models import ModelConfig
51+
from muagent.models import get_model
5052

5153

5254
cur_dir = os.path.dirname(__file__)
@@ -92,56 +94,75 @@ def update_params(self, **kwargs):
9294

9395
def _llm_type(self, *args):
9496
return ""
95-
96-
def predict(self, prompt: str, stop = None) -> str:
97-
return self._call(prompt, stop)
98-
99-
def _call(self, prompt: str,
100-
stop = None) -> str:
97+
98+
def _get_model(self):
10199
"""_call
102100
"""
103-
return_str = ""
104-
stop = stop or self.stop
105-
106-
if self.model_type == "ollama":
107-
stream = ollama.chat(
108-
model=self.model_name,
109-
messages=[{'role': 'user', 'content': prompt}],
110-
stream=True,
111-
)
112-
answer = ""
113-
for chunk in stream:
114-
answer += chunk['message']['content']
115-
116-
return answer
117-
elif self.model_type == "openai":
101+
if self.model_type in [
102+
"ollama", "qwen", "openai", "lingyiwanwu",
103+
"kimi", "moonshot",
104+
]:
118105
from muagent.llm_models.openai_model import getChatModelFromConfig
119106
llm_config = LLMConfig(
120107
model_name=self.model_name,
121-
model_engine="openai",
108+
model_engine=self.model_type,
122109
api_key=self.api_key,
123110
api_base_url=self.url,
124111
temperature=self.temperature,
125112
stop=self.stop
126113
)
127114
model = getChatModelFromConfig(llm_config)
128-
return model.predict(prompt, stop=self.stop)
129-
elif self.model_type in ["lingyiwanwu", "kimi", "moonshot", "qwen"]:
130-
from muagent.llm_models.openai_model import getChatModelFromConfig
131-
llm_config = LLMConfig(
115+
else:
116+
model_config = ModelConfig(
117+
model_type=self.model_type,
132118
model_name=self.model_name,
133-
model_engine=self.model_type,
134119
api_key=self.api_key,
135-
api_base_url=self.url,
120+
api_url=self.url,
136121
temperature=self.temperature,
137-
stop=self.stop
138122
)
139-
model = getChatModelFromConfig(llm_config)
140-
return model.predict(prompt, stop=self.stop)
141-
else:
142-
pass
123+
model = get_model(model_config)
124+
return model
125+
126+
def predict(self, prompt: str, stop = None) -> str:
127+
return self._call(prompt, stop)
143128

144-
return return_str
129+
def fc(self, request: LLMFCRequest) -> str:
130+
"""_function_call
131+
"""
132+
if self.model_type not in [
133+
"openai", "ollama", "lingyiwanwu", "kimi", "moonshot", "qwen"
134+
]:
135+
return f"{self.model_type} not in valid model range"
136+
137+
model = self._get_model()
138+
return model.fc(
139+
messages=request.messages,
140+
tools=request.tools,
141+
tool_choice=request.tool_choice,
142+
parallel_tool_calls=request.parallel_tool_calls,
143+
)
144+
145+
def _call(self, prompt: str,
146+
stop = None) -> str:
147+
"""_call
148+
"""
149+
return_str = ""
150+
stop = stop or self.stop
151+
if self.model_type not in [
152+
"openai", "ollama", "lingyiwanwu", "kimi", "moonshot", "qwen"
153+
]:
154+
pass
155+
elif self.model_type not in [
156+
"dashscope_chat", "moonshot_chat", "ollama_chat",
157+
"openai_chat", "qwen_chat", "yi_chat",
158+
"dashscope_text_embedding", "ollama_embedding", "openai_embedding", "qwen_text_embedding"
159+
]:
160+
pass
161+
else:
162+
return f"{self.model_type} not in valid model range"
163+
164+
model = self._get_model()
165+
return model.predict(prompt, stop=self.stop)
145166

146167

147168
class CustomEmbeddings(Embeddings):
@@ -185,6 +206,17 @@ def _get_sentence_emb(self, sentence: str) -> dict:
185206
)
186207
text2vector_dict = get_embedding("openai", [sentence], embed_config=embed_config)
187208
return text2vector_dict[sentence]
209+
elif self.embedding_type in [
210+
"dashscope_text_embedding", "ollama_embedding", "openai_embedding", "qwen_text_embedding"
211+
]:
212+
model_config = ModelConfig(
213+
model_type=self.embedding_type,
214+
model_name=self.model_name,
215+
api_key=self.api_key,
216+
api_url=self.url,
217+
)
218+
model = get_model(model_config)
219+
return model.embed_query(sentence)
188220
else:
189221
pass
190222

@@ -280,6 +312,7 @@ def embed_query(self, text: str) -> List[float]:
280312
llm_config=llm_config,
281313
tb_config=tb_config,
282314
gb_config=gb_config,
315+
initialize_space=True,
283316
clear_history_data=clear_history_data
284317
)
285318

examples/muagent_examples/docchat_example.py

+2-1
Original file line numberDiff line numberDiff line change
@@ -60,7 +60,8 @@
6060
# create your knowledge base
6161
from muagent.service.kb_api import create_kb, upload_files2kb
6262
from muagent.utils.server_utils import run_async
63-
from muagent.orm import create_tables
63+
# from muagent.orm import create_tables
64+
from muagent.db_handler import create_tables
6465

6566

6667
# use to test, don't create some directory

examples/test_config.py.example

+104-2
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,8 @@
11
import os, openai, base64
22
from loguru import logger
33

4+
os.environ["DM_llm_name"] = 'Qwen2_72B_Instruct_OpsGPT' #or gpt_4
5+
46
# 兜底大模型配置
57
OPENAI_API_BASE = "https://api.openai.com/v1"
68
os.environ["API_BASE_URL"] = OPENAI_API_BASE
@@ -19,6 +21,78 @@ os.environ["gpt4-llm_temperature"] = "0.0"
1921

2022

2123

24+
MODEL_CONFIGS = {
25+
# old llm config
26+
"default": {
27+
"model_name": "gpt-3.5-turbo",
28+
"model_engine": "qwen",
29+
"temperature": "0",
30+
"api_key": "",
31+
"api_base_url": "https://dashscope.aliyuncs.com/compatible-mode/v1",
32+
},
33+
"codefuser":{
34+
"model_name": "gpt-4",
35+
"model_engine": "openai",
36+
"temperature": "0",
37+
"api_key": "",
38+
"api_base_url": OPENAI_API_BASE,
39+
},
40+
# new llm config
41+
"dashscope_chat": {
42+
"model_type": "dashscope_chat",
43+
"model_name": "qwen2.5-72b-instruct" ,
44+
"api_key": "",
45+
},
46+
"moonshot_chat": {
47+
"model_type": "moonshot_chat",
48+
"model_name": "moonshot-v1-8k" ,
49+
"api_key": "",
50+
},
51+
"ollama_chat": {
52+
"model_type": "ollama_chat",
53+
"model_name": "qwen2.5-0.5b",
54+
"api_key": "",
55+
},
56+
"openai_chat": {
57+
"model_type": "openai_chat",
58+
"model_name": "gpt-4",
59+
"api_key": "",
60+
},
61+
"qwen_chat": {
62+
"model_type": "qwen_chat",
63+
"model_name": "qwen2.5-72b-instruct",
64+
"api_key": "",
65+
},
66+
"yi_chat": {
67+
"model_type": "yi_chat",
68+
"model_name": "yi-lightning" ,
69+
"api_key": "",
70+
},
71+
# embedding configs
72+
"dashscope_text_embedding": {
73+
"model_type": "dashscope_text_embedding",
74+
"model_name": "text-embedding-v3",
75+
"api_key": "",
76+
},
77+
"ollama_embedding": {
78+
"model_type": "ollama_embedding",
79+
"model_name": "qwen2.5-0.5b",
80+
"api_key": "",
81+
},
82+
"openai_embedding": {
83+
"model_type": "openai_embedding",
84+
"model_name": "text-embedding-ada-002",
85+
"api_key": "",
86+
},
87+
"qwen_text_embedding": {
88+
"model_type": "dashscope_text_embedding",
89+
"model_name": "text-embedding-v3",
90+
"api_key": "",
91+
},
92+
}
93+
94+
os.environ["MODEL_CONFIGS"] = json.dumps(MODEL_CONFIGS)
95+
2296
#### NebulaHandler ####
2397
os.environ['nb_host'] = 'graphd'
2498
os.environ['nb_port'] = '9669'
@@ -41,8 +115,36 @@ os.environ["tb_index_name"] = "ekg_migration_new"
41115
os.environ['tb_definition_value'] = 'message_test_new'
42116
os.environ['tb_expire_time'] = '604800' #86400*7
43117

44-
# clear history data in tb and gb
45-
os.environ['clear_history_data'] = 'True'
118+
119+
#################
120+
## DB_CONFIGS ##
121+
#################
122+
DB_CONFIGS = {
123+
"gb_config": {
124+
"gb_type": "NebulaHandler",
125+
"extra_kwargs": {
126+
'host':'graphd',
127+
'port': '9669',
128+
'username': os.environ['nb_username'],
129+
'password': os.environ['nb_password'],
130+
'space': "client"
131+
}
132+
},
133+
"tb_config": {
134+
"tb_type": 'TBaseHandler',
135+
"index_name": "opsgptkg",
136+
"host": 'redis-stack',
137+
"port": '6379',
138+
"username": os.environ['tb_username'],
139+
"password": os.environ['tb_password'],
140+
"extra_kwargs": {
141+
"definition_value": "opsgptkg",
142+
"memory_definition_value": "opsgptkg_message"
143+
}
144+
}
145+
}
146+
os.environ["DB_CONFIGS"] = json.dumps(DB_CONFIGS)
147+
46148

47149

48150
########################################

muagent/__init__.py

+11-7
Original file line numberDiff line numberDiff line change
@@ -1,7 +1,11 @@
1-
# encoding: utf-8
2-
'''
3-
@author: 温进
4-
@file: __init__.py.py
5-
@time: 2023/11/9 下午4:01
6-
@desc:
7-
'''
1+
from .ekg_project import EKG, get_ekg_project_config_from_env
2+
from .project_manager import get_project_config_from_env
3+
from .models import get_model
4+
from .agents import get_agent
5+
from .tools import get_tool
6+
7+
__all__ = [
8+
"EKG", "get_model", "get_agent", "get_tool",
9+
"get_ekg_project_config_from_env",
10+
"get_project_config_from_env"
11+
]

muagent/agents/__init__.py

+30
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,30 @@
1+
from .base_agent import BaseAgent
2+
from .single_agent import SingleAgent
3+
from .react_agent import ReactAgent
4+
from .task_agent import TaskAgent
5+
from .group_agent import GroupAgent
6+
from .user_agent import UserAgent
7+
from .functioncall_agent import FunctioncallAgent
8+
from ..schemas import AgentConfig
9+
10+
__all__ = [
11+
"BaseAgent",
12+
"SingleAgent",
13+
"ReactAgent",
14+
"TaskAgent",
15+
"GroupAgent",
16+
"UserAgent",
17+
"FunctioncallAgent"
18+
]
19+
20+
21+
def get_agent(agent_config: AgentConfig) -> BaseAgent:
22+
"""Get the agent by agent config
23+
24+
Args:
25+
agent_config (`AgentConfig`): The agent config
26+
27+
Returns:
28+
`BaseAgent`: The specific agent
29+
"""
30+
return BaseAgent.init_from_project_config(agent_config)

0 commit comments

Comments
 (0)