forked from gptscript-ai/py-gptscript
gptscript.py
import base64
import json
import os
import platform
from subprocess import Popen, PIPE
from sys import executable
from typing import Any, Callable, Awaitable, List
from gptscript.confirm import AuthResponse
from gptscript.credentials import Credential, to_credential
from gptscript.datasets import DatasetMeta, Dataset, DatasetElementMeta, DatasetElement
from gptscript.fileinfo import FileInfo
from gptscript.frame import RunFrame, CallFrame, PromptFrame, Program
from gptscript.opts import GlobalOptions
from gptscript.prompt import PromptResponse
from gptscript.run import Run, RunBasicCommand, Options
from gptscript.text import Text
from gptscript.tool import ToolDef, Tool
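

# GPTScript is a thin client for the gptscript SDK server. The first instance
# that has no URL configured launches a local "gptscript sys.sdkserver"
# subprocess; that subprocess and its URL are shared by every instance and are
# torn down again once the last instance is closed.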
class GPTScript:
    __gptscript_count = 0
    __server_url = ""
    __process: Popen = None

    def __init__(self, opts: GlobalOptions = None):
        if opts is None:
            opts = GlobalOptions()
        self.opts = opts

        start_sdk = GPTScript.__process is None and GPTScript.__server_url == "" and self.opts.URL == ""
        GPTScript.__gptscript_count += 1

        if GPTScript.__server_url == "":
            GPTScript.__server_url = os.environ.get("GPTSCRIPT_URL", "")
            start_sdk = start_sdk and GPTScript.__server_url == ""

        if start_sdk:
            self.opts.toEnv()

            GPTScript.__process = Popen(
                [_get_command(), "sys.sdkserver", "--listen-address", "127.0.0.1:0"],
                stdin=PIPE,
                stdout=PIPE,
                stderr=PIPE,
                env={e.split("=", 1)[0]: e.split("=", 1)[1] for e in self.opts.Env},
                text=True,
                encoding="utf-8",
            )

            GPTScript.__server_url = GPTScript.__process.stderr.readline().strip("\n")
            if "=" in GPTScript.__server_url:
                GPTScript.__server_url = GPTScript.__server_url.split("=")[1]

        if self.opts.URL == "":
            self.opts.URL = GPTScript.__server_url
        if not (self.opts.URL.startswith("http://") or self.opts.URL.startswith("https://")):
            self.opts.URL = f"http://{self.opts.URL}"

        self.opts.Env.append("GPTSCRIPT_URL=" + self.opts.URL)

        if self.opts.Token == "":
            self.opts.Token = os.environ.get("GPTSCRIPT_TOKEN", "")
        if self.opts.Token != "":
            self.opts.Env.append("GPTSCRIPT_TOKEN=" + self.opts.Token)

    def close(self):
        GPTScript.__gptscript_count -= 1
        if GPTScript.__gptscript_count == 0 and GPTScript.__process is not None:
            GPTScript.__process.stdin.close()
            GPTScript.__process.wait()
            GPTScript.__process = None
            self.opts = None
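
    # evaluate() runs in-memory tool definitions, run() runs a tool by file path
    # or URL; both return a Run driven via next_chat(), with call, run, and
    # prompt frames delivered to the optional event_handlers.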

    def evaluate(
            self,
            tool: ToolDef | list[ToolDef],
            opts: Options = None,
            event_handlers: list[Callable[[Run, CallFrame | RunFrame | PromptFrame], Awaitable[None]]] = None
    ) -> Run:
        opts = opts if opts is not None else Options()
        return Run(
            "evaluate",
            tool,
            opts.merge_global_opts(self.opts),
            event_handlers=event_handlers,
        ).next_chat(opts.input)

    def run(
            self, tool_path: str,
            opts: Options = None,
            event_handlers: list[Callable[[Run, CallFrame | RunFrame | PromptFrame], Awaitable[None]]] = None
    ) -> Run:
        opts = opts if opts is not None else Options()
        return Run(
            "run",
            tool_path,
            opts.merge_global_opts(self.opts),
            event_handlers=event_handlers,
        ).next_chat(opts.input)
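
    # The load_* helpers ask the SDK server to load a program from a file, from
    # raw script content, or from a list of ToolDefs, and return the resulting
    # Program.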

    async def load_file(self, file_path: str, disable_cache: bool = False, sub_tool: str = '') -> Program:
        out = await self._run_basic_command(
            "load",
            {"file": file_path, "disableCache": disable_cache, "subTool": sub_tool},
        )
        parsed_nodes = json.loads(out)
        return Program(**parsed_nodes.get("program", {}))

    async def load_content(self, content: str, disable_cache: bool = False, sub_tool: str = '') -> Program:
        out = await self._run_basic_command(
            "load",
            {"content": content, "disableCache": disable_cache, "subTool": sub_tool},
        )
        parsed_nodes = json.loads(out)
        return Program(**parsed_nodes.get("program", {}))

    async def load_tools(self, tool_defs: list[ToolDef], disable_cache: bool = False, sub_tool: str = '') -> Program:
        out = await self._run_basic_command(
            "load",
            {"toolDefs": [t.to_json() for t in tool_defs], "disableCache": disable_cache, "subTool": sub_tool},
        )
        parsed_nodes = json.loads(out)
        return Program(**parsed_nodes.get("program", {}))
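
    # parse()/parse_content() return a script's text and tool nodes, and fmt()
    # renders such nodes back into gptscript source. confirm() and prompt()
    # answer authorization and prompt events raised during a run.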

    async def parse(self, file_path: str, disable_cache: bool = False) -> list[Text | Tool]:
        out = await self._run_basic_command("parse", {"file": file_path, "disableCache": disable_cache})
        parsed_nodes = json.loads(out)
        if parsed_nodes is None or parsed_nodes.get("nodes", None) is None:
            return []
        return [Text(**node["textNode"]) if "textNode" in node else Tool(**node.get("toolNode", {}).get("tool", {})) for
                node in parsed_nodes.get("nodes", [])]

    async def parse_content(self, content: str) -> list[Text | Tool]:
        out = await self._run_basic_command("parse", {"content": content})
        parsed_nodes = json.loads(out)
        if parsed_nodes is None or parsed_nodes.get("nodes", None) is None:
            return []
        return [Text(**node["textNode"]) if "textNode" in node else Tool(**node.get("toolNode", {}).get("tool", {})) for
                node in parsed_nodes.get("nodes", [])]

    async def fmt(self, nodes: list[Text | Tool]) -> str:
        request_nodes = []
        for node in nodes:
            request_nodes.append(node.to_json())
        return await self._run_basic_command("fmt", {"nodes": request_nodes})

    async def confirm(self, resp: AuthResponse):
        await self._run_basic_command("confirm/" + resp.id, {**vars(resp)})

    async def prompt(self, resp: PromptResponse):
        await self._run_basic_command("prompt-response/" + resp.id, resp.responses)

    async def _run_basic_command(self, sub_command: str, request_body: Any = None):
        run = RunBasicCommand(sub_command, request_body, self.opts.URL, self.opts.Token)
        run.next_chat()

        out = await run.text()
        if run.err() != "":
            return f"an error occurred: {out}"

        return out

    async def version(self) -> str:
        return await self._run_basic_command("version")

    async def list_models(self, providers: list[str] = None, credential_overrides: list[str] = None) -> list[str]:
        if self.opts.DefaultModelProvider != "":
            if providers is None:
                providers = []
            providers.append(self.opts.DefaultModelProvider)

        return (await self._run_basic_command(
            "list-models",
            {"providers": providers, "credentialOverrides": credential_overrides}
        )).split("\n")
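
    # Credential helpers operate on named credential contexts (defaulting to
    # "default"); list and reveal pass the "an error occurred:" string through
    # instead of raising.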

    async def list_credentials(self, contexts: List[str] = None, all_contexts: bool = False) -> list[Credential] | str:
        if contexts is None:
            contexts = ["default"]

        res = await self._run_basic_command(
            "credentials",
            {"context": contexts, "allContexts": all_contexts}
        )
        if res.startswith("an error occurred:"):
            return res

        return [to_credential(cred) for cred in json.loads(res)]

    async def create_credential(self, cred: Credential) -> str:
        return await self._run_basic_command(
            "credentials/create",
            {"content": cred.to_json()}
        )

    async def reveal_credential(self, contexts: List[str] = None, name: str = "") -> Credential | str:
        if contexts is None:
            contexts = ["default"]

        res = await self._run_basic_command(
            "credentials/reveal",
            {"context": contexts, "name": name}
        )
        if res.startswith("an error occurred:"):
            return res

        return to_credential(json.loads(res))

    async def delete_credential(self, context: str = "default", name: str = "") -> str:
        return await self._run_basic_command(
            "credentials/delete",
            {"context": [context], "name": name}
        )
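
    # Dataset helpers address datasets stored in a workspace; an empty
    # workspace_id falls back to the GPTSCRIPT_WORKSPACE_ID environment
    # variable, and requests are executed by the tool configured via
    # opts.DatasetToolRepo.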

    async def list_datasets(self, workspace_id: str) -> List[DatasetMeta]:
        if workspace_id == "":
            workspace_id = os.environ["GPTSCRIPT_WORKSPACE_ID"]

        res = await self._run_basic_command(
            "datasets",
            {"input": "{}", "workspaceID": workspace_id, "datasetToolRepo": self.opts.DatasetToolRepo,
             "env": self.opts.Env}
        )
        return [DatasetMeta.model_validate(d) for d in json.loads(res)]

    async def create_dataset(self, workspace_id: str, name: str, description: str = "") -> Dataset:
        if workspace_id == "":
            workspace_id = os.environ["GPTSCRIPT_WORKSPACE_ID"]

        if name == "":
            raise ValueError("name cannot be empty")

        res = await self._run_basic_command(
            "datasets/create",
            {
                "input": json.dumps({"datasetName": name, "datasetDescription": description}),
                "workspaceID": workspace_id,
                "datasetToolRepo": self.opts.DatasetToolRepo,
                "env": self.opts.Env,
            }
        )
        return Dataset.model_validate_json(res)

    async def add_dataset_element(self, workspace_id: str, datasetID: str, elementName: str, elementContent: str,
                                  elementDescription: str = "") -> DatasetElementMeta:
        if workspace_id == "":
            workspace_id = os.environ["GPTSCRIPT_WORKSPACE_ID"]

        if datasetID == "":
            raise ValueError("datasetID cannot be empty")
        elif elementName == "":
            raise ValueError("elementName cannot be empty")
        elif elementContent == "":
            raise ValueError("elementContent cannot be empty")

        res = await self._run_basic_command(
            "datasets/add-element",
            {
                "input": json.dumps({
                    "datasetID": datasetID,
                    "elementName": elementName,
                    "elementContent": elementContent,
                    "elementDescription": elementDescription,
                }),
                "workspaceID": workspace_id,
                "datasetToolRepo": self.opts.DatasetToolRepo,
                "env": self.opts.Env
            }
        )
        return DatasetElementMeta.model_validate_json(res)

    async def list_dataset_elements(self, workspace_id: str, datasetID: str) -> List[DatasetElementMeta]:
        if workspace_id == "":
            workspace_id = os.environ["GPTSCRIPT_WORKSPACE_ID"]

        if datasetID == "":
            raise ValueError("datasetID cannot be empty")

        res = await self._run_basic_command(
            "datasets/list-elements",
            {
                "input": json.dumps({"datasetID": datasetID}),
                "workspaceID": workspace_id,
                "datasetToolRepo": self.opts.DatasetToolRepo,
                "env": self.opts.Env
            }
        )
        return [DatasetElementMeta.model_validate(d) for d in json.loads(res)]

    async def get_dataset_element(self, workspace_id: str, datasetID: str, elementName: str) -> DatasetElement:
        if workspace_id == "":
            workspace_id = os.environ["GPTSCRIPT_WORKSPACE_ID"]

        if datasetID == "":
            raise ValueError("datasetID cannot be empty")
        elif elementName == "":
            raise ValueError("elementName cannot be empty")

        res = await self._run_basic_command(
            "datasets/get-element",
            {
                "input": json.dumps({"datasetID": datasetID, "element": elementName}),
                "workspaceID": workspace_id,
                "datasetToolRepo": self.opts.DatasetToolRepo,
                "env": self.opts.Env,
            }
        )
        return DatasetElement.model_validate_json(res)
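
    # Workspace helpers manage workspaces and their files via the tool
    # configured in opts.WorkspaceTool; file contents are base64-encoded on the
    # wire, so write/read accept and return raw bytes.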

    async def create_workspace(self, provider_type: str, from_workspaces: list[str] = None) -> str:
        return await self._run_basic_command(
            "workspaces/create",
            {
                "providerType": provider_type,
                "fromWorkspaces": from_workspaces,
                "workspaceTool": self.opts.WorkspaceTool,
                "env": self.opts.Env,
            }
        )

    async def delete_workspace(self, workspace_id: str):
        if workspace_id == "":
            raise ValueError("workspace_id cannot be empty")

        await self._run_basic_command(
            "workspaces/delete",
            {
                "id": workspace_id,
                "workspaceTool": self.opts.WorkspaceTool,
                "env": self.opts.Env,
            }
        )

    async def list_files_in_workspace(self, workspace_id: str = "", prefix: str = "") -> List[str]:
        if workspace_id == "":
            workspace_id = os.environ["GPTSCRIPT_WORKSPACE_ID"]

        return json.loads(await self._run_basic_command(
            "workspaces/list",
            {
                "id": workspace_id,
                "prefix": prefix,
                "workspaceTool": self.opts.WorkspaceTool,
                "env": self.opts.Env,
            }
        ))

    async def remove_all(self, workspace_id: str = "", with_prefix: str = ""):
        if workspace_id == "":
            workspace_id = os.environ["GPTSCRIPT_WORKSPACE_ID"]

        await self._run_basic_command(
            "workspaces/remove-all-with-prefix",
            {
                "id": workspace_id,
                "prefix": with_prefix,
                "workspaceTool": self.opts.WorkspaceTool,
                "env": self.opts.Env,
            }
        )

    async def write_file_in_workspace(self, file_path: str, contents: bytes, workspace_id: str = ""):
        if workspace_id == "":
            workspace_id = os.environ["GPTSCRIPT_WORKSPACE_ID"]

        await self._run_basic_command(
            "workspaces/write-file",
            {
                "id": workspace_id,
                "filePath": file_path,
                "contents": base64.b64encode(contents).decode("utf-8") if contents is not None else None,
                "workspaceTool": self.opts.WorkspaceTool,
                "env": self.opts.Env,
            }
        )

    async def delete_file_in_workspace(self, file_path: str, workspace_id: str = ""):
        if workspace_id == "":
            workspace_id = os.environ["GPTSCRIPT_WORKSPACE_ID"]

        await self._run_basic_command(
            "workspaces/delete-file",
            {
                "id": workspace_id,
                "filePath": file_path,
                "workspaceTool": self.opts.WorkspaceTool,
                "env": self.opts.Env,
            }
        )

    async def read_file_in_workspace(self, file_path: str, workspace_id: str = "") -> bytes:
        if workspace_id == "":
            workspace_id = os.environ["GPTSCRIPT_WORKSPACE_ID"]

        return base64.b64decode(await self._run_basic_command(
            "workspaces/read-file",
            {
                "id": workspace_id,
                "filePath": file_path,
                "workspaceTool": self.opts.WorkspaceTool,
                "env": self.opts.Env,
            }
        ))

    async def stat_file_in_workspace(self, file_path: str, workspace_id: str = "") -> FileInfo:
        if workspace_id == "":
            workspace_id = os.environ["GPTSCRIPT_WORKSPACE_ID"]

        return FileInfo.model_validate_json(await self._run_basic_command(
            "workspaces/stat-file",
            {
                "id": workspace_id,
                "filePath": file_path,
                "workspaceTool": self.opts.WorkspaceTool,
                "env": self.opts.Env,
            }
        ))
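

# _get_command resolves the gptscript binary used to launch the SDK server:
# GPTSCRIPT_BIN wins if set, then a "gptscript" binary installed next to the
# current Python interpreter, and finally "gptscript" on PATH.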


def _get_command():
    if os.getenv("GPTSCRIPT_BIN") is not None:
        return os.getenv("GPTSCRIPT_BIN")

    bin_path = os.path.join(os.path.dirname(executable), "gptscript")
    if platform.system() == "Windows":
        bin_path += ".exe"

    return bin_path if os.path.exists(bin_path) else "gptscript"
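

# Example usage (a minimal sketch, not part of this file): it assumes the
# package import path gptscript.gptscript, that Options accepts an input=
# keyword (opts.input is read by run/evaluate above), that Run exposes an
# awaitable text() like the RunBasicCommand used by _run_basic_command, and a
# hypothetical ./hello.gpt script.
#
#     import asyncio
#     from gptscript.gptscript import GPTScript
#     from gptscript.opts import Options
#
#     async def main():
#         g = GPTScript()
#         try:
#             run = g.run("./hello.gpt", Options(input="World"))
#             print(await run.text())
#         finally:
#             g.close()
#
#     asyncio.run(main())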