forked from gptscript-ai/py-gptscript
test_gptscript.py
import base64
import gzip
import json
import os
import platform
import subprocess
import pytest
from gptscript.confirm import AuthResponse
from gptscript.exec_utils import get_env
from gptscript.frame import RunEventType, CallFrame, RunFrame, RunState, PromptFrame
from gptscript.gptscript import GPTScript
from gptscript.install import install, gptscript_binary_name, python_bin_dir
from gptscript.opts import GlobalOptions, Options
from gptscript.prompt import PromptResponse
from gptscript.run import Run
from gptscript.text import Text
from gptscript.tool import ToolDef, ArgumentSchema, Property, Tool
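
# End-to-end tests for the GPTScript Python SDK. Most tests talk to a real model
# and require OPENAI_API_KEY; the Anthropic provider tests are skipped unless
# ANTHROPIC_API_KEY is also set.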
# Ensure the OPENAI_API_KEY is set for testing
@pytest.fixture(scope="session", autouse=True)
def gptscript():
    if os.getenv("OPENAI_API_KEY") is None:
        pytest.fail("OPENAI_API_KEY not set", pytrace=False)
    try:
        # Start an initial GPTScript instance.
        # This one doesn't have any options, but it's there to ensure that using
        # another instance works as expected in all cases.
        g_first = GPTScript()
        gptscript = GPTScript(GlobalOptions(apiKey=os.getenv("OPENAI_API_KEY")))
        yield gptscript
        gptscript.close()
        g_first.close()
    except Exception as e:
        pytest.fail(e, pytrace=False)


# Simple tool for testing
@pytest.fixture
def simple_tool():
    return ToolDef(
        instructions="What is the capital of the united states?"
    )


# Complex tool for testing
@pytest.fixture
def complex_tool():
    return ToolDef(
        jsonResponse=True,
        instructions="""Create three short graphic artist descriptions and their muses.
These should be descriptive and explain their point of view.
Also come up with a made up name, they each should be from different
backgrounds and approach art differently.
the response should be in JSON and match the format:
{
  artists: [{
    name: "name"
    description: "description"
  }]
}
""",
    )


# Fixture for a list of tools
@pytest.fixture
def tool_list():
    shebang = "#!/bin/bash"
    if platform.system().lower() == "windows":
        shebang = "#!/usr/bin/env powershell.exe"
    return [
        ToolDef(tools=["echo"], instructions="echo 'hello there'"),
        ToolDef(name="other", tools=["echo"], instructions="echo 'hello somewhere else'"),
        ToolDef(
            name="echo",
            tools=["sys.exec"],
            description="Echoes the input",
            arguments=ArgumentSchema(properties={"input": Property("The string input to echo")}),
            # Interpolate the platform-specific shebang; the braces around input are
            # escaped so the rendered tool body keeps the literal ${input} placeholder.
            instructions=f"""
{shebang}
echo "${{input}}"
""",
        ),
    ]


def test_install():
    install()
    bin_name = str(python_bin_dir / gptscript_binary_name)
    process = subprocess.Popen([bin_name, '-v'], stdout=subprocess.PIPE, text=True)
    assert process.stdout.read().startswith('gptscript version ')


@pytest.mark.asyncio
async def test_create_another_gptscript():
    g = GPTScript()
    version = await g.version()
    g.close()
    assert "gptscript version" in version


@pytest.mark.asyncio
async def test_version(gptscript):
    v = await gptscript.version()
    assert "gptscript version " in v


@pytest.mark.asyncio
async def test_list_models(gptscript):
    models = await gptscript.list_models()
    assert isinstance(models, list) and len(models) > 1, "Expected list_models to return a list"


@pytest.mark.asyncio
@pytest.mark.skipif(os.environ.get("ANTHROPIC_API_KEY") is None, reason="ANTHROPIC_API_KEY not set")
async def test_list_models_from_provider(gptscript):
    models = await gptscript.list_models(
        providers=["github.com/gptscript-ai/claude3-anthropic-provider"],
        credential_overrides=["github.com/gptscript-ai/claude3-anthropic-provider/credential:ANTHROPIC_API_KEY"],
    )
    assert isinstance(models, list) and len(models) > 1, "Expected list_models to return a list"
    for model in models:
        assert model.startswith("claude-3-"), "Unexpected model name"
        assert model.endswith("from github.com/gptscript-ai/claude3-anthropic-provider"), "Unexpected model name"


@pytest.mark.asyncio
@pytest.mark.skipif(os.environ.get("ANTHROPIC_API_KEY") is None, reason="ANTHROPIC_API_KEY not set")
async def test_list_models_from_default_provider():
    g = GPTScript(GlobalOptions(defaultModelProvider="github.com/gptscript-ai/claude3-anthropic-provider"))
    try:
        models = await g.list_models(
            credential_overrides=["github.com/gptscript-ai/claude3-anthropic-provider/credential:ANTHROPIC_API_KEY"],
        )
        assert isinstance(models, list) and len(models) > 1, "Expected list_models to return a list"
        for model in models:
            assert model.startswith("claude-3-"), "Unexpected model name"
            assert model.endswith("from github.com/gptscript-ai/claude3-anthropic-provider"), "Unexpected model name"
    finally:
        g.close()


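# Closing the run from inside its own event handler should abort it: the output
# reports the abort and the run ends in the Error state.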
@pytest.mark.asyncio
async def test_abort_run(gptscript):
    async def abort_run(run: Run, e: CallFrame | RunFrame | PromptFrame):
        await run.aclose()

    run = gptscript.evaluate(ToolDef(instructions="What is the capital of the united states?"),
                             Options(disableCache=True), event_handlers=[abort_run])

    assert "Run was aborted" in await run.text(), "Unexpected output from abort_run"
    assert RunState.Error == run.state(), "Unexpected run state after aborting"


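# A context tool that exits with EXIT_CODE=1 forces the first run to fail; after
# clearing the env override, next_chat("") should rerun it successfully.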
@pytest.mark.asyncio
async def test_restart_failed_run(gptscript):
    shebang = "#!/bin/bash"
    instructions = f"""{shebang}
exit ${{EXIT_CODE}}
"""
    if platform.system().lower() == "windows":
        shebang = "#!/usr/bin/env powershell.exe"
        instructions = f"""{shebang}
exit $env:EXIT_CODE
"""

    tools = [
        ToolDef(tools=["my-context"]),
        ToolDef(
            name="my-context",
            type="context",
            instructions=instructions,
        ),
    ]

    run = gptscript.evaluate(tools, Options(disableCache=True, env=["EXIT_CODE=1"]))
    await run.text()
    assert run.state() == RunState.Error, "Unexpected run state after exit 1"

    run.opts.env = None
    run = run.next_chat("")
    await run.text()
    assert run.state() != RunState.Error, "Unexpected run state after restart"


@pytest.mark.asyncio
async def test_eval_simple_tool(gptscript, simple_tool):
    run = gptscript.evaluate(simple_tool)
    out = await run.text()
    assert "Washington" in out, "Unexpected response for tool run"


@pytest.mark.asyncio
async def test_eval_complex_tool(gptscript, complex_tool):
    run = gptscript.evaluate(complex_tool, Options(disableCache=True))
    out = await run.text()
    assert '"artists":' in out, "Expected some output from eval using complex_tool"


@pytest.mark.asyncio
async def test_eval_tool_list(gptscript, tool_list):
    run = gptscript.evaluate(tool_list)
    out = await run.text()
    assert out.strip() == "hello there", "Unexpected output from eval using a list of tools"


@pytest.mark.asyncio
async def test_eval_tool_list_with_sub_tool(gptscript, tool_list):
    run = gptscript.evaluate(tool_list, opts=Options(subTool="other"))
    out = await run.text()
    assert out.strip() == "hello somewhere else", "Unexpected output from eval using a list of tools with sub tool"


@pytest.mark.asyncio
async def test_stream_exec_complex_tool(gptscript, complex_tool):
    stream_output = ""

    async def collect_events(run: Run, e: CallFrame | RunFrame | PromptFrame):
        nonlocal stream_output
        if str(e.type.name).startswith("call") and e.output is not None:
            for output in e.output:
                stream_output += output.content

    run = gptscript.evaluate(complex_tool, Options(disableCache=True), event_handlers=[collect_events])
    out = await run.text()
    assert '"artists":' in out, "Expected some output from streaming using complex_tool"
    assert '"artists":' in stream_output, "Expected stream_output to have output"


@pytest.mark.asyncio
async def test_stream_run_file(gptscript):
    stream_output = ""

    async def collect_events(run: Run, e: CallFrame | RunFrame | PromptFrame):
        nonlocal stream_output
        if str(e.type.name).startswith("call") and e.output is not None:
            for output in e.output:
                stream_output += output.content

    run = gptscript.run("./tests/fixtures/test.gpt", Options(disableCache=True), event_handlers=[collect_events])
    assert "Ronald Reagan" in await run.text(), "Expect streaming file to have correct output"
    assert "Ronald Reagan" in stream_output, "Expect stream_output to have correct output when streaming from file"


@pytest.mark.asyncio
async def test_credential_override(gptscript):
    gptscriptFile = "credential-override.gpt"
    if platform.system().lower() == "windows":
        gptscriptFile = "credential-override-windows.gpt"
    run = gptscript.run(
        f"{os.getcwd()}{os.sep}tests{os.sep}fixtures{os.sep}{gptscriptFile}",
        Options(
            disableCache=True,
            credentialOverrides=['test.ts.credential_override:TEST_CRED=foo']
        ),
    )
    assert "foo" in await run.text(), "Expect credential override to have correct output"


@pytest.mark.asyncio
async def test_eval_with_context(gptscript):
    wd = os.getcwd()
    tool = ToolDef(
        instructions="What is the capital of the united states?",
        tools=[wd + "/tests/fixtures/acorn-labs-context.gpt"],
    )
    run = gptscript.evaluate(tool)
    assert "Acorn Labs" == await run.text(), "Unexpected output from eval using context"


@pytest.mark.asyncio
async def test_load_simple_file(gptscript):
    wd = os.getcwd()
    prg = await gptscript.load_file(wd + "/tests/fixtures/test.gpt")
    assert prg.toolSet[prg.entryToolId].instructions == "Who was the president of the United States in 1986?", \
        "Unexpected output from parsing simple file"


@pytest.mark.asyncio
async def test_load_remote_tool(gptscript):
    prg = await gptscript.load_file("github.com/gptscript-ai/context/workspace")
    assert prg.entryToolId != "", "Unexpected entry tool id from remote tool"
    assert len(prg.toolSet) > 0, "Unexpected number of tools in remote tool"
    assert prg.name != "", "Unexpected name from remote tool"


@pytest.mark.asyncio
async def test_load_simple_content(gptscript):
    wd = os.getcwd()
    with open(wd + "/tests/fixtures/test.gpt") as f:
        prg = await gptscript.load_content(f.read())
    assert prg.toolSet[prg.entryToolId].instructions == "Who was the president of the United States in 1986?", \
        "Unexpected output from parsing simple file"


@pytest.mark.asyncio
async def test_load_tools(gptscript, tool_list):
    prg = await gptscript.load_tools(tool_list)
    assert prg.entryToolId != "", "Unexpected entry tool id from remote tool"
    assert len(prg.toolSet) > 0, "Unexpected number of tools in remote tool"
    # Name will be empty in this case.
    assert prg.name == "", "Unexpected name from remote tool"


@pytest.mark.asyncio
async def test_parse_simple_file(gptscript):
    wd = os.getcwd()
    tools = await gptscript.parse(wd + "/tests/fixtures/test.gpt")
    assert len(tools) == 1, "Unexpected number of tools for parsing simple file"
    assert isinstance(tools[0], Tool), "Unexpected node type from parsing simple file"
    assert tools[0].instructions == "Who was the president of the United States in 1986?", \
        "Unexpected output from parsing simple file"


@pytest.mark.asyncio
async def test_parse_empty_file(gptscript):
    wd = os.getcwd()
    tools = await gptscript.parse(wd + "/tests/fixtures/empty.gpt")
    assert len(tools) == 0, "Unexpected number of tools for parsing empty file"


@pytest.mark.asyncio
async def test_parse_empty_str(gptscript):
    tools = await gptscript.parse_content("")
    assert len(tools) == 0, "Unexpected number of tools for parsing empty string"


@pytest.mark.asyncio
async def test_parse_tool_with_metadata(gptscript):
    wd = os.getcwd()
    tools = await gptscript.parse(wd + "/tests/fixtures/parse-with-metadata.gpt")
    assert len(tools) == 2, "Unexpected number of tools for parsing file with metadata"
    assert isinstance(tools[0], Tool), "Unexpected node type from parsing file with metadata"
    assert "requests.get(" in tools[0].instructions, "Unexpected output from parsing file with metadata"
    assert isinstance(tools[1], Text), "Unexpected node type from parsing file with metadata"
    assert tools[1].text == "requests", "Unexpected output from parsing file with metadata"
    assert tools[1].format == "metadata:foo:requirements.txt", "Unexpected output from parsing file with metadata"


@pytest.mark.asyncio
async def test_parse_tool(gptscript):
    tools = await gptscript.parse_content("echo hello")
    assert len(tools) == 1, "Unexpected number of tools for parsing tool"
    assert isinstance(tools[0], Tool), "Unexpected node type from parsing tool"
    assert tools[0].instructions == "echo hello", "Unexpected output from parsing tool"


@pytest.mark.asyncio
async def test_parse_tool_with_text_node(gptscript):
    tools = await gptscript.parse_content("echo hello\n---\n!markdown\nhello")
    assert len(tools) == 2, "Unexpected number of tools for parsing tool with text node"
    assert isinstance(tools[0], Tool), "Unexpected node type for first tool from parsing tool with text node"
    assert isinstance(tools[1], Text), "Unexpected node type for second tool from parsing tool with text node"
    assert tools[0].instructions == "echo hello", "Unexpected instructions from parsing tool with text node"
    assert tools[1].text == "hello", "Unexpected text node text from parsing tool with text node"
    assert tools[1].format == "markdown", "Unexpected text node fmt from parsing tool with text node"


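# fmt() renders Tool/Text nodes back into gptscript source text; in the expected
# output below, "---" separates documents and "!markdown" marks a text node.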
@pytest.mark.asyncio
async def test_fmt(gptscript):
    nodes = [
        Tool(tools=["echo"], instructions="echo hello there"),
        Tool(
            name="echo",
            instructions="#!/bin/bash\necho hello there",
            arguments=ArgumentSchema(
                properties={"input": Property(description="The string input to echo")},
            )
        )
    ]

    expected_output = """Tools: echo

echo hello there

---
Name: echo
Parameter: input: The string input to echo

#!/bin/bash
echo hello there
"""
    assert await gptscript.fmt(nodes) == expected_output, "Unexpected output from fmt using nodes"


@pytest.mark.asyncio
async def test_fmt_with_text_node(gptscript):
    nodes = [
        Tool(tools=["echo"], instructions="echo hello there"),
        Text(fmt="markdown", text="We now echo hello there"),
        Tool(
            name="echo",
            instructions="#!/bin/bash\necho hello there",
            arguments=ArgumentSchema(
                properties={"input": Property(description="The string input to echo")},
            )
        )
    ]

    expected_output = """Tools: echo

echo hello there

---
!markdown
We now echo hello there
---
Name: echo
Parameter: input: The string input to echo

#!/bin/bash
echo hello there
"""
    assert await gptscript.fmt(nodes) == expected_output, "Unexpected output from fmt using nodes"


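# Chat tests: a chat-enabled tool leaves the run in the Continue state after each
# turn, and next_chat() sends the next user message on the same conversation.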
@pytest.mark.asyncio
async def test_tool_chat(gptscript):
    tool = ToolDef(
        chat=True,
        instructions="You are a chat bot. Don't finish the conversation until I say 'bye'.",
        tools=["sys.chat.finish"],
    )
    inputs = [
        "List the three largest states in the United States by area.",
        "What is the capital of the third one?",
        "What timezone is the first one in?",
    ]
    expected_outputs = [
        "California",
        "Sacramento",
        "Alaska Time Zone",
    ]

    run = gptscript.evaluate(tool)
    await run.text()
    assert run.state() == RunState.Continue, "first run in unexpected state"

    for i in range(len(inputs)):
        run = run.next_chat(inputs[i])
        output = await run.text()
        assert run.state() == RunState.Continue, "run in unexpected state"
        assert expected_outputs[i] in output, "unexpected output for chat"


@pytest.mark.asyncio
async def test_file_chat(gptscript):
    inputs = [
        "List the 3 largest of the Great Lakes by volume.",
        "What is the second largest?",
        "What is the third one in the list?",
    ]
    expected_outputs = [
        "Lake Superior",
        "Lake Michigan",
        "Lake Huron",
    ]

    run = gptscript.run(os.getcwd() + "/tests/fixtures/chat.gpt")
    await run.text()
    assert run.state() == RunState.Continue, "first run in unexpected state"

    for i in range(len(inputs)):
        run = run.next_chat(inputs[i])
        output = await run.text()
        assert run.state() == RunState.Continue, "run in unexpected state"
        assert expected_outputs[i] in output, "unexpected output for chat"


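# Tracks every run/call lifecycle event type (runStart, callStart, callProgress,
# callFinish, runFinish) to verify each stage is emitted at least once, while
# also collecting the streamed call output.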
@pytest.mark.asyncio
async def test_global_tools(gptscript):
    run_start_seen = False
    call_start_seen = False
    call_progress_seen = False
    call_finish_seen = False
    run_finish_seen = False
    event_output = ""

    async def process_event(r: Run, frame: CallFrame | RunFrame | PromptFrame):
        nonlocal run_start_seen, call_start_seen, call_progress_seen, call_finish_seen, run_finish_seen, event_output
        if isinstance(frame, RunFrame):
            if frame.type == RunEventType.runStart:
                run_start_seen = True
            elif frame.type == RunEventType.runFinish:
                run_finish_seen = True
        else:
            if frame.type == RunEventType.callStart:
                call_start_seen = True
            elif frame.type == RunEventType.callProgress:
                call_progress_seen = True
                for output in frame.output:
                    event_output += output.content
            elif frame.type == RunEventType.callFinish:
                call_finish_seen = True

    run = gptscript.run(os.getcwd() + "/tests/fixtures/global-tools.gpt",
                        Options(disableCache=True),
                        event_handlers=[process_event],
                        )

    assert "Hello!" in await run.text(), "Unexpected output from global tool test"
    assert "Hello" in event_output, "Unexpected stream output from global tool test"
    assert run_start_seen and call_start_seen and call_progress_seen and call_finish_seen and run_finish_seen, \
        f"One of these is False: {run_start_seen}, {call_start_seen}, {call_progress_seen}, {call_finish_seen}, {run_finish_seen}"


@pytest.mark.asyncio
async def test_confirm(gptscript):
    confirm_event_found = False
    event_content = ""

    async def process_event(r: Run, frame: CallFrame | RunFrame | PromptFrame):
        nonlocal confirm_event_found, event_content
        if frame.type == RunEventType.callConfirm:
            confirm_event_found = True
            assert '"ls' in frame.input or '"dir' in frame.input, "Unexpected confirm input: " + frame.input
            await gptscript.confirm(AuthResponse(frame.id, True))
        elif frame.type == RunEventType.callProgress:
            for output in frame.output:
                event_content += output.content

    tool = ToolDef(tools=["sys.exec"], instructions="List the files in the current directory as '.'.")
    out = await gptscript.evaluate(
        tool,
        Options(confirm=True, disableCache=True),
        event_handlers=[process_event],
    ).text()

    assert confirm_event_found, "No confirm event"
    # Running the `dir` command in Windows will give the contents of the tests directory
    # while running `ls` on linux will give the contents of the repo directory.
    assert (
        "README.md" in out and "requirements.txt" in out
    ) or (
        "fixtures" in out and "test_gptscript.py" in out
    ), "Unexpected output: " + out
    assert (
        "README.md" in event_content and "requirements.txt" in event_content
    ) or (
        "fixtures" in event_content and "test_gptscript.py" in event_content
    ), "Unexpected event output: " + event_content


@pytest.mark.asyncio
async def test_confirm_deny(gptscript):
    confirm_event_found = False
    event_content = ""

    async def process_event(r: Run, frame: CallFrame | RunFrame | PromptFrame):
        nonlocal confirm_event_found, event_content
        if frame.type == RunEventType.callConfirm:
            confirm_event_found = True
            assert '"ls"' in frame.input, "Unexpected confirm input: " + frame.input
            await gptscript.confirm(AuthResponse(frame.id, False, "I will not allow it!"))
        elif frame.type == RunEventType.callProgress:
            for output in frame.output:
                event_content += output.content

    tool = ToolDef(tools=["sys.exec"],
                   instructions="List the files in the current directory as '.'. If that doesn't work, "
                                "print the word FAIL.")
    out = await gptscript.evaluate(tool,
                                   Options(confirm=True, disableCache=True),
                                   event_handlers=[process_event],
                                   ).text()

    assert confirm_event_found, "No confirm event"
    assert "FAIL" in out, "Unexpected output: " + out
    assert "FAIL" in event_content, "Unexpected event output: " + event_content


@pytest.mark.asyncio
async def test_prompt(gptscript):
    prompt_event_found = False
    event_content = ""

    async def process_event(r: Run, frame: CallFrame | RunFrame | PromptFrame):
        nonlocal prompt_event_found, event_content
        if frame.type == RunEventType.prompt:
            prompt_event_found = True
            assert len(frame.fields) == 1, "Unexpected number of fields: " + str(frame.fields)
            assert "first name" in frame.fields[0], "Unexpected field: " + frame.fields[0]
            await gptscript.prompt(PromptResponse(frame.id, {frame.fields[0]: "Clicky"}))
        elif frame.type == RunEventType.callProgress:
            for output in frame.output:
                event_content += output.content

    tool = ToolDef(
        tools=["sys.prompt"],
        instructions="Use the sys.prompt user to ask the user for 'first name' which is not sensitive. After you get their first name, say hello.",
    )
    out = await gptscript.evaluate(
        tool,
        Options(prompt=True, disableCache=True),
        event_handlers=[process_event],
    ).text()

    assert prompt_event_found, "No prompt event"
    assert "Clicky" in out, "Unexpected output: " + out
    assert "Clicky" in event_content, "Unexpected event output: " + event_content


@pytest.mark.asyncio
async def test_prompt_with_metadata(gptscript):
    prompt_event_found = False

    async def process_event(r: Run, frame: CallFrame | RunFrame | PromptFrame):
        nonlocal prompt_event_found
        if frame.type == RunEventType.prompt:
            prompt_event_found = True
            assert len(frame.fields) == 1, "Unexpected number of fields: " + str(frame.fields)
            assert "first name" in frame.fields[0], "Unexpected field: " + frame.fields[0]
            assert "first_name" in frame.metadata, "Unexpected metadata: " + str(frame.metadata)
            assert frame.metadata["first_name"] == "Clicky", "Unexpected metadata: " + str(frame.metadata)
            await gptscript.prompt(PromptResponse(frame.id, {frame.fields[0]: "Clicky"}))

    out = await gptscript.run(
        "sys.prompt",
        Options(prompt=True, disableCache=True, input='{"fields": "first name", "metadata": {"first_name": "Clicky"}}'),
        event_handlers=[process_event],
    ).text()

    assert prompt_event_found, "No prompt event"
    assert "Clicky" in out, "Unexpected output: " + out


@pytest.mark.asyncio
async def test_prompt_without_prompt_allowed(gptscript):
    prompt_event_found = False

    async def process_event(r: Run, frame: CallFrame | RunFrame | PromptFrame):
        nonlocal prompt_event_found
        if frame.type == RunEventType.prompt:
            prompt_event_found = True
            assert len(frame.fields) == 1, "Unexpected number of fields: " + str(frame.fields)
            assert "first name" in frame.fields[0], "Unexpected field: " + frame.fields[0]
            await gptscript.prompt(PromptResponse(frame.id, {frame.fields[0]: "Clicky"}))

    tool = ToolDef(
        tools=["sys.prompt"],
        instructions="Use the sys.prompt user to ask the user for 'first name' which is not sensitive. After you get their first name, say hello.",
    )
    run = gptscript.evaluate(
        tool,
        event_handlers=[process_event],
    )
    out = await run.text()

    assert not prompt_event_found, "Prompt event occurred"
    assert "prompt event occurred" in out, "Unexpected output: " + out


def test_get_env():
    os.environ['TEST_ENV'] = json.dumps({
        '_gz': base64.b64encode(gzip.compress(b'test value')).decode('utf-8'),
    }).replace(' ', '')
    assert 'test value' == get_env('TEST_ENV')


@pytest.mark.asyncio
async def test_run_file_with_metadata(gptscript):
    run = gptscript.run("./tests/fixtures/parse-with-metadata.gpt")
    assert "200" == await run.text(), "Expect file to have correct output"


@pytest.mark.asyncio
async def test_parse_with_metadata_then_run(gptscript):
    cwd = os.getcwd().removesuffix("tests")
    tools = await gptscript.parse(cwd + "/tests/fixtures/parse-with-metadata.gpt")
    run = gptscript.evaluate(tools[0])
    assert "200" == await run.text(), "Expect file to have correct output"