Skip to content

Commit 3818ce8

Browse files
authored
[Responses API] Function calling (#1587)
Built on top of #1576. Based on https://platform.openai.com/docs/api-reference/responses/create and https://platform.openai.com/docs/guides/function-calling?api-mode=responses#streaming Works both with and without streaming. **Note:** implementation starts to be a completely messy, especially in streaming mode. Complexity increases as we add new event types. I do think a refactoring would be beneficial e.g. with an internal state object that keeps track of the current state and "knows" what to emit and when (typically to emit the "done"/"completed" events each time a new output/content is generated). Food for thoughts for a future PR. ## Non-stream **Run** ```bash pnpm run example function ``` **Output** ```js { created_at: 1751467285177, error: null, id: 'resp_0b2ab98168a9813e0f7373f940221da4ef3211f43c9faac8', instructions: null, max_output_tokens: null, metadata: null, model: 'meta-llama/Llama-3.3-70B-Instruct', object: 'response', output: [ { type: 'function_call', id: 'fc_f40ac964165602e2fcb2f955777acff8c4b9359d49eaf79b', call_id: '9cd167c7f', name: 'get_current_weather', arguments: '{"location": "Boston, MA", "unit": "fahrenheit"}', status: 'completed' } ], status: 'completed', tool_choice: 'auto', tools: [ { name: 'get_current_weather', parameters: [Object], strict: true, type: 'function', description: 'Get the current weather in a given location' } ], temperature: 1, top_p: 1, output_text: '' } ``` ## Stream **Run:** ``` pnpm run example function_streaming ``` **Output:** ```js { type: 'response.created', response: { created_at: 1751467334073, error: null, id: 'resp_8d86745178f2b9fc0da000156655956181c76a7701712a05', instructions: null, max_output_tokens: null, metadata: null, model: 'meta-llama/Llama-3.3-70B-Instruct', object: 'response', output: [], status: 'in_progress', tool_choice: 'auto', tools: [ [Object] ], temperature: 1, top_p: 1 }, sequence_number: 0 } { type: 'response.in_progress', response: { created_at: 1751467334073, error: null, id: 
'resp_8d86745178f2b9fc0da000156655956181c76a7701712a05', instructions: null, max_output_tokens: null, metadata: null, model: 'meta-llama/Llama-3.3-70B-Instruct', object: 'response', output: [], status: 'in_progress', tool_choice: 'auto', tools: [ [Object] ], temperature: 1, top_p: 1 }, sequence_number: 1 } { type: 'response.output_item.added', output_index: 0, item: { type: 'function_call', id: 'fc_9bdc8945b9cb6c95c5c248db4203f0707ba9fd338dee2454', call_id: '83a9d4baf', name: 'get_weather', arguments: '' }, sequence_number: 2 } { type: 'response.function_call_arguments.delta', item_id: 'fc_9bdc8945b9cb6c95c5c248db4203f0707ba9fd338dee2454', output_index: 0, delta: '{"latitude": 48.8567, "longitude": 2.3508}', sequence_number: 3 } { type: 'response.function_call_arguments.done', item_id: 'fc_9bdc8945b9cb6c95c5c248db4203f0707ba9fd338dee2454', output_index: 0, arguments: '{"latitude": 48.8567, "longitude": 2.3508}', sequence_number: 4 } { type: 'response.output_item.done', output_index: 0, item: { type: 'function_call', id: 'fc_9bdc8945b9cb6c95c5c248db4203f0707ba9fd338dee2454', call_id: '83a9d4baf', name: 'get_weather', arguments: '{"latitude": 48.8567, "longitude": 2.3508}', status: 'completed' }, sequence_number: 5 } { type: 'response.completed', response: { created_at: 1751467334073, error: null, id: 'resp_8d86745178f2b9fc0da000156655956181c76a7701712a05', instructions: null, max_output_tokens: null, metadata: null, model: 'meta-llama/Llama-3.3-70B-Instruct', object: 'response', output: [ [Object] ], status: 'completed', tool_choice: 'auto', tools: [ [Object] ], temperature: 1, top_p: 1 }, sequence_number: 6 } ```
1 parent 9c06344 commit 3818ce8

File tree

4 files changed

+301
-73
lines changed

4 files changed

+301
-73
lines changed
Lines changed: 32 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,32 @@
// Example: function calling with the Responses API (non-streaming).
import OpenAI from "openai";

// Client targets a locally running Responses server; auth via a Hugging Face token.
const client = new OpenAI({ baseURL: "http://localhost:3000/v1", apiKey: process.env.HF_TOKEN });

// JSON-schema definition of the single tool exposed to the model.
const currentWeatherTool = {
  type: "function",
  name: "get_current_weather",
  description: "Get the current weather in a given location",
  parameters: {
    type: "object",
    properties: {
      location: {
        type: "string",
        description: "The city and state, e.g. San Francisco, CA",
      },
      unit: { type: "string", enum: ["celsius", "fahrenheit"] },
    },
    required: ["location", "unit"],
  },
};

// Ask a weather question; with tool_choice "auto" the model decides whether to call the tool.
const response = await client.responses.create({
  model: "meta-llama/Llama-3.3-70B-Instruct",
  provider: "cerebras",
  tools: [currentWeatherTool],
  input: "What is the weather like in Boston today?",
  tool_choice: "auto",
});

// Print the full response object (includes any function_call output items).
console.log(response);
Lines changed: 33 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,33 @@
// Example: function calling with the Responses API, streaming variant.
import { OpenAI } from "openai";

// Client targets a locally running Responses server; auth via a Hugging Face token.
const client = new OpenAI({ baseURL: "http://localhost:3000/v1", apiKey: process.env.HF_TOKEN });

// Strict JSON-schema tool definition: a coordinate-based weather lookup.
const weatherTool = {
  type: "function",
  name: "get_weather",
  description: "Get current temperature for provided coordinates in celsius.",
  parameters: {
    type: "object",
    properties: {
      latitude: { type: "number" },
      longitude: { type: "number" },
    },
    required: ["latitude", "longitude"],
    additionalProperties: false,
  },
  strict: true,
};

// Request a streamed response; the call resolves to an async-iterable of events.
const stream = await client.responses.create({
  model: "meta-llama/Llama-3.3-70B-Instruct",
  provider: "cerebras",
  input: [{ role: "user", content: "What's the weather like in Paris today?" }],
  tools: [weatherTool],
  stream: true,
});

// Log every event as it arrives (created / in_progress / argument deltas / completed).
for await (const event of stream) {
  console.log(event);
}

0 commit comments

Comments
 (0)