Skip to content

Commit eb08c92

Browse files
committed
Return tool calls as reasoning content
1 parent 85e3bde commit eb08c92

File tree

1 file changed

+48
-9
lines changed

1 file changed

+48
-9
lines changed

packages/query/src/llm/chat.module.ts

Lines changed: 48 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -90,23 +90,44 @@ When generating SQL queries:
9090
}
9191

9292
// Convert OpenAI format messages to LangChain format
93-
const lastMessage = messages[messages.length - 1];
94-
const question = lastMessage.content;
93+
// const lastMessage = messages[messages.length - 1];
94+
// const question = lastMessage.content;
9595

9696
res.setHeader('Content-Type', 'text/event-stream');
9797
res.setHeader('Cache-Control', 'no-cache');
9898
res.setHeader('Connection', 'keep-alive');
9999
// eslint-disable-next-line @typescript-eslint/no-non-null-assertion
100-
const result = await this.agent!.stream({messages: [['user', question]]}, {streamMode: 'values'});
100+
const result = await this.agent!.stream({messages}, {streamMode: 'values'});
101101

102102
let fullResponse = '';
103+
let first = true;
104+
let thinking = false;
103105
for await (const event of result) {
104106
const lastMsg: BaseMessage = event.messages[event.messages.length - 1];
105-
if (lastMsg.content) {
106-
fullResponse = lastMsg.content as string;
107-
logger.info(`Streaming response: ${JSON.stringify(lastMsg)}`);
108-
if (argv['llm-debug'] && stream) {
107+
fullResponse = lastMsg.content as string;
108+
if (lastMsg.content && lastMsg.getType() === 'tool') {
109+
if (argv['chat-debug'] && stream && lastMsg.response_metadata?.finish_reason !== 'stop') {
110+
if (first) {
111+
res.write(
112+
`data: ${JSON.stringify({
113+
id: `chatcmpl-${Date.now()}`,
114+
object: 'chat.completion.chunk',
115+
created: Math.floor(Date.now() / 1000),
116+
model: process.env.OPENAI_MODEL,
117+
choices: [
118+
{
119+
index: 0,
120+
delta: {role: 'assistant', content: '<think>\n\n'},
121+
finish_reason: null,
122+
},
123+
],
124+
})}\n\n`
125+
);
126+
first = false;
127+
thinking = true;
128+
}
109129
// todo: send them as thinking details
130+
logger.info(`Streaming response: ${JSON.stringify(lastMsg)}`);
110131
res.write(
111132
`data: ${JSON.stringify({
112133
id: `chatcmpl-${Date.now()}`,
@@ -116,18 +137,36 @@ When generating SQL queries:
116137
choices: [
117138
{
118139
index: 0,
119-
delta: {content: lastMsg.content},
140+
delta: {content: `${lastMsg.name}: ${lastMsg.content} \n\n`},
120141
finish_reason: null,
121142
},
122143
],
123144
})}\n\n`
124145
);
125146
}
126147
}
148+
if (lastMsg.response_metadata?.finish_reason === 'stop' && thinking) {
149+
res.write(
150+
`data: ${JSON.stringify({
151+
id: `chatcmpl-${Date.now()}`,
152+
object: 'chat.completion.chunk',
153+
created: Math.floor(Date.now() / 1000),
154+
model: process.env.OPENAI_MODEL,
155+
choices: [
156+
{
157+
index: 0,
158+
delta: {content: '</think>\n\n'},
159+
finish_reason: null,
160+
},
161+
],
162+
})}\n\n`
163+
);
164+
}
127165
}
128166

129167
// Send final message
130168
if (stream) {
169+
logger.info(`Final response: ${JSON.stringify(fullResponse)}`);
131170
res.write(
132171
`data: ${JSON.stringify({
133172
id: `chatcmpl-${Date.now()}`,
@@ -137,7 +176,7 @@ When generating SQL queries:
137176
choices: [
138177
{
139178
index: 0,
140-
message: {role: 'assistant', content: fullResponse},
179+
delta: {role: 'assistant', content: fullResponse},
141180
finish_reason: 'stop',
142181
},
143182
],

0 commit comments

Comments (0)