Commit 6b7111c

Merge pull request #8433 from Psanyi89/psanyi89_n8n_response_parsing
bug: Missing response parsing for N8N AI Agent Responses
2 parents 903a39c + 287a535, commit 6b7111c

File tree

1 file changed: +49, -1 lines

core/llm/llms/Ollama.ts

Lines changed: 49 additions & 1 deletion
@@ -115,6 +115,18 @@ type OllamaErrorResponse = {
   error: string;
 };
 
+type N8nChatReponse = {
+  type: string;
+  content?: string;
+  metadata: {
+    nodeId: string;
+    nodeName: string;
+    itemIndex: number;
+    runIndex: number;
+    timestamps: number;
+  };
+};
+
 type OllamaRawResponse =
   | OllamaErrorResponse
   | (OllamaBaseResponse & {
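For orientation, here is a minimal sketch of the kind of streamed n8n AI Agent chunk the new N8nChatReponse type (added above) is meant to describe. Every field value is an illustrative assumption, not something confirmed against n8n's documentation.

// Hypothetical chunk matching the N8nChatReponse shape; values are made up.
const exampleChunk: N8nChatReponse = {
  type: "item", // assumed discriminator value, not taken from n8n docs
  content: "<think>", // may also carry plain answer text or "</think>"
  metadata: {
    nodeId: "node-123", // hypothetical identifiers
    nodeName: "AI Agent",
    itemIndex: 0,
    runIndex: 0,
    timestamps: 1717000000000, // assumed to be epoch milliseconds
  },
};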
@@ -125,7 +137,8 @@ type OllamaChatResponse =
   | OllamaErrorResponse
   | (OllamaBaseResponse & {
       message: OllamaChatMessage;
-    });
+    })
+  | N8nChatReponse;
 
 interface OllamaTool {
   type: "function";
@@ -435,12 +448,47 @@ class Ollama extends BaseLLM implements ModelInstaller {
       body: JSON.stringify(chatOptions),
       signal,
     });
+    let isThinking: boolean = false;
 
     function convertChatMessage(res: OllamaChatResponse): ChatMessage[] {
       if ("error" in res) {
         throw new Error(res.error);
       }
 
+      if ("type" in res) {
+        const { content } = res;
+
+        if (content === "<think>") {
+          isThinking = true;
+        }
+
+        if (isThinking && content) {
+          // TODO better support for streaming thinking chunks, or remove this and depend on redux <think/> parsing logic
+          const thinkingMessage: ThinkingChatMessage = {
+            role: "thinking",
+            content: content,
+          };
+
+          if (thinkingMessage) {
+            // could cause issues with termination if chunk doesn't match this exactly
+            if (content === "</think>") {
+              isThinking = false;
+            }
+            // When Streaming you can't have both thinking and content
+            return [thinkingMessage];
+          }
+        }
+
+        if (content) {
+          const chatMessage: ChatMessage = {
+            role: "assistant",
+            content: content,
+          };
+          return [chatMessage];
+        }
+        return [];
+      }
+
       const { role, content, thinking, tool_calls: toolCalls } = res.message;
 
       if (role === "tool") {
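Taken on its own, the new branch in convertChatMessage is a small state machine: a "<think>" chunk flips isThinking on, everything streamed while it is on (including the closing "</think>") is emitted as a thinking message, and any other content becomes a normal assistant message. A standalone sketch of that behavior, not the actual Continue code:

// Minimal re-implementation of the think-tag handling, for illustration only.
type SketchMessage = { role: "thinking" | "assistant"; content: string };

function classifyN8nChunks(chunks: string[]): SketchMessage[] {
  let isThinking = false;
  const out: SketchMessage[] = [];
  for (const content of chunks) {
    if (content === "<think>") {
      isThinking = true;
    }
    if (isThinking) {
      // Same caveat as the diff comment: termination relies on an exact
      // "</think>" chunk, so a split or merged tag would keep thinking on.
      if (content === "</think>") {
        isThinking = false;
      }
      out.push({ role: "thinking", content });
    } else {
      out.push({ role: "assistant", content });
    }
  }
  return out;
}

// classifyN8nChunks(["<think>", "reasoning...", "</think>", "final answer"])
// yields three "thinking" messages followed by one "assistant" message.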
