155 changes: 155 additions & 0 deletions libs/langchain-google-genai/src/tests/chat_models.test.ts
@@ -456,3 +456,158 @@ test("Input has no system message and multiple user messages, convert system mes
},
]);
});

test("convertMessageContentToParts: should handle AIMessage with mixed content and tool_calls, and HumanMessage with mixed content", () => {
const isMultimodalModel = true;
const base64ImageData =
"iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAQAAAC1HAwCAAAAC0lEQVR42mNkYAAAAAYAAjCB0C8AAAAASUVORK5CYII="; // 1x1 black pixel

const aiMessageWithString = new AIMessage({
content: "This is the AI text response.",
tool_calls: [
{ name: "get_weather", args: { location: "London" }, id: "tool_123" },
],
});
const expectedPartsAiString = [
{ text: "This is the AI text response." },
{ functionCall: { name: "get_weather", args: { location: "London" } } },
];
expect(
convertMessageContentToParts(aiMessageWithString, isMultimodalModel, [])
).toEqual(expectedPartsAiString);

const aiMessageWithArray = new AIMessage({
content: [
{ type: "text", text: "AI sees this image:" },
{
type: "image_url",
image_url: `data:image/png;base64,${base64ImageData}`,
},
],
tool_calls: [{ name: "describe_image", args: {}, id: "tool_789" }],
});
const expectedPartsAiArray = [
{ text: "AI sees this image:" },
{ inlineData: { mimeType: "image/png", data: base64ImageData } },
{ functionCall: { name: "describe_image", args: {} } },
];
expect(
convertMessageContentToParts(aiMessageWithArray, isMultimodalModel, [])
).toEqual(expectedPartsAiArray);

const humanMessageWithArray = new HumanMessage({
content: [
{ type: "text", text: "User sees this image:" },
{
type: "image_url",
image_url: `data:image/png;base64,${base64ImageData}`,
},
],
});
const expectedPartsHumanArray = [
{ text: "User sees this image:" },
{ inlineData: { mimeType: "image/png", data: base64ImageData } },
];
expect(
convertMessageContentToParts(humanMessageWithArray, isMultimodalModel, [])
).toEqual(expectedPartsHumanArray);
});

test("convertMessageContentToParts: should handle messages with content only (no tool_calls)", () => {
const isMultimodalModel = true;

const aiMessageWithString = new AIMessage({
content: "Just an AI text response.",
});
const expectedPartsAiString = [{ text: "Just an AI text response." }];
expect(
convertMessageContentToParts(aiMessageWithString, isMultimodalModel, [])
).toEqual(expectedPartsAiString);

const humanMessageWithString = new HumanMessage({
content: "Just a human text input.",
});
const expectedPartsHumanString = [{ text: "Just a human text input." }];
expect(
convertMessageContentToParts(humanMessageWithString, isMultimodalModel, [])
).toEqual(expectedPartsHumanString);

const aiMessageWithArray = new AIMessage({
content: [
{ type: "text", text: "AI array part 1." },
{ type: "text", text: "AI array part 2." },
],
});
const expectedPartsAiArray = [
{ text: "AI array part 1." },
{ text: "AI array part 2." },
];
expect(
convertMessageContentToParts(aiMessageWithArray, isMultimodalModel, [])
).toEqual(expectedPartsAiArray);

const humanMessageWithArray = new HumanMessage({
content: [
{ type: "text", text: "Human array part 1." },
{ type: "text", text: "Human array part 2." },
],
});
const expectedPartsHumanArray = [
{ text: "Human array part 1." },
{ text: "Human array part 2." },
];
expect(
convertMessageContentToParts(humanMessageWithArray, isMultimodalModel, [])
).toEqual(expectedPartsHumanArray);
});

test("convertMessageContentToParts: should handle AIMessage with tool_calls only (empty content)", () => {
const isMultimodalModel = true;

const messageWithEmptyString = new AIMessage({
content: "",
tool_calls: [{ name: "get_time", args: {}, id: "tool_abc" }],
});
const expectedParts = [{ functionCall: { name: "get_time", args: {} } }];
expect(
convertMessageContentToParts(messageWithEmptyString, isMultimodalModel, [])
).toEqual(expectedParts);
});

test("convertMessageContentToParts: should handle ToolMessage correctly (including name inference and errors)", () => {
const isMultimodalModel = true;

const previousAiMessage = new AIMessage({
content: "",
tool_calls: [
{ name: "get_weather", args: { location: "London" }, id: "tool_123" },
],
});
const toolMessageSuccess = new ToolMessage({
content: '{"temperature": "15C", "conditions": "Cloudy"}',
tool_call_id: "tool_123",
});
const expectedPartsSuccess = [
{
functionResponse: {
name: "get_weather",
response: { result: '{"temperature": "15C", "conditions": "Cloudy"}' },
},
},
];
expect(
convertMessageContentToParts(toolMessageSuccess, isMultimodalModel, [
previousAiMessage,
])
).toEqual(expectedPartsSuccess);

const toolMessageError = new ToolMessage({
content: "Some result",
tool_call_id: "unknown_tool_id",
});
expect(() =>
convertMessageContentToParts(toolMessageError, isMultimodalModel, [])
).toThrow(
'Google requires a tool name for each tool call response, and we could not infer a called tool name for ToolMessage "undefined" from your passed messages. Please populate a "name" field on that ToolMessage explicitly.'
);
});
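For orientation, here is a minimal usage sketch of the behavior these new tests pin down, assuming `convertMessageContentToParts` is imported from the package's `utils/common.js` module (the import path is an assumption; the tests' own import statements are not shown in this diff):

```ts
import { AIMessage } from "@langchain/core/messages";
// Assumed relative import path for the helper under test.
import { convertMessageContentToParts } from "../utils/common.js";

// An AIMessage that carries both text content and tool_calls now yields the
// text part(s) first, followed by one functionCall part per tool call.
const ai = new AIMessage({
  content: "Checking the weather now.",
  tool_calls: [
    { name: "get_weather", args: { location: "Paris" }, id: "call_1" },
  ],
});

const parts = convertMessageContentToParts(ai, true, []);
console.log(parts);
// [
//   { text: "Checking the weather now." },
//   { functionCall: { name: "get_weather", args: { location: "Paris" } } },
// ]
```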
93 changes: 44 additions & 49 deletions libs/langchain-google-genai/src/utils/common.ts
@@ -5,7 +5,6 @@ import {
   type FunctionDeclarationsTool as GoogleGenerativeAIFunctionDeclarationsTool,
   type FunctionDeclaration as GenerativeAIFunctionDeclaration,
   POSSIBLE_ROLES,
-  FunctionResponsePart,
   FunctionCallPart,
 } from "@google/generative-ai";
 import {
@@ -121,30 +120,7 @@ export function convertMessageContentToParts(
   isMultimodalModel: boolean,
   previousMessages: BaseMessage[]
 ): Part[] {
-  if (
-    typeof message.content === "string" &&
-    message.content !== "" &&
-    !isToolMessage(message)
-  ) {
-    return [{ text: message.content }];
-  }
-
-  let functionCalls: FunctionCallPart[] = [];
-  let functionResponses: FunctionResponsePart[] = [];
-  let messageParts: Part[] = [];
-
-  if (
-    "tool_calls" in message &&
-    Array.isArray(message.tool_calls) &&
-    message.tool_calls.length > 0
-  ) {
-    functionCalls = message.tool_calls.map((tc) => ({
-      functionCall: {
-        name: tc.name,
-        args: tc.args,
-      },
-    }));
-  } else if (isToolMessage(message) && message.content) {
+  if (isToolMessage(message)) {
     const messageName =
       message.name ??
       inferToolNameFromPreviousMessages(message, previousMessages);
@@ -153,7 +129,7 @@
         `Google requires a tool name for each tool call response, and we could not infer a called tool name for ToolMessage "${message.id}" from your passed messages. Please populate a "name" field on that ToolMessage explicitly.`
       );
     }
-    functionResponses = [
+    return [
       {
         functionResponse: {
           name: messageName,
@@ -164,23 +140,24 @@
         },
       },
     ];
-  } else if (Array.isArray(message.content)) {
-    messageParts = message.content.map((c) => {
+  }
+
+  let functionCalls: FunctionCallPart[] = [];
+  const messageParts: Part[] = [];
+
+  if (typeof message.content === "string" && message.content) {
+    messageParts.push({ text: message.content });
+  }
+
+  if (Array.isArray(message.content)) {
+    message.content.forEach((c) => {
       if (c.type === "text") {
-        return {
-          text: c.text,
-        };
+        messageParts.push({ text: c.text });
       } else if (c.type === "executableCode") {
-        return {
-          executableCode: c.executableCode,
-        };
+        messageParts.push({ executableCode: c.executableCode });
       } else if (c.type === "codeExecutionResult") {
-        return {
-          codeExecutionResult: c.codeExecutionResult,
-        };
-      }
-
-      if (c.type === "image_url") {
+        messageParts.push({ codeExecutionResult: c.codeExecutionResult });
+      } else if (c.type === "image_url") {
         if (!isMultimodalModel) {
           throw new Error(`This model does not support images`);
         }
@@ -202,40 +179,58 @@
           throw new Error("Please provide image as base64 encoded data URL");
         }
 
-        return {
+        messageParts.push({
           inlineData: {
             data,
             mimeType,
           },
-        };
+        });
       } else if (c.type === "media") {
-        return messageContentMedia(c);
+        messageParts.push(messageContentMedia(c));
       } else if (c.type === "tool_use") {
-        return {
+        functionCalls.push({
           functionCall: {
             name: c.name,
            args: c.input,
          },
-        };
+        });
       } else if (
         c.type?.includes("/") &&
         // Ensure it's a single slash.
         c.type.split("/").length === 2 &&
         "data" in c &&
         typeof c.data === "string"
       ) {
-        return {
+        messageParts.push({
           inlineData: {
             mimeType: c.type,
             data: c.data,
           },
-        };
+        });
+      } else if ("functionCall" in c) {
+        // No action needed here — function calls will be added later from message.tool_calls
+      } else {
+        if ("type" in c) {
+          throw new Error(`Unknown content type ${c.type}`);
+        } else {
+          throw new Error(`Unknown content ${JSON.stringify(c)}`);
+        }
+      }
       }
-      throw new Error(`Unknown content type ${(c as { type: string }).type}`);
     });
   }
 
-  return [...messageParts, ...functionCalls, ...functionResponses];
+  if (isAIMessage(message) && message.tool_calls?.length) {
+    functionCalls = message.tool_calls.map((tc) => {
+      return {
+        functionCall: {
+          name: tc.name,
+          args: tc.args,
+        },
+      };
+    });
+  }
+
+  return [...messageParts, ...functionCalls];
 }
 
 export function convertBaseMessagesToContent(
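As a follow-up to the ToolMessage handling above, a small sketch of the error path and how callers can avoid it, under the same assumed import path as the earlier example: when no previous AIMessage carries a matching tool call id, name inference fails and the function throws, so an explicit `name` on the ToolMessage sidesteps the error.

```ts
import { ToolMessage } from "@langchain/core/messages";
// Same assumed relative import path as in the earlier sketch.
import { convertMessageContentToParts } from "../utils/common.js";

// This ToolMessage has no matching tool_call id in previousMessages, so name
// inference would fail; supplying "name" explicitly avoids the thrown error.
const orphanToolMessage = new ToolMessage({
  content: "Some result",
  tool_call_id: "unknown_tool_id",
  name: "get_time",
});

const parts = convertMessageContentToParts(orphanToolMessage, true, []);
console.log(parts);
// [
//   {
//     functionResponse: {
//       name: "get_time",
//       response: { result: "Some result" },
//     },
//   },
// ]
```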