Commit 8d63e27

feat: Clean up and translate comments to English
Co-authored-by: aider (gemini/gemini-2.5-pro) <[email protected]>
1 parent ee2641e commit 8d63e27

9 files changed: +79 −97 lines

packages/mcp-server/src/bridge/bridge.ts

Lines changed: 12 additions & 24 deletions
@@ -7,8 +7,8 @@ import {
   type Tool as GcliTool,
   type ToolResult,
   GeminiChat,
-  WebFetchTool, // <--- add this import
-  WebSearchTool, // <--- add this import
+  WebFetchTool,
+  WebSearchTool,
 } from '@google/gemini-cli-core';
 import {
   type CallToolResult,
@@ -27,7 +27,6 @@ export class GcliMcpBridge {
   private readonly config: Config;
   private readonly cliVersion: string;
   private readonly debugMode: boolean;
-  // **Change 2: Update the type of transports so each session's McpServer and Transport are stored**
   private readonly sessions: Record<
     string,
     { mcpServer: McpServer; transport: StreamableHTTPServerTransport }
@@ -39,7 +38,6 @@ export class GcliMcpBridge {
     this.debugMode = debugMode;
   }

-  // **Helper method: create a new McpServer instance**
   private async createNewMcpServer(): Promise<McpServer> {
     const server = new McpServer(
       {
@@ -48,7 +46,6 @@ export class GcliMcpBridge {
       },
       { capabilities: { logging: {} } },
     );
-    // Register all tools here right away
     await this.registerAllGcliTools(server);
     return server;
   }
@@ -57,7 +54,6 @@ export class GcliMcpBridge {
     app.all('/mcp', async (req: Request, res: Response) => {
       const sessionId = req.headers['mcp-session-id'] as string | undefined;

-      // **Change 5: Get the session object from the `sessions` map**
       let session = sessionId ? this.sessions[sessionId] : undefined;

       if (!session) {
@@ -68,7 +64,6 @@ export class GcliMcpBridge {
         );

         try {
-          // **Change 6: Create a separate McpServer and Transport for each new session**
           const newMcpServer = await this.createNewMcpServer();
           const newTransport = new StreamableHTTPServerTransport({
             sessionIdGenerator: () => randomUUID(),
@@ -77,7 +72,6 @@ export class GcliMcpBridge {
               this.debugMode,
               `Session initialized: ${newSessionId}`,
             );
-            // Store the new session object
             this.sessions[newSessionId] = {
               mcpServer: newMcpServer,
               transport: newTransport,
@@ -96,7 +90,6 @@ export class GcliMcpBridge {
           }
         };

-        // Connect the new transport to the new McpServer instance
         await newMcpServer.connect(newTransport);

         session = { mcpServer: newMcpServer, transport: newTransport };
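
Taken together, the hunks above give every MCP session its own McpServer/Transport pair, keyed by the mcp-session-id header. A minimal sketch of the pattern, assuming the MCP TypeScript SDK's StreamableHTTPServerTransport (whose options include sessionIdGenerator and an onsessioninitialized callback) and a createNewMcpServer helper like the one in this file; import paths may differ across SDK versions:

import { randomUUID } from 'node:crypto';
import { McpServer } from '@modelcontextprotocol/sdk/server/mcp.js';
import { StreamableHTTPServerTransport } from '@modelcontextprotocol/sdk/server/streamableHttp.js';

type Session = {
  mcpServer: McpServer;
  transport: StreamableHTTPServerTransport;
};

const sessions: Record<string, Session> = {};

// Reuse the pair for a known session id; otherwise build a fresh server and
// transport, and remember them once the SDK assigns the new id.
async function getOrCreateSession(
  sessionId: string | undefined,
  createNewMcpServer: () => Promise<McpServer>,
): Promise<Session> {
  if (sessionId && sessions[sessionId]) return sessions[sessionId];

  const mcpServer = await createNewMcpServer(); // tools registered up front
  const transport = new StreamableHTTPServerTransport({
    sessionIdGenerator: () => randomUUID(),
    onsessioninitialized: (id) => {
      sessions[id] = { mcpServer, transport };
    },
  });
  await mcpServer.connect(transport); // this transport serves this server only
  return { mcpServer, transport };
}

Because each session owns its server instance, per-session state can never leak across concurrent clients.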
@@ -130,7 +123,6 @@
       }

       try {
-        // **Change 7: Use the session-specific transport to handle the request**
        await session.transport.handleRequest(req, res, req.body);
      } catch (e) {
        logger.error('Error handling request:', e);
@@ -150,33 +142,30 @@
   }

   private registerGcliTool(tool: GcliTool, mcpServer: McpServer) {
-    let toolInstanceForExecution = tool; // Default to the original tool instance passed in from the ToolRegistry
+    let toolInstanceForExecution = tool;

-    // Check whether this is a web tool that needs special handling
+    // For web tools, check if a custom model is specified via environment variable.
+    // If so, create a new tool instance with a proxied config to use that model.
     if (tool.name === 'google_web_search' || tool.name === 'web_fetch') {
       const toolModel = process.env.GEMINI_TOOLS_DEFAULT_MODEL;

-      // If a dedicated model is configured for these tools, create a new config and tool instance
       if (toolModel) {
         logger.debug(
           this.debugMode,
           `Using custom model "${toolModel}" for tool "${tool.name}"`,
         );

-        // Step 1: Create a proxy of this.config.
-        // The proxy object intercepts calls to the getModel method.
+        // Create a proxy for this.config to override getModel.
         const proxyConfig = new Proxy(this.config, {
           get: (target, prop, receiver) => {
-            // If the method being called is getModel, return the model designated for tools
             if (prop === 'getModel') {
               return () => toolModel;
             }
-            // Delegate all other property and method accesses to the original config object
             return Reflect.get(target, prop, receiver);
           },
         }) as Config;

-        // Step 2: Based on the tool name, create the new tool instance with this proxied config
+        // Create a new tool instance with the proxied config.
         if (tool.name === 'google_web_search') {
           toolInstanceForExecution = new WebSearchTool(proxyConfig);
         } else {
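
The override above is plain JavaScript Proxy machinery: intercept one getter, delegate everything else with Reflect.get. A reusable sketch of the same technique; withModel is a hypothetical helper, not part of the bridge:

// Override a single method on an existing config object without mutating it.
function withModel<T extends { getModel(): string }>(config: T, model: string): T {
  return new Proxy(config, {
    get(target, prop, receiver) {
      if (prop === 'getModel') return () => model; // the one intercepted call
      return Reflect.get(target, prop, receiver);  // everything else falls through
    },
  });
}

// Usage: the proxy reports the override; the original config is untouched.
// const proxyConfig = withModel(this.config, toolModel);
// toolInstanceForExecution = new WebSearchTool(proxyConfig);

Unlike copying the config, a proxy keeps later mutations of the original object visible to the tool.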
@@ -199,8 +188,7 @@ export class GcliMcpBridge {
         const startTime = Date.now();
         logger.info('MCP tool call started', { toolName: tool.name, args });
         try {
-          // *** Key point: every tool now executes through this single unified path ***
-          // toolInstanceForExecution is either the original tool or a new instance with a custom model config
+          // toolInstanceForExecution is either the original tool or a new instance with a custom model config.
           const result = await toolInstanceForExecution.execute(
             args,
             extra.signal,
@@ -218,7 +206,8 @@ export class GcliMcpBridge {
             toolName: tool.name,
             durationMs,
           });
-          throw e; // Re-throw the error and let the MCP SDK handle it
+          // Re-throw the error to be handled by the MCP SDK.
+          throw e;
         }
       },
     );
@@ -242,7 +231,7 @@ export class GcliMcpBridge {
       case 'boolean':
         return z.boolean().describe(prop.description || '');
       case 'array':
-        // This is the key fix: recursively call the converter for `items`.
+        // Recursively call the converter for `items`.
         if (!prop.items) {
           // A valid array schema MUST have `items`. Fallback to `any` if missing.
           return z.array(z.any()).describe(prop.description || '');
@@ -261,7 +250,6 @@ export class GcliMcpBridge {
       }
     };

-    // If no schema or properties, return an empty shape object.
     if (!jsonSchema || !jsonSchema.properties) {
       return {};
     }
@@ -275,7 +263,7 @@ export class GcliMcpBridge {
       }
       shape[key] = fieldSchema;
     }
-    return shape; // Directly return the shape object.
+    return shape;
   }

   private convertGcliResultToMcpResult(
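
The schema hunks above belong to a converter that turns a tool's JSON Schema parameters into zod validators, recursing into array items. A self-contained sketch of that conversion, assuming a simplified property type rather than the bridge's actual definitions:

import { z } from 'zod';

type JsonProp = { type?: string; description?: string; items?: JsonProp };

function toZod(prop: JsonProp): z.ZodTypeAny {
  const describe = (s: z.ZodTypeAny) => s.describe(prop.description || '');
  switch (prop.type) {
    case 'string':
      return describe(z.string());
    case 'number':
      return describe(z.number());
    case 'boolean':
      return describe(z.boolean());
    case 'array':
      // A valid array schema must have `items`; fall back to any[] if missing.
      if (!prop.items) return describe(z.array(z.any()));
      return describe(z.array(toZod(prop.items))); // recurse into `items`
    default:
      return describe(z.any());
  }
}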

packages/mcp-server/src/bridge/openai.ts

Lines changed: 14 additions & 16 deletions
@@ -1,16 +1,16 @@
 import { Router, Request, Response } from 'express';
 import { type Config } from '@google/gemini-cli-core';
 import { createOpenAIStreamTransformer } from './stream-transformer.js';
-import { GeminiApiClient } from '../gemini-client.js'; // <-- import the new class
-import { type OpenAIChatCompletionRequest } from '../types.js'; // <-- import the new type
+import { GeminiApiClient } from '../gemini-client.js';
+import { type OpenAIChatCompletionRequest } from '../types.js';
 import { mapErrorToOpenAIError } from '../utils/error-mapper.js';
 import { logger } from '../utils/logger.js';
 import { randomUUID } from 'node:crypto';

 export function createOpenAIRouter(config: Config, debugMode = false): Router {
   const router = Router();

-  // Middleware: add a requestId to every request
+  // Middleware: Add a requestId to each request.
   router.use((req, res, next) => {
     (req as any).requestId = randomUUID();
     next();
@@ -31,23 +31,21 @@ export function createOpenAIRouter(config: Config, debugMode = false): Router {
     const stream = body.stream !== false;

     if (!stream) {
-      // Non-streaming response logic can be implemented later, or just return an error
+      // Non-streaming responses are not yet implemented.
       res
         .status(501)
         .json({ error: 'Non-streaming responses are not yet implemented.' });
       return;
     }

-    // --- Streaming response ---
+    // --- Streaming Response ---
     res.setHeader('Content-Type', 'text/event-stream');
     res.setHeader('Cache-Control', 'no-cache');
     res.setHeader('Connection', 'keep-alive');
     res.flushHeaders();

-    // 1. Use the new GeminiApiClient
     const client = new GeminiApiClient(config, debugMode);

-    // 2. Send the request, passing along all relevant parameters
     const geminiStream = await client.sendMessageStream({
       model: body.model,
       messages: body.messages,
@@ -57,8 +55,8 @@ export function createOpenAIRouter(config: Config, debugMode = false): Router {

     const openAIStream = createOpenAIStreamTransformer(body.model, debugMode);

-    // --- Core of the corrected logic ---
-    // 1. Create a ReadableStream to wrap our Gemini event stream
+    // --- Core streaming logic ---
+    // Create a ReadableStream to wrap our Gemini event stream.
     const readableStream = new ReadableStream({
       async start(controller) {
         for await (const value of geminiStream) {
@@ -68,11 +66,11 @@ export function createOpenAIRouter(config: Config, debugMode = false): Router {
       },
     });

-    // 2. Pipe our stream through the transformer
+    // Pipe our stream through the transformer.
     const transformedStream = readableStream.pipeThrough(openAIStream);
     const reader = transformedStream.getReader();

-    // 3. Manually read each transformed chunk and write it to the response immediately
+    // Manually read each transformed chunk and write it to the response immediately.
     try {
       while (true) {
         const { done, value } = await reader.read();
@@ -84,7 +82,7 @@ export function createOpenAIRouter(config: Config, debugMode = false): Router {
     } finally {
       reader.releaseLock();
     }
-    // --- End of fix ---
+    // --- End of core streaming logic ---

     const durationMs = Date.now() - startTime;
     logger.info('OpenAI bridge request finished', {
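
One detail worth noting: the manual reader loop exists because Express's res is a Node.js writable, not a WHATWG WritableStream, so transformedStream.pipeTo(res) is not an option. A condensed sketch of the pipeline these hunks build, assuming geminiStream is an async iterable and openAIStream is the TransformStream returned by createOpenAIStreamTransformer:

// Wrap the async iterable in a ReadableStream, transform it, and flush each
// SSE frame to the response as soon as it is produced.
const readable = new ReadableStream({
  async start(controller) {
    for await (const value of geminiStream) controller.enqueue(value);
    controller.close();
  },
});

const reader = readable.pipeThrough(openAIStream).getReader();
try {
  while (true) {
    const { done, value } = await reader.read();
    if (done) break;
    res.write(value); // write immediately; nothing buffers the whole stream
  }
} finally {
  reader.releaseLock();
}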
@@ -103,20 +101,20 @@ export function createOpenAIRouter(config: Config, debugMode = false): Router {
       // Call the new error-mapping function
       const { openAIError, statusCode } = mapErrorToOpenAIError(e);

-      // Respond with the mapped status code and error object
       if (!res.headersSent) {
         res.status(statusCode).json(openAIError);
       } else {
-        // If the stream has already started, we can't change the status code, but we can send the error in the stream
+        // If headers are already sent, we can't change the status code,
+        // but we can send an error in the stream.
         res.write(`data: ${JSON.stringify({ error: openAIError.error })}\n\n`);
         res.end();
       }
     }
   });

-  // A /v1/models endpoint could be added
+  // The /v1/models endpoint can be added here.
   router.get('/models', (req, res) => {
-    // Here we could return a fixed list of models, or fetch them from the config
+    // This can return a fixed list of models or get them from the config.
     res.json({
       object: 'list',
       data: [
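
The hunk is cut off at data: [ above, so the exact model list is not shown. For illustration only, a hypothetical payload in the OpenAI models-list shape this endpoint is assembling; the model IDs and owned_by values are placeholders, not what the router actually returns:

res.json({
  object: 'list',
  data: [
    // Placeholder entries following the OpenAI model-object fields.
    { id: 'gemini-2.5-pro', object: 'model', created: 0, owned_by: 'google' },
    { id: 'gemini-2.5-flash', object: 'model', created: 0, owned_by: 'google' },
  ],
});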

packages/mcp-server/src/bridge/stream-transformer.ts

Lines changed: 9 additions & 9 deletions
@@ -1,7 +1,7 @@
 import { randomUUID } from 'node:crypto';
 import { type StreamChunk } from '../types.js';

-// --- OpenAI response structure interfaces ---
+// --- OpenAI Response Interfaces ---
 interface OpenAIDelta {
   role?: 'assistant';
   content?: string | null;
@@ -28,7 +28,7 @@ interface OpenAIChunk {
   }[];
 }

-// --- New, stateful transformer ---
+// --- New Stateful Transformer ---
 export function createOpenAIStreamTransformer(
   model: string,
   debugMode = false,
@@ -89,13 +89,13 @@ export function createOpenAIStreamTransformer(

       case 'tool_code': {
         const { name, args } = chunk.data;
-        // **Important**: embed the function name in the ID so it can be parsed when the tool response arrives
+        // IMPORTANT: Embed the function name in the ID so it can be parsed when a tool response is received.
         const toolCallId = `call_${name}_${randomUUID()}`;

-        // OpenAI streaming tool calls must be sent in chunks
-        // 1. Send the chunk containing the function name
+        // OpenAI streaming tool calls need to be sent in chunks.
+        // 1. Send the chunk containing the function name.
         const nameDelta: OpenAIDelta = {
-          ...delta, // includes role (if this is the first chunk)
+          ...delta, // Include role if it's the first chunk
           tool_calls: [
             {
               index: toolCallIndex,
@@ -107,7 +107,7 @@ export function createOpenAIStreamTransformer(
         };
         enqueueChunk(controller, createChunk(nameDelta));

-        // 2. Send the chunk containing the parameters
+        // 2. Send the chunk containing the arguments.
         const argsDelta: OpenAIDelta = {
           tool_calls: [
             {
@@ -125,7 +125,7 @@ export function createOpenAIStreamTransformer(
       }

       case 'reasoning':
-        // These events have no direct counterpart in the OpenAI format for now; they can be ignored or logged somehow
+        // These events currently have no direct equivalent in the OpenAI format and can be ignored or logged.
         if (debugMode) {
           console.log(`[Stream Transformer] Ignoring chunk: ${chunk.type}`);
         }
@@ -134,7 +134,7 @@ export function createOpenAIStreamTransformer(
     },

     flush(controller) {
-      // When the stream ends, send a finish_reason of `tool_calls` or `stop`
+      // At the end of the stream, send a finish_reason of 'tool_calls' or 'stop'.
       const finish_reason = toolCallIndex > 0 ? 'tool_calls' : 'stop';
       enqueueChunk(controller, createChunk({}, finish_reason));