14 changes: 14 additions & 0 deletions packages/types/src/providers/openai-codex.ts
@@ -68,6 +68,20 @@ export const openAiCodexModels = {
supportsTemperature: false,
description: "GPT-5.3 Codex: OpenAI's flagship coding model via ChatGPT subscription",
},
"gpt-5.3-codex-spark": {
maxTokens: 8192,
contextWindow: 128000,
includedTools: ["apply_patch"],
excludedTools: ["apply_diff", "write_to_file"],
supportsImages: false,
supportsPromptCache: true,
supportsReasoningEffort: ["low", "medium", "high", "xhigh"],
reasoningEffort: "medium",
inputPrice: 0,
outputPrice: 0,
supportsTemperature: false,
description: "GPT-5.3 Codex Spark: Fast, text-only coding model via ChatGPT subscription",
},
"gpt-5.2-codex": {
maxTokens: 128000,
contextWindow: 400000,
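For context, the new `gpt-5.3-codex-spark` entry deliberately disables images and temperature and caps output at 8,192 tokens, well below its `gpt-5.3-codex` sibling. A minimal sketch of how a caller might consume these capability flags follows — only the `openAiCodexModels` map and its field names come from this diff; the `@roo-code/types` import path and the `resolveMaxOutputTokens` helper are assumptions for illustration:

```typescript
// Sketch only: reading capability flags off the model map added in this diff.
// The import path and helper name are hypothetical.
import { openAiCodexModels } from "@roo-code/types"

type CodexModelId = keyof typeof openAiCodexModels

function resolveMaxOutputTokens(id: CodexModelId, requested?: number): number {
	const info = openAiCodexModels[id]
	// Spark's ceiling is 8192; never hand the API a larger budget than the model allows.
	return Math.min(requested ?? info.maxTokens, info.maxTokens)
}

// resolveMaxOutputTokens("gpt-5.3-codex-spark", 32000) === 8192
```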
228 changes: 228 additions & 0 deletions src/api/providers/__tests__/openai-codex-native-tool-calls.spec.ts
@@ -97,4 +97,232 @@ describe("OpenAiCodexHandler native tool calls", () => {
name: "attempt_completion",
})
})

it("yields text when Codex emits assistant message only in response.output_item.done", async () => {
vi.spyOn(openAiCodexOAuthManager, "getAccessToken").mockResolvedValue("test-token")
vi.spyOn(openAiCodexOAuthManager, "getAccountId").mockResolvedValue("acct_test")
;(handler as any).client = {
responses: {
create: vi.fn().mockResolvedValue({
async *[Symbol.asyncIterator]() {
yield {
type: "response.output_item.done",
item: {
type: "message",
role: "assistant",
content: [{ type: "output_text", text: "hello from spark" }],
},
output_index: 0,
}
yield {
type: "response.completed",
response: {
id: "resp_done_only",
status: "completed",
output: [
{
type: "message",
role: "assistant",
content: [{ type: "output_text", text: "hello from spark" }],
},
],
usage: { input_tokens: 1, output_tokens: 2 },
},
}
},
}),
},
}

const stream = handler.createMessage("system", [{ role: "user", content: "test" } as any], {
taskId: "t",
tools: [],
})

const chunks: any[] = []
for await (const chunk of stream) {
chunks.push(chunk)
}

const textChunks = chunks.filter((c) => c.type === "text")
expect(textChunks.length).toBeGreaterThan(0)
expect(textChunks.map((c) => c.text).join("")).toContain("hello from spark")
})

it("yields text when Codex emits assistant message only in response.completed output", async () => {
vi.spyOn(openAiCodexOAuthManager, "getAccessToken").mockResolvedValue("test-token")
vi.spyOn(openAiCodexOAuthManager, "getAccountId").mockResolvedValue("acct_test")
;(handler as any).client = {
responses: {
create: vi.fn().mockResolvedValue({
async *[Symbol.asyncIterator]() {
yield {
type: "response.completed",
response: {
id: "resp_completed_only",
status: "completed",
output: [
{
type: "message",
role: "assistant",
content: [{ type: "output_text", text: "final payload only" }],
},
],
usage: { input_tokens: 1, output_tokens: 2 },
},
}
},
}),
},
}

const stream = handler.createMessage("system", [{ role: "user", content: "test" } as any], {
taskId: "t",
tools: [],
})

const chunks: any[] = []
for await (const chunk of stream) {
chunks.push(chunk)
}

const textChunks = chunks.filter((c) => c.type === "text")
expect(textChunks.length).toBeGreaterThan(0)
expect(textChunks.map((c) => c.text).join("")).toContain("final payload only")
})

it("yields text when Codex emits response.output_text.done without deltas", async () => {
vi.spyOn(openAiCodexOAuthManager, "getAccessToken").mockResolvedValue("test-token")
vi.spyOn(openAiCodexOAuthManager, "getAccountId").mockResolvedValue("acct_test")
;(handler as any).client = {
responses: {
create: vi.fn().mockResolvedValue({
async *[Symbol.asyncIterator]() {
yield {
type: "response.output_text.done",
text: "done-event text only",
}
yield {
type: "response.completed",
response: {
id: "resp_done_text_only",
status: "completed",
output: [],
usage: { input_tokens: 1, output_tokens: 2 },
},
}
},
}),
},
}

const stream = handler.createMessage("system", [{ role: "user", content: "test" } as any], {
taskId: "t",
tools: [],
})

const chunks: any[] = []
for await (const chunk of stream) {
chunks.push(chunk)
}

const textChunks = chunks.filter((c) => c.type === "text")
expect(textChunks.length).toBeGreaterThan(0)
expect(textChunks.map((c) => c.text).join("")).toContain("done-event text only")
})

it("yields tool_call when Codex emits function_call only in response.output_item.done", async () => {
vi.spyOn(openAiCodexOAuthManager, "getAccessToken").mockResolvedValue("test-token")
vi.spyOn(openAiCodexOAuthManager, "getAccountId").mockResolvedValue("acct_test")
;(handler as any).client = {
responses: {
create: vi.fn().mockResolvedValue({
async *[Symbol.asyncIterator]() {
yield {
type: "response.output_item.done",
item: {
type: "function_call",
call_id: "call_done_only",
name: "attempt_completion",
arguments: '{"result":"ok"}',
},
output_index: 0,
}
yield {
type: "response.completed",
response: {
id: "resp_done_tool_only",
status: "completed",
output: [],
usage: { input_tokens: 1, output_tokens: 2 },
},
}
},
}),
},
}

const stream = handler.createMessage("system", [{ role: "user", content: "test" } as any], {
taskId: "t",
tools: [],
})

const chunks: any[] = []
for await (const chunk of stream) {
chunks.push(chunk)
}

const toolCalls = chunks.filter((c) => c.type === "tool_call")
expect(toolCalls.length).toBeGreaterThan(0)
expect(toolCalls[0]).toMatchObject({
type: "tool_call",
id: "call_done_only",
name: "attempt_completion",
})
})

it("yields text when Codex emits response.content_part.added", async () => {
vi.spyOn(openAiCodexOAuthManager, "getAccessToken").mockResolvedValue("test-token")
vi.spyOn(openAiCodexOAuthManager, "getAccountId").mockResolvedValue("acct_test")
;(handler as any).client = {
responses: {
create: vi.fn().mockResolvedValue({
async *[Symbol.asyncIterator]() {
yield {
type: "response.content_part.added",
part: {
type: "output_text",
text: "content part text",
},
output_index: 0,
content_index: 0,
}
yield {
type: "response.completed",
response: {
id: "resp_content_part",
status: "completed",
output: [],
usage: { input_tokens: 1, output_tokens: 2 },
},
}
},
}),
},
}

const stream = handler.createMessage("system", [{ role: "user", content: "test" } as any], {
taskId: "t",
tools: [],
})

const chunks: any[] = []
for await (const chunk of stream) {
chunks.push(chunk)
}

const textChunks = chunks.filter((c) => c.type === "text")
expect(textChunks.length).toBeGreaterThan(0)
expect(textChunks.map((c) => c.text).join("")).toContain("content part text")
})
})
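Taken together, these cases pin down a fallback order: prefer streamed deltas, then `response.output_text.done`, then message items in `response.output_item.done` / `response.content_part.added`, and finally the `response.completed` output array — without double-emitting text that arrives on more than one event. A minimal sketch of that de-duplication idea (event and field shapes are copied from the fixtures above; the tracking logic is an illustration, not the handler's actual implementation):

```typescript
// Sketch: emit each piece of assistant text once, no matter which event carries it.
// Deduping by the full text string is a simplification for illustration.
type TextChunk = { type: "text"; text: string }

function* emitTextOnce(events: any[]): Generator<TextChunk> {
	const seen = new Set<string>()
	function* emit(text: string | undefined): Generator<TextChunk> {
		if (text && !seen.has(text)) {
			seen.add(text)
			yield { type: "text", text }
		}
	}
	for (const event of events) {
		if (event.type === "response.output_text.done") {
			yield* emit(event.text)
		} else if (event.type === "response.content_part.added" && event.part?.type === "output_text") {
			yield* emit(event.part.text)
		} else if (event.type === "response.output_item.done" && event.item?.type === "message") {
			for (const part of event.item.content ?? []) {
				if (part.type === "output_text") yield* emit(part.text)
			}
		} else if (event.type === "response.completed") {
			for (const item of event.response?.output ?? []) {
				if (item.type !== "message") continue
				for (const part of item.content ?? []) {
					if (part.type === "output_text") yield* emit(part.text)
				}
			}
		}
	}
}
```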
12 changes: 11 additions & 1 deletion src/api/providers/__tests__/openai-codex.spec.ts
@@ -3,7 +3,7 @@
import { OpenAiCodexHandler } from "../openai-codex"

describe("OpenAiCodexHandler.getModel", () => {
it.each(["gpt-5.1", "gpt-5", "gpt-5.1-codex", "gpt-5-codex", "gpt-5-codex-mini"])(
it.each(["gpt-5.1", "gpt-5", "gpt-5.1-codex", "gpt-5-codex", "gpt-5-codex-mini", "gpt-5.3-codex-spark"])(
"should return specified model when a valid model id is provided: %s",
(apiModelId) => {
const handler = new OpenAiCodexHandler({ apiModelId })
@@ -23,4 +23,14 @@ describe("OpenAiCodexHandler.getModel", () => {
expect(model.id).toBe("gpt-5.3-codex")
expect(model.info).toBeDefined()
})

it("should use Spark-specific limits and capabilities", () => {
const handler = new OpenAiCodexHandler({ apiModelId: "gpt-5.3-codex-spark" })
const model = handler.getModel()

expect(model.id).toBe("gpt-5.3-codex-spark")
expect(model.info.contextWindow).toBe(128000)
expect(model.info.maxTokens).toBe(8192)
expect(model.info.supportsImages).toBe(false)
})
})