eslint.config.mjs (2 changes: 1 addition & 1 deletion)
@@ -19,7 +19,7 @@ const compat = new FlatCompat({

export default [
{
ignores: ["**/dist", "**/build", "**/node_modules", "**/*.md", ".contexts"]
ignores: ["**/dist", "**/build", "**/node_modules", "**/*.md", ".contexts", ".mini-agent/**"]
},
...compat.extends(
"eslint:recommended",
src/cli/chat-ui.ts (143 changes: 120 additions & 23 deletions)
@@ -6,19 +6,18 @@
*/
import type { AiError, LanguageModel } from "@effect/ai"
import type { Error as PlatformError, FileSystem } from "@effect/platform"
import { Cause, Context, Effect, Fiber, Layer, Mailbox, Stream } from "effect"
import { is } from "effect/Schema"
import { Cause, Context, Effect, Fiber, Layer, Mailbox, Schema, Stream } from "effect"
import {
AssistantMessageEvent,
type ContextEvent,
CodemodeResultEvent,
CodemodeValidationErrorEvent,
LLMRequestInterruptedEvent,
TextDeltaEvent,
UserMessageEvent
} from "../context.model.ts"
import { ContextService } from "../context.service.ts"
import type { ContextLoadError, ContextSaveError } from "../errors.ts"
import { type ContextOrCodemodeEvent, ContextService } from "../context.service.ts"
import type { CodeStorageError, ContextLoadError, ContextSaveError } from "../errors.ts"
import type { CurrentLlmConfig } from "../llm-config.ts"
import { streamLLMResponse } from "../llm.ts"
import { type ChatController, runOpenTUIChat } from "./components/opentui-chat.tsx"

type ChatSignal =
@@ -32,7 +31,7 @@ export class ChatUI extends Context.Tag("@app/ChatUI")<
contextName: string
) => Effect.Effect<
void,
AiError.AiError | PlatformError.PlatformError | ContextLoadError | ContextSaveError,
AiError.AiError | PlatformError.PlatformError | ContextLoadError | ContextSaveError | CodeStorageError,
LanguageModel.LanguageModel | FileSystem.FileSystem | CurrentLlmConfig
>
}
@@ -81,7 +80,7 @@ const runChatLoop = (
mailbox: Mailbox.Mailbox<ChatSignal>
): Effect.Effect<
void,
AiError.AiError | PlatformError.PlatformError | ContextLoadError | ContextSaveError,
AiError.AiError | PlatformError.PlatformError | ContextLoadError | ContextSaveError | CodeStorageError,
LanguageModel.LanguageModel | FileSystem.FileSystem | CurrentLlmConfig
> =>
Effect.fn("ChatUI.runChatLoop")(function*() {
@@ -97,6 +96,18 @@ type TurnResult =
| { readonly _tag: "continue" }
| { readonly _tag: "exit" }

/** Check if event is displayable in the chat feed */
const isDisplayableEvent = (event: ContextOrCodemodeEvent): boolean =>
Schema.is(TextDeltaEvent)(event) ||
Schema.is(AssistantMessageEvent)(event) ||
Schema.is(CodemodeResultEvent)(event) ||
Schema.is(CodemodeValidationErrorEvent)(event)

/** Check if event triggers continuation (agent loop) */
const triggersContinuation = (event: ContextOrCodemodeEvent): boolean =>
(Schema.is(CodemodeResultEvent)(event) && event.triggerAgentTurn === "after-current-turn") ||
(Schema.is(CodemodeValidationErrorEvent)(event) && event.triggerAgentTurn === "after-current-turn")

const runChatTurn = (
contextName: string,
contextService: Context.Tag.Service<typeof ContextService>,
@@ -105,7 +116,7 @@
pendingMessage: string | null
): Effect.Effect<
TurnResult,
AiError.AiError | PlatformError.PlatformError | ContextLoadError | ContextSaveError,
AiError.AiError | PlatformError.PlatformError | ContextLoadError | ContextSaveError | CodeStorageError,
LanguageModel.LanguageModel | FileSystem.FileSystem | CurrentLlmConfig
> =>
Effect.fn("ChatUI.runChatTurn")(function*() {
@@ -129,28 +140,105 @@
}

const userEvent = new UserMessageEvent({ content: userMessage })

yield* contextService.persistEvent(contextName, userEvent)
chat.addEvent(userEvent)

const events = yield* contextService.load(contextName)
let accumulatedText = ""
let needsContinuation = false

// Use contextService.addEvents with codemode enabled
const eventStream = contextService.addEvents(contextName, [userEvent], { codemode: true })

const streamFiber = yield* Effect.fork(
streamLLMResponse(events).pipe(
Stream.tap((event: ContextEvent) =>
eventStream.pipe(
Stream.tap((event: ContextOrCodemodeEvent) =>
Effect.sync(() => {
if (is(TextDeltaEvent)(event)) {
if (Schema.is(TextDeltaEvent)(event)) {
accumulatedText += event.delta
}
if (triggersContinuation(event)) {
needsContinuation = true
}
if (isDisplayableEvent(event)) {
chat.addEvent(event)
}
})
),
Stream.filter(is(AssistantMessageEvent)),
Stream.tap((event) =>
Effect.gen(function*() {
yield* contextService.persistEvent(contextName, event)
chat.addEvent(event)
Stream.runDrain
)
)

const result = yield* awaitStreamCompletion(streamFiber, mailbox)

if (result._tag === "completed") {
// If we need continuation (codemode result with output), run another turn
if (needsContinuation) {
return yield* runAgentContinuation(contextName, contextService, chat, mailbox)
}
return { _tag: "continue" } as const
}

if (result._tag === "exit") {
if (accumulatedText.length > 0) {
const interruptedEvent = new LLMRequestInterruptedEvent({
requestId: crypto.randomUUID(),
reason: "user_cancel",
partialResponse: accumulatedText
})
yield* contextService.persistEvent(contextName, interruptedEvent)
chat.addEvent(interruptedEvent)
}
return { _tag: "exit" } as const
}

// result._tag === "interrupted" - user hit return during streaming
if (accumulatedText.length > 0) {
const interruptedEvent = new LLMRequestInterruptedEvent({
requestId: crypto.randomUUID(),
reason: result.newMessage ? "user_new_message" : "user_cancel",
partialResponse: accumulatedText
})
yield* contextService.persistEvent(contextName, interruptedEvent)
chat.addEvent(interruptedEvent)
}

if (result.newMessage) {
return yield* runChatTurn(contextName, contextService, chat, mailbox, result.newMessage)
}

return { _tag: "continue" } as const
})()

/** Run agent continuation loop (for codemode results that need follow-up) */
const runAgentContinuation = (
contextName: string,
contextService: Context.Tag.Service<typeof ContextService>,
chat: ChatController,
mailbox: Mailbox.Mailbox<ChatSignal>
): Effect.Effect<
TurnResult,
AiError.AiError | PlatformError.PlatformError | ContextLoadError | ContextSaveError | CodeStorageError,
LanguageModel.LanguageModel | FileSystem.FileSystem | CurrentLlmConfig
> =>
Effect.fn("ChatUI.runAgentContinuation")(function*() {
let accumulatedText = ""
let needsContinuation = false

// Empty input events - the persisted CodemodeResult triggers the turn
const eventStream = contextService.addEvents(contextName, [], { codemode: true })

const streamFiber = yield* Effect.fork(
eventStream.pipe(
Stream.tap((event: ContextOrCodemodeEvent) =>
Effect.sync(() => {
if (Schema.is(TextDeltaEvent)(event)) {
accumulatedText += event.delta
}
if (triggersContinuation(event)) {
needsContinuation = true
}
if (isDisplayableEvent(event)) {
chat.addEvent(event)
}
})
),
Stream.runDrain
@@ -160,6 +248,9 @@ const runChatTurn = (
const result = yield* awaitStreamCompletion(streamFiber, mailbox)

if (result._tag === "completed") {
if (needsContinuation) {
return yield* runAgentContinuation(contextName, contextService, chat, mailbox)
Review comment:

Bug: Missing iteration limit allows unbounded agent loop recursion

The runAgentContinuation function recursively calls itself at line 252 without any iteration limit, unlike the CLI's runEventStream, which caps the loop with MAX_AGENT_LOOP_ITERATIONS = 15. The unbounded recursion occurs whenever needsContinuation is true, i.e. when a CodemodeResultEvent or CodemodeValidationErrorEvent arrives with triggerAgentTurn === "after-current-turn". If the LLM consistently produces typechecking errors, writes to stdout via console.log(), or fails to include <codemode> tags, the agent loop recurses indefinitely, potentially causing stack overflow or resource exhaustion in the interactive chat UI.

Additional Locations (1)

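A minimal sketch of the kind of cap this comment asks for, assuming the same MAX_AGENT_LOOP_ITERATIONS = 15 constant that the CLI's runEventStream is said to use; runBoundedContinuation, step, and iteration are hypothetical names for illustration, not part of this PR:

import { Effect } from "effect"

// Cap mirroring the CLI's MAX_AGENT_LOOP_ITERATIONS cited above.
const MAX_AGENT_LOOP_ITERATIONS = 15

// Shape of the fix: `step` runs one agent turn and reports whether another
// turn is needed; recursion stops at the cap instead of continuing for as
// long as needsContinuation stays true.
const runBoundedContinuation = (
  step: Effect.Effect<boolean>,
  iteration = 0
): Effect.Effect<void> =>
  Effect.gen(function*() {
    if (iteration >= MAX_AGENT_LOOP_ITERATIONS) {
      // Surface the stall instead of recursing forever.
      yield* Effect.logWarning("agent loop hit iteration limit; stopping")
      return
    }
    const needsContinuation = yield* step
    if (needsContinuation) {
      yield* runBoundedContinuation(step, iteration + 1)
    }
  })

Applied to runAgentContinuation itself, this amounts to one extra iteration parameter on the signature and iteration + 1 at the recursive call site.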

}
return { _tag: "continue" } as const
}

@@ -176,7 +267,7 @@
return { _tag: "exit" } as const
}

// result._tag === "interrupted" - user hit return during streaming
// Interrupted - save partial and return to wait for input
if (accumulatedText.length > 0) {
const interruptedEvent = new LLMRequestInterruptedEvent({
requestId: crypto.randomUUID(),
@@ -200,9 +291,15 @@ type StreamResult =
| { readonly _tag: "interrupted"; readonly newMessage: string | null }

const awaitStreamCompletion = (
fiber: Fiber.RuntimeFiber<void, AiError.AiError | PlatformError.PlatformError | ContextLoadError | ContextSaveError>,
fiber: Fiber.RuntimeFiber<
void,
AiError.AiError | PlatformError.PlatformError | ContextLoadError | ContextSaveError | CodeStorageError
>,
mailbox: Mailbox.Mailbox<ChatSignal>
): Effect.Effect<StreamResult, AiError.AiError | PlatformError.PlatformError | ContextLoadError | ContextSaveError> =>
): Effect.Effect<
StreamResult,
AiError.AiError | PlatformError.PlatformError | ContextLoadError | ContextSaveError | CodeStorageError
> =>
Effect.fn("ChatUI.awaitStreamCompletion")(function*() {
const waitForFiber = Fiber.join(fiber).pipe(Effect.as({ _tag: "completed" } as StreamResult))
const waitForInterrupt = Effect.gen(function*() {