
Commit fc16d98

added attachments option to existing planner flow
lilyydu committed Sep 18, 2024
1 parent 44a07cb · commit fc16d98
Showing 4 changed files with 32 additions and 6 deletions.
js/packages/teams-ai/src/models/OpenAIModel.ts (4 changes: 3 additions & 1 deletion)
@@ -27,6 +27,7 @@ import { Tokenizer } from '../tokenizers';
 import { ActionCall, PromptResponse } from '../types';

 import { PromptCompletionModel, PromptCompletionModelEmitter } from './PromptCompletionModel';
+import { StreamingResponse } from '../StreamingResponse';

 /**
  * Base model options common to both OpenAI and Azure OpenAI services.
@@ -436,7 +437,8 @@ export class OpenAIModel implements PromptCompletionModel {

         // Signal response received
         const response: PromptResponse<string> = { status: 'success', input, message };
-        this._events.emit('responseReceived', context, memory, response);
+        const streamer: StreamingResponse = memory.getValue("temp.streamer");
+        this._events.emit('responseReceived', context, memory, response, streamer);

         // Let any pending events flush before returning
         await new Promise((resolve) => setTimeout(resolve, 0));
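For context, listeners on the model's responseReceived event now get the active StreamingResponse as a fourth argument; it will be undefined for non-streamed turns, since temp.streamer is only populated when LLMClient creates a streamer (see the LLMClient.ts changes below). A minimal listener sketch, assuming a model instance is in scope and the handler parameter types are inferred from the emitter:

    model.events?.on('responseReceived', (context, memory, response, streamer) => {
        // streamer is the StreamingResponse created by LLMClient for this turn,
        // or undefined when the response was not streamed.
        if (response.status === 'success' && streamer) {
            console.log(`Streamed response completed: ${response.message?.content}`);
        }
    });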
js/packages/teams-ai/src/models/PromptCompletionModel.ts (6 changes: 4 additions & 2 deletions)
@@ -14,6 +14,7 @@ import { Tokenizer } from '../tokenizers';
 import { PromptResponse } from '../types';
 import { Memory } from '../MemoryFork';
 import StrictEventEmitter from '../external/strict-event-emitter-types';
+import { StreamingResponse } from '../StreamingResponse';

 /**
  * Events emitted by a PromptCompletionModel.
@@ -51,7 +52,7 @@ export interface PromptCompletionModelEvents {
      * @param memory An interface for accessing state values.
      * @param response Final response returned by the model.
      */
-    responseReceived: (context: TurnContext, memory: Memory, response: PromptResponse<string>) => void;
+    responseReceived: (context: TurnContext, memory: Memory, response: PromptResponse<string>, streamer: StreamingResponse) => void;
 }

 /**
@@ -81,7 +82,8 @@ export type PromptCompletionModelChunkReceivedEvent = (
 export type PromptCompletionModelResponseReceivedEvent = (
     context: TurnContext,
     memory: Memory,
-    response: PromptResponse<string>
+    response: PromptResponse<string>,
+    streamer: StreamingResponse,
 ) => void;

 /**
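The widened PromptCompletionModelResponseReceivedEvent type is the shape an endStreamHandler (added to ActionPlanner and LLMClient below) has to satisfy. A hedged sketch of such a handler that attaches a card to the final streamed message; the import paths and the setAttachments call are assumptions made for illustration (setAttachments is not part of this diff), while endStream is the same method LLMClient itself uses:

    import { CardFactory } from 'botbuilder';
    import { PromptCompletionModelResponseReceivedEvent } from '@microsoft/teams-ai';

    const endStreamHandler: PromptCompletionModelResponseReceivedEvent = (context, memory, response, streamer) => {
        if (!streamer) {
            return; // nothing to finalize for non-streamed responses
        }
        const card = CardFactory.heroCard('Result', response.message?.content ?? '');
        streamer.setAttachments([card]); // assumed helper for the new attachments flow
        void streamer.endStream(); // LLMClient skips endStream() when a handler is registered (see below)
    };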
js/packages/teams-ai/src/planners/ActionPlanner.ts (7 changes: 6 additions & 1 deletion)
@@ -11,7 +11,7 @@ import { TurnContext } from 'botbuilder';
 import { AI } from '../AI';
 import { DefaultAugmentation } from '../augmentations';
 import { Memory } from '../MemoryFork';
-import { PromptCompletionModel } from '../models';
+import { PromptCompletionModel, PromptCompletionModelResponseReceivedEvent } from '../models';
 import { PromptTemplate, PromptManager } from '../prompts';
 import { Tokenizer } from '../tokenizers';
 import { TurnState } from '../TurnState';
@@ -85,6 +85,11 @@ export interface ActionPlannerOptions<TState extends TurnState = TurnState> {
      * Optional message to send a client at the start of a streaming response.
      */
     startStreamingMessage?: string;
+
+    /**
+     * Optional handler to run when a stream is about to conclude.
+     */
+    endStreamHandler?: PromptCompletionModelResponseReceivedEvent
 }

 /**
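A sketch of wiring the new option into a planner; model, prompts, and defaultPrompt are the planner's usual required options, and the handler body and messages here are illustrative only:

    const planner = new ActionPlanner({
        model,
        prompts,
        defaultPrompt: 'chat',
        startStreamingMessage: 'Loading stream results...',
        endStreamHandler: (context, memory, response, streamer) => {
            // Runs when the stream is about to conclude; the handler now owns closing it.
            if (streamer) {
                void streamer.endStream();
            }
        }
    });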
js/packages/teams-ai/src/planners/LLMClient.ts (21 changes: 19 additions & 2 deletions)
@@ -13,7 +13,8 @@ import { Memory, MemoryFork } from '../MemoryFork';
 import {
     PromptCompletionModel,
     PromptCompletionModelBeforeCompletionEvent,
-    PromptCompletionModelChunkReceivedEvent
+    PromptCompletionModelChunkReceivedEvent,
+    PromptCompletionModelResponseReceivedEvent
 } from '../models';
 import { ConversationHistory, Message, Prompt, PromptFunctions, PromptTemplate } from '../prompts';
 import { StreamingResponse } from '../StreamingResponse';
@@ -91,6 +92,11 @@ export interface LLMClientOptions<TContent = any> {
      * Optional message to send a client at the start of a streaming response.
      */
     startStreamingMessage?: string;
+
+    /**
+     * Optional handler to run when a stream is about to conclude.
+     */
+    endStreamHandler?: PromptCompletionModelResponseReceivedEvent
 }

 /**
@@ -193,6 +199,7 @@ export interface ConfiguredLLMClientOptions<TContent = any> {
  */
 export class LLMClient<TContent = any> {
     private readonly _startStreamingMessage: string | undefined;
+    private readonly _endStreamHandler: PromptCompletionModelResponseReceivedEvent | undefined;

     /**
      * Configured options for this LLMClient instance.
@@ -226,6 +233,7 @@ export class LLMClient<TContent = any> {
         }

         this._startStreamingMessage = options.startStreamingMessage;
+        this._endStreamHandler = options.endStreamHandler;
     }

     /**
@@ -290,6 +298,7 @@

             // Create streamer and send initial message
             streamer = new StreamingResponse(context);
+            memory.setValue("temp.streamer", streamer)
             if (this._startStreamingMessage) {
                 streamer.queueInformativeUpdate(this._startStreamingMessage);
             }
@@ -313,6 +322,10 @@
         if (this.options.model.events) {
             this.options.model.events.on('beforeCompletion', beforeCompletion);
             this.options.model.events.on('chunkReceived', chunkReceived);
+
+            if (this._endStreamHandler) {
+                this.options.model.events.on("responseReceived", this._endStreamHandler)
+            }
         }

         try {
@@ -325,7 +338,7 @@

             // End the stream if streaming
             // - We're not listening for the response received event because we can't await the completion of events.
-            if (streamer) {
+            if (streamer && !this._endStreamHandler) {
                 await streamer.endStream();
             }

@@ -335,6 +348,10 @@
             if (this.options.model.events) {
                 this.options.model.events.off('beforeCompletion', beforeCompletion);
                 this.options.model.events.off('chunkReceived', chunkReceived);
+
+                if (this._endStreamHandler) {
+                    this.options.model.events.off("responseReceived", this._endStreamHandler)
+                }
             }
         }
     }
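Because the live StreamingResponse is parked under the temp.streamer key, other code running in the same turn can also reach it through state rather than through the event. A sketch of that lookup (not part of this commit; assumes a TurnState-style state object, which implements the Memory getValue method):

    // elsewhere in the same turn, while the stream is still open
    const streamer: StreamingResponse | undefined = state.getValue('temp.streamer');
    if (streamer) {
        streamer.queueInformativeUpdate('Still working on it...');
    }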
