Skip to content

Commit e2ab8f3

Browse files
lilyydu and lilydu authored
[JS] feat: Generated by AI Label, Feedback Loop, Streaming Buffer, Error Propagation, Entities Metadata (#2135)
## Linked issues closes: #1970 ## Details - Added temporary 1.5 second buffer to adhere to 1RPS backend service requirement - Added support for Feedback Loop - Added support for Generated by AI label - Added reject/catch handling for errors - Added `entities` metadata to match GA requirements **screenshots**: ![image](https://github.com/user-attachments/assets/2b5d576e-c00a-4f20-90e9-ba499bff8a7b) ## Attestation Checklist - [x] My code follows the style guidelines of this project - I have checked for/fixed spelling, linting, and other errors - I have commented my code for clarity - I have made corresponding changes to the documentation (updating the doc strings in the code is sufficient) - My changes generate no new warnings - I have added tests that validates my changes, and provides sufficient test coverage. I have tested with: - Local testing - E2E testing in Teams - New and existing unit tests pass locally with my changes --------- Co-authored-by: lilydu <[email protected]>
1 parent feeb230 commit e2ab8f3

File tree

8 files changed

+172
-14
lines changed

8 files changed

+172
-14
lines changed

getting-started/CONCEPTS/STREAMING.md

Lines changed: 5 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -51,7 +51,10 @@ Once `endStream()` is called, the stream is considered ended and no further upda
5151

5252
### Current Limitations:
5353
- Streaming is only available in 1:1 chats.
54+
- SendActivity requests are restricted to 1 RPS. Our SDK buffers to 1.5 seconds.
55+
- For Powered by AI features, only the Feedback Loop and Generated by AI Label is currently supported.
5456
- Only rich text can be streamed.
57+
- Due to future GA protocol changes, the `channelData` metadata must be included in the `entities` object as well.
5558
- Only one informative message can be set. This is reused for each message.
5659
- Examples include:
5760
- “Scanning through documents”
@@ -70,7 +73,8 @@ You can configure streaming with your bot by following these steps:
7073

7174

7275
#### Optional additions:
73-
- Set the informative message in the `ActionPlanner` declaration via the `StartStreamingMessage` config.
76+
- Set the informative message in the `ActionPlanner` declaration via the `StartStreamingMessage` config.
77+
- As previously, set the feedback loop toggle in the `AIOptions` object in the `app` declaration and specify a handler.
7478
- Set attachments in the final chunk via the `EndStreamHandler` in the `ActionPlanner` declaration.
7579

7680
#### C#

js/packages/teams-ai/src/AI.ts

Lines changed: 7 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -246,6 +246,13 @@ export class AI<TState extends TurnState = TurnState> {
246246
return this._options.planner;
247247
}
248248

249+
/**
250+
* @returns {boolean} Returns the feedback loop flag.
251+
*/
252+
public get enableFeedbackLoop(): boolean {
253+
return this._options.enable_feedback_loop;
254+
}
255+
249256
/**
250257
* Registers a handler for a named action.
251258
* @remarks

js/packages/teams-ai/src/StreamingResponse.spec.ts

Lines changed: 61 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -130,7 +130,11 @@ describe('StreamingResponse', function () {
130130
const activity = adapter.getNextReply();
131131
assert.equal(activity.type, 'message', 'activity.type should be "message"');
132132
assert.equal(activity.text, '', 'activity.text should be ""');
133-
assert.deepEqual(activity.channelData, { streamType: 'final' }, 'activity.channelData should match');
133+
assert.deepEqual(
134+
activity.channelData,
135+
{ streamType: 'final', feedbackLoopEnabled: false },
136+
'activity.channelData should match'
137+
);
134138
});
135139
});
136140

@@ -153,9 +157,63 @@ describe('StreamingResponse', function () {
153157
assert.equal(activities[2].text, 'firstsecond', 'final activity text should be "firstsecond"');
154158
assert.deepEqual(
155159
activities[2].channelData,
156-
{ streamType: 'final', streamId: response.streamId },
160+
{ streamType: 'final', streamId: response.streamId, feedbackLoopEnabled: false },
161+
'final activity channelData should match'
162+
);
163+
});
164+
});
165+
166+
it('should send a final message with powered by AI features', async () => {
167+
const adapter = new TestAdapter();
168+
await adapter.sendTextToBot('test', async (context) => {
169+
const response = new StreamingResponse(context);
170+
response.queueTextChunk('first');
171+
response.queueTextChunk('second');
172+
response.setFeedbackLoop(true);
173+
response.setGeneratedByAILabel(true);
174+
await response.waitForQueue();
175+
await response.endStream();
176+
assert(response.updatesSent == 2, 'updatesSent should be 2');
177+
178+
// Validate sent activities
179+
const activities = adapter.activeQueue;
180+
assert.equal(activities.length, 3, 'should have sent 3 activities');
181+
assert.equal(activities[0].channelData.streamSequence, 1, 'first activity streamSequence should be 1');
182+
assert.equal(activities[0].entities!.length, 1, 'length of first activity entities should be 1');
183+
assert.deepEqual(
184+
activities[0].entities,
185+
[{ type: 'streaminfo', properties: { ...activities[0].channelData } }],
186+
'first activity entities should match'
187+
);
188+
assert.equal(activities[1].channelData.streamSequence, 2, 'second activity streamSequence should be 2');
189+
assert.equal(activities[1].entities!.length, 1, 'length of second activity entities should be 1');
190+
assert.deepEqual(
191+
activities[1].entities,
192+
[{ type: 'streaminfo', properties: { ...activities[1].channelData } }],
193+
'second activity entities should match'
194+
);
195+
assert.equal(activities[2].type, 'message', 'final activity type should be "message"');
196+
assert.equal(activities[2].text, 'firstsecond', 'final activity text should be "firstsecond"');
197+
198+
assert.deepEqual(
199+
activities[2].channelData,
200+
{ streamType: 'final', streamId: response.streamId, feedbackLoopEnabled: true },
157201
'final activity channelData should match'
158202
);
203+
assert.deepEqual(
204+
activities[2].entities,
205+
[
206+
{ type: 'streaminfo', properties: { streamType: 'final', streamId: response.streamId } },
207+
{
208+
type: 'https://schema.org/Message',
209+
'@type': 'Message',
210+
'@context': 'https://schema.org',
211+
'@id': '',
212+
additionalType: ['AIGeneratedContent']
213+
}
214+
],
215+
'final activity entities obj should match'
216+
);
159217
});
160218
});
161219

@@ -191,7 +249,7 @@ describe('StreamingResponse', function () {
191249
assert.equal(activities[2].text, 'firstsecond', 'final activity text should be "firstsecond"');
192250
assert.deepEqual(
193251
activities[2].channelData,
194-
{ streamType: 'final', streamId: response.streamId },
252+
{ streamType: 'final', streamId: response.streamId, feedbackLoopEnabled: false },
195253
'final activity channelData should match'
196254
);
197255
assert.notEqual(activities[2].attachments, null);

js/packages/teams-ai/src/StreamingResponse.ts

Lines changed: 63 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -6,7 +6,8 @@
66
* Licensed under the MIT License.
77
*/
88

9-
import { Activity, Attachment, TurnContext } from 'botbuilder-core';
9+
import { Activity, Attachment, TurnContext, Entity } from 'botbuilder-core';
10+
import { AIEntity } from './types';
1011

1112
/**
1213
* A helper class for streaming responses to the client.
@@ -31,6 +32,10 @@ export class StreamingResponse {
3132
private _queueSync: Promise<void> | undefined;
3233
private _chunkQueued = false;
3334

35+
// Powered by AI feature flags
36+
private _enableFeedbackLoop = false;
37+
private _enableGeneratedByAILabel = false;
38+
3439
/**
3540
* Creates a new StreamingResponse instance.
3641
* @param {TurnContext} context - Context for the current turn of conversation with the user.
@@ -79,7 +84,7 @@ export class StreamingResponse {
7984
}
8085

8186
/**
82-
* Queues a chunk of partial message text to be sent to the client.
87+
* Queues a chunk of partial message text to be sent to the client
8388
* @remarks
8489
* The text we be sent as quickly as possible to the client. Chunks may be combined before
8590
* delivery to the client.
@@ -111,7 +116,7 @@ export class StreamingResponse {
111116
this.queueNextChunk();
112117

113118
// Wait for the queue to drain
114-
return this._queueSync!;
119+
return this.waitForQueue();
115120
}
116121

117122
/**
@@ -122,6 +127,25 @@ export class StreamingResponse {
122127
this._attachments = attachments;
123128
}
124129

130+
/**
131+
* Sets the Feedback Loop in Teams that allows a user to
132+
* give thumbs up or down to a response.
133+
* Default is `false`.
134+
* @param enableFeedbackLoop If true, the feedback loop is enabled.
135+
*/
136+
public setFeedbackLoop(enableFeedbackLoop: boolean): void {
137+
this._enableFeedbackLoop = enableFeedbackLoop;
138+
}
139+
140+
/**
141+
* Sets the the Generated by AI label in Teams
142+
* Default is `false`.
143+
* @param enableGeneratedByAILabel If true, the label is added.
144+
*/
145+
public setGeneratedByAILabel(enableGeneratedByAILabel: boolean): void {
146+
this._enableGeneratedByAILabel = enableGeneratedByAILabel;
147+
}
148+
125149
/**
126150
* Returns the most recently streamed message.
127151
* @returns The streamed message.
@@ -185,7 +209,10 @@ export class StreamingResponse {
185209

186210
// If there's no sync in progress, start one
187211
if (!this._queueSync) {
188-
this._queueSync = this.drainQueue();
212+
this._queueSync = this.drainQueue().catch((err) => {
213+
console.error(`Error occured when sending activity while streaming: "${err}".`);
214+
throw err;
215+
});
189216
}
190217
}
191218

@@ -195,7 +222,7 @@ export class StreamingResponse {
195222
* @private
196223
*/
197224
private drainQueue(): Promise<void> {
198-
return new Promise<void>(async (resolve) => {
225+
return new Promise<void>(async (resolve, reject) => {
199226
try {
200227
while (this._queue.length > 0) {
201228
// Get next activity from queue
@@ -207,6 +234,8 @@ export class StreamingResponse {
207234
}
208235

209236
resolve();
237+
} catch (err) {
238+
reject(err);
210239
} finally {
211240
// Queue is empty, mark as idle
212241
this._queueSync = undefined;
@@ -227,8 +256,37 @@ export class StreamingResponse {
227256
activity.channelData = Object.assign({}, activity.channelData, { streamId: this._streamId });
228257
}
229258

259+
activity.entities = [
260+
{
261+
type: 'streaminfo',
262+
properties: {
263+
...activity.channelData
264+
}
265+
} as Entity
266+
];
267+
268+
// Add in Powered by AI feature flags
269+
if (this._ended) {
270+
// Add in feedback loop
271+
activity.channelData = Object.assign({}, activity.channelData, {
272+
feedbackLoopEnabled: this._enableFeedbackLoop
273+
});
274+
275+
// Add in Generated by AI
276+
if (this._enableGeneratedByAILabel) {
277+
activity.entities.push({
278+
type: 'https://schema.org/Message',
279+
'@type': 'Message',
280+
'@context': 'https://schema.org',
281+
'@id': '',
282+
additionalType: ['AIGeneratedContent']
283+
} as AIEntity);
284+
}
285+
}
286+
230287
// Send activity
231288
const response = await this._context.sendActivity(activity);
289+
await new Promise((resolve) => setTimeout(resolve, 1.5));
232290

233291
// Save assigned stream ID
234292
if (!this._streamId) {

js/packages/teams-ai/src/planners/ActionPlanner.ts

Lines changed: 12 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -90,6 +90,11 @@ export interface ActionPlannerOptions<TState extends TurnState = TurnState> {
9090
* Optional handler to run when a stream is about to conclude.
9191
*/
9292
endStreamHandler?: PromptCompletionModelResponseReceivedEvent;
93+
94+
/**
95+
* If true, the feedback loop will be enabled for streaming responses.
96+
*/
97+
enableFeedbackLoop?: boolean;
9398
}
9499

95100
/**
@@ -116,6 +121,7 @@ export class ActionPlanner<TState extends TurnState = TurnState> implements Plan
116121
private readonly _options: ActionPlannerOptions<TState>;
117122
private readonly _promptFactory: ActionPlannerPromptFactory<TState>;
118123
private readonly _defaultPrompt?: string;
124+
private _enableFeedbackLoop: boolean | undefined;
119125

120126
/**
121127
* Creates a new `ActionPlanner` instance.
@@ -187,6 +193,10 @@ export class ActionPlanner<TState extends TurnState = TurnState> implements Plan
187193
// Identify the augmentation to use
188194
const augmentation = template.augmentation ?? new DefaultAugmentation();
189195

196+
if (ai.enableFeedbackLoop != null) {
197+
this._enableFeedbackLoop = ai.enableFeedbackLoop;
198+
}
199+
190200
// Complete prompt
191201
const result = await this.completePrompt(context, state, template, augmentation);
192202
if (result.status != 'success') {
@@ -265,7 +275,8 @@ export class ActionPlanner<TState extends TurnState = TurnState> implements Plan
265275
max_repair_attempts: this._options.max_repair_attempts,
266276
logRepairs: this._options.logRepairs,
267277
startStreamingMessage: this._options.startStreamingMessage,
268-
endStreamHandler: this._options.endStreamHandler
278+
endStreamHandler: this._options.endStreamHandler,
279+
enableFeedbackLoop: this._enableFeedbackLoop
269280
});
270281

271282
// Complete prompt

js/packages/teams-ai/src/planners/LLMClient.spec.ts

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -114,7 +114,8 @@ describe('LLMClient', function () {
114114
const client = new LLMClient({
115115
model: streamingModel,
116116
template,
117-
startStreamingMessage: 'start'
117+
startStreamingMessage: 'start',
118+
enableFeedbackLoop: true
118119
});
119120
const response = await client.completePrompt(context, state, functions);
120121
assert.equal(adapter.activeQueue.length, 4, 'adapter should have 4 messages in the queue');

js/packages/teams-ai/src/planners/LLMClient.ts

Lines changed: 14 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -97,6 +97,11 @@ export interface LLMClientOptions<TContent = any> {
9797
* Optional handler to run when a stream is about to conclude.
9898
*/
9999
endStreamHandler?: PromptCompletionModelResponseReceivedEvent;
100+
101+
/**
102+
* If true, the feedback loop will be enabled for streaming responses.
103+
*/
104+
enableFeedbackLoop?: boolean;
100105
}
101106

102107
/**
@@ -200,6 +205,7 @@ export interface ConfiguredLLMClientOptions<TContent = any> {
200205
export class LLMClient<TContent = any> {
201206
private readonly _startStreamingMessage: string | undefined;
202207
private readonly _endStreamHandler: PromptCompletionModelResponseReceivedEvent | undefined;
208+
private readonly _enableFeedbackLoop: boolean | undefined;
203209

204210
/**
205211
* Configured options for this LLMClient instance.
@@ -234,6 +240,7 @@ export class LLMClient<TContent = any> {
234240

235241
this._startStreamingMessage = options.startStreamingMessage;
236242
this._endStreamHandler = options.endStreamHandler;
243+
this._enableFeedbackLoop = options.enableFeedbackLoop;
237244
}
238245

239246
/**
@@ -299,6 +306,13 @@ export class LLMClient<TContent = any> {
299306
// Create streamer and send initial message
300307
streamer = new StreamingResponse(context);
301308
memory.setValue('temp.streamer', streamer);
309+
310+
if (this._enableFeedbackLoop != null) {
311+
streamer.setFeedbackLoop(this._enableFeedbackLoop);
312+
}
313+
314+
streamer.setGeneratedByAILabel(true);
315+
302316
if (this._startStreamingMessage) {
303317
streamer.queueInformativeUpdate(this._startStreamingMessage);
304318
}

js/samples/04.ai-apps/i.teamsChefBot-streaming/src/index.ts

Lines changed: 8 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -94,7 +94,7 @@ const model = new OpenAIModel({
9494

9595
// Azure OpenAI Support
9696
// azureApiKey: process.env.AZURE_OPENAI_KEY!,
97-
// azureDefaultDeployment: 'gpt-3.5-turbo',
97+
// azureDefaultDeployment: 'gpt-4o',
9898
// azureEndpoint: process.env.AZURE_OPENAI_ENDPOINT!,
9999
// azureApiVersion: '2023-03-15-preview',
100100

@@ -141,8 +141,9 @@ const storage = new MemoryStorage();
141141
const app = new Application<ApplicationTurnState>({
142142
storage,
143143
ai: {
144-
planner
145-
}
144+
planner,
145+
enable_feedback_loop: true,
146+
},
146147
});
147148

148149
// Register your data source with planner
@@ -173,6 +174,10 @@ app.ai.action(AI.FlaggedOutputActionName, async (context: TurnContext, state: Ap
173174
return AI.StopCommandName;
174175
});
175176

177+
app.feedbackLoop(async (context, state, feedbackLoopData) => {
178+
console.log("Feedback loop triggered");
179+
});
180+
176181
// Listen for incoming server requests.
177182
server.post('/api/messages', async (req, res) => {
178183
// Route received a request to adapter for processing

0 commit comments

Comments (0)