diff --git a/js/.nycrc b/js/.nycrc index fb53ca077..079a26f09 100644 --- a/js/.nycrc +++ b/js/.nycrc @@ -7,7 +7,7 @@ "**/coverage/**", "**/*.d.ts", "**/*.spec.ts", - "packages/**/src/index.ts" + "packages/**/src/**/index.ts" ], "reporter": ["html", "text"], "all": true, diff --git a/js/packages/teams-ai/package.json b/js/packages/teams-ai/package.json index 2ff424e03..dd74d1d54 100644 --- a/js/packages/teams-ai/package.json +++ b/js/packages/teams-ai/package.json @@ -2,7 +2,7 @@ "name": "@microsoft/teams-ai", "author": "Microsoft Corp.", "description": "SDK focused on building AI based applications for Microsoft Teams.", - "version": "1.0.0-preview.1", + "version": "1.0.0-preview.2", "license": "MIT", "keywords": [ "botbuilder", diff --git a/js/packages/teams-ai/src/AI.ts b/js/packages/teams-ai/src/AI.ts index 26e32d32c..dd009abc0 100644 --- a/js/packages/teams-ai/src/AI.ts +++ b/js/packages/teams-ai/src/AI.ts @@ -25,7 +25,12 @@ export interface PredictedDoCommandAndHandler extends PredictedDoCommand * @param action Name of the action being executed. * @returns Whether the AI system should continue executing the plan. */ - handler: (context: TurnContext, state: TState, parameters?: Record, action?: string) => Promise; + handler: ( + context: TurnContext, + state: TState, + parameters?: Record, + action?: string + ) => Promise; } /** @@ -225,11 +230,14 @@ export class AI { * @param {ConfiguredAIOptions} options The options used to configure the AI system. */ public constructor(options: AIOptions) { - this._options = Object.assign({ - max_steps: 25, - max_time: 300000, - allow_looping: true - }, options) as ConfiguredAIOptions; + this._options = Object.assign( + { + max_steps: 25, + max_time: 300000, + allow_looping: true + }, + options + ) as ConfiguredAIOptions; // Create moderator if needed if (!this._options.moderator) { @@ -237,52 +245,37 @@ export class AI { } // Register default UnknownAction handler - this.defaultAction( - AI.UnknownActionName, - (context, state, data, action?) => { - console.error(`An AI action named "${action}" was predicted but no handler was registered.`); - return Promise.resolve(AI.StopCommandName); - } - ); + this.defaultAction(AI.UnknownActionName, (context, state, data, action?) 
=> { + console.error(`An AI action named "${action}" was predicted but no handler was registered.`); + return Promise.resolve(AI.StopCommandName); + }); // Register default FlaggedInputAction handler - this.defaultAction( - AI.FlaggedInputActionName, - () => { - console.error( - `The users input has been moderated but no handler was registered for 'AI.FlaggedInputActionName'.` - ); - return Promise.resolve(AI.StopCommandName); - } - ); + this.defaultAction(AI.FlaggedInputActionName, () => { + console.error( + `The users input has been moderated but no handler was registered for 'AI.FlaggedInputActionName'.` + ); + return Promise.resolve(AI.StopCommandName); + }); // Register default FlaggedOutputAction handler - this.defaultAction( - AI.FlaggedOutputActionName, - () => { - console.error( - `The bots output has been moderated but no handler was registered for 'AI.FlaggedOutputActionName'.` - ); - return Promise.resolve(AI.StopCommandName); - } - ); + this.defaultAction(AI.FlaggedOutputActionName, () => { + console.error( + `The bots output has been moderated but no handler was registered for 'AI.FlaggedOutputActionName'.` + ); + return Promise.resolve(AI.StopCommandName); + }); // Register default HttpErrorActionName - this.defaultAction( - AI.HttpErrorActionName, - (context, state, data, action) => { - throw new Error(`An AI http request failed`); - } - ); + this.defaultAction(AI.HttpErrorActionName, (context, state, data, action) => { + throw new Error(`An AI http request failed`); + }); // Register default PlanReadyActionName - this.defaultAction( - AI.PlanReadyActionName, - (context, state, plan) => { - const isValid = Array.isArray(plan.commands) && plan.commands.length > 0; - return Promise.resolve(!isValid ? AI.StopCommandName : ''); - } - ); + this.defaultAction(AI.PlanReadyActionName, (context, state, plan) => { + const isValid = Array.isArray(plan.commands) && plan.commands.length > 0; + return Promise.resolve(!isValid ? AI.StopCommandName : ''); + }); // Register default DoCommandActionName this.defaultAction>( @@ -294,32 +287,26 @@ export class AI { ); // Register default SayCommandActionName - this.defaultAction( - AI.SayCommandActionName, - async (context, state, data, action) => { - const response = data.response; - if (context.activity.channelId == Channels.Msteams) { - await context.sendActivity(response.split('\n').join('
')); - } else { - await context.sendActivity(response); - } - - return ''; + this.defaultAction(AI.SayCommandActionName, async (context, state, data, action) => { + const response = data.response; + if (context.activity.channelId == Channels.Msteams) { + await context.sendActivity(response.split('\n').join('
')); + } else { + await context.sendActivity(response); } - ); + + return ''; + }); // Register default TooManyStepsActionName - this.defaultAction( - AI.TooManyStepsActionName, - async (context, state, data, action) => { - const { max_steps, step_count } = data; - if (step_count > max_steps) { - throw new Error(`The AI system has exceeded the maximum number of steps allowed.`); - } else { - throw new Error(`The AI system has exceeded the maximum amount of time allowed.`); - } + this.defaultAction(AI.TooManyStepsActionName, async (context, state, data, action) => { + const { max_steps, step_count } = data; + if (step_count > max_steps) { + throw new Error(`The AI system has exceeded the maximum number of steps allowed.`); + } else { + throw new Error(`The AI system has exceeded the maximum amount of time allowed.`); } - ); + }); } /** @@ -391,7 +378,7 @@ export class AI { * @param handler Function to call when the action is triggered. * @returns The AI system instance for chaining purposes. */ - public defaultAction | undefined)>( + public defaultAction | undefined>( name: string | string[], handler: (context: TurnContext, state: TState, parameters: TParameters, action?: string) => Promise ): this { @@ -434,7 +421,6 @@ export class AI { return this._actions.has(action); } - /** * Calls the configured planner to generate a plan and executes the plan that is returned. * @remarks @@ -447,23 +433,7 @@ export class AI { * @param step_count Optional. Number of steps that have been executed. * @returns True if the plan was completely executed, otherwise false. */ - public async run( - context: TurnContext, - state: TState, - start_time?: number, - step_count?: number - ): Promise { - // Populate {{$temp.input}} - if (typeof state.temp.input != 'string') { - // Use the received activity text - state.temp.input = context.activity.text; - } - - // Initialize {{$allOutputs}} - if (state.temp.actionOutputs == undefined) { - state.temp.actionOutputs = {}; - } - + public async run(context: TurnContext, state: TState, start_time?: number, step_count?: number): Promise { // Initialize start time and action count const { max_steps, max_time } = this._options; if (start_time === undefined) { @@ -474,7 +444,8 @@ export class AI { } // Review input on first loop - let plan: Plan|undefined = step_count == 0 ? await this._options.moderator.reviewInput(context, state) : undefined; + let plan: Plan | undefined = + step_count == 0 ? await this._options.moderator.reviewInput(context, state) : undefined; // Generate plan if (!plan) { @@ -490,7 +461,9 @@ export class AI { // Process generated plan let completed = false; - let response = await this._actions.get(AI.PlanReadyActionName)!.handler(context, state, plan, AI.PlanReadyActionName); + const response = await this._actions + .get(AI.PlanReadyActionName)! + .handler(context, state, plan, AI.PlanReadyActionName); if (response == AI.StopCommandName) { return false; } @@ -503,8 +476,15 @@ export class AI { // Check for timeout if (Date.now() - start_time! > max_time || ++step_count! > max_steps) { completed = false; - const parameters: TooManyStepsParameters = { max_steps, max_time, start_time: start_time!, step_count: step_count! }; - await this._actions.get(AI.TooManyStepsActionName)!.handler(context, state, parameters, AI.TooManyStepsActionName); + const parameters: TooManyStepsParameters = { + max_steps, + max_time, + start_time: start_time!, + step_count: step_count! + }; + await this._actions + .get(AI.TooManyStepsActionName)! 
+ .handler(context, state, parameters, AI.TooManyStepsActionName); break; } @@ -524,9 +504,7 @@ export class AI { state.temp.actionOutputs[action] = output; } else { // Redirect to UnknownAction handler - output = await this._actions - .get(AI.UnknownActionName)! - .handler(context, state, plan, action); + output = await this._actions.get(AI.UnknownActionName)!.handler(context, state, plan, action); } break; } @@ -549,6 +527,7 @@ export class AI { // Copy the actions output to the input state.temp.lastOutput = output; state.temp.input = output; + state.temp.inputFiles = []; } // Check for looping diff --git a/js/packages/teams-ai/src/Application.ts b/js/packages/teams-ai/src/Application.ts index fe049480e..09dc4afbe 100644 --- a/js/packages/teams-ai/src/Application.ts +++ b/js/packages/teams-ai/src/Application.ts @@ -22,17 +22,18 @@ import { ReadReceiptInfo } from 'botframework-connector'; import { AdaptiveCards, AdaptiveCardsOptions } from './AdaptiveCards'; import { AI, AIOptions } from './AI'; +import { Meetings } from './Meetings'; import { MessageExtensions } from './MessageExtensions'; import { TaskModules, TaskModulesOptions } from './TaskModules'; import { AuthenticationManager, AuthenticationOptions } from './authentication/Authentication'; import { TurnState } from './TurnState'; +import { InputFileDownloader } from './InputFileDownloader'; import { deleteUserInSignInFlow, setTokenInState, setUserInSignInFlow, userInSignInFlow } from './authentication/BotAuthenticationBase'; -import { Meetings } from './Meetings'; /** * @private @@ -133,6 +134,11 @@ export interface ApplicationOptions { * Optional. Factory used to create a custom turn state instance. */ turnStateFactory: () => TState; + + /** + * Optional. Array of input file download plugins to use. + */ + fileDownloaders?: InputFileDownloader[]; } /** @@ -668,6 +674,27 @@ export class Application { return false; } + // Populate {{$temp.input}} + if (typeof state.temp.input != 'string') { + // Use the received activity text + state.temp.input = context.activity.text; + } + + // Download any input files + if (Array.isArray(this._options.fileDownloaders) && this._options.fileDownloaders.length > 0) { + const inputFiles = state.temp.inputFiles ?? []; + for (let i = 0; i < this._options.fileDownloaders.length; i++) { + const files = await this._options.fileDownloaders[i].downloadFiles(context, state); + inputFiles.push(...files); + } + state.temp.inputFiles = inputFiles; + } + + // Initialize {{$allOutputs}} + if (state.temp.actionOutputs == undefined) { + state.temp.actionOutputs = {}; + } + // Run any RouteSelectors in this._invokeRoutes first if the incoming Teams activity.type is "Invoke". // Invoke Activities from Teams need to be responded to in less than 5 seconds. if (context.activity.type === ActivityTypes.Invoke) { diff --git a/js/packages/teams-ai/src/InputFileDownloader.ts b/js/packages/teams-ai/src/InputFileDownloader.ts new file mode 100644 index 000000000..f4ae20e22 --- /dev/null +++ b/js/packages/teams-ai/src/InputFileDownloader.ts @@ -0,0 +1,43 @@ +/** + * @module teams-ai + */ +/** + * Copyright (c) Microsoft Corporation. All rights reserved. + * Licensed under the MIT License. + */ + +import { TurnContext } from 'botbuilder'; +import { TurnState } from './TurnState'; + +/** + * A plugin responsible for downloading files relative to the current user's input. + * @template TState Optional. Type of application state. 
+ */ +export interface InputFileDownloader { + /** + * Download any files relative to the current user's input. + * @param context Context for the current turn of conversation. + * @param state Application state for the current turn of conversation. + */ + downloadFiles(context: TurnContext, state: TState): Promise; +} + +/** + * A file sent by the user to the bot. + */ +export interface InputFile { + /** + * The downloaded content of the file. + */ + content: Buffer; + + /** + * The content type of the file. + */ + contentType: string; + + /** + * Optional. URL to the content of the file. + */ + contentUrl?: string; +} diff --git a/js/packages/teams-ai/src/TeamsAttachmentDownloader.ts b/js/packages/teams-ai/src/TeamsAttachmentDownloader.ts new file mode 100644 index 000000000..317d87211 --- /dev/null +++ b/js/packages/teams-ai/src/TeamsAttachmentDownloader.ts @@ -0,0 +1,129 @@ +/** + * @module teams-ai + */ +/** + * Copyright (c) Microsoft Corporation. All rights reserved. + * Licensed under the MIT License. + */ + +import axios, { AxiosInstance } from 'axios'; +import { Attachment, TurnContext } from 'botbuilder'; +import { TurnState } from './TurnState'; +import { InputFile, InputFileDownloader } from './InputFileDownloader'; + +/** + * Options for the `TeamsAttachmentDownloader` class. + */ +export interface TeamsAttachmentDownloaderOptions { + /** + * The Microsoft App ID of the bot. + */ + botAppId: string; + + /** + * The Microsoft App Password of the bot. + */ + botAppPassword: string; +} + +/** + * Downloads attachments from Teams using the bots access token. + */ +export class TeamsAttachmentDownloader implements InputFileDownloader { + private readonly _options: TeamsAttachmentDownloaderOptions; + private _httpClient: AxiosInstance; + + /** + * Creates a new instance of the `TeamsAttachmentDownloader` class. + * @param options Options for the `TeamsAttachmentDownloader` class. + */ + public constructor(options: TeamsAttachmentDownloaderOptions) { + this._options = options; + this._httpClient = axios.create(); + } + + /** + * Download any files relative to the current user's input. + * @param context Context for the current turn of conversation. + * @param state Application state for the current turn of conversation. 
+ */ + public async downloadFiles(context: TurnContext, state: TState): Promise { + // Filter out HTML attachments + const attachments = context.activity.attachments?.filter((a) => !a.contentType.startsWith('text/html')); + if (!attachments || attachments.length === 0) { + return Promise.resolve([]); + } + + // Download all attachments + const accessToken = await this.getAccessToken(); + const files: InputFile[] = []; + for (const attachment of attachments) { + const file = await this.downloadFile(attachment, accessToken); + if (file) { + files.push(file); + } + } + + return files; + } + + /** + * @private + */ + private async downloadFile(attachment: Attachment, accessToken: string): Promise { + if (attachment.contentUrl && attachment.contentUrl.startsWith('https://')) { + // Download file + const headers = { + 'Authorization': `Bearer ${accessToken}` + }; + const response = await this._httpClient.get(attachment.contentUrl, { + headers, + responseType: 'arraybuffer' + }); + + // Convert to a buffer + const content = Buffer.from(response.data, 'binary'); + + // Fixup content type + let contentType = attachment.contentType; + if (contentType === 'image/*') { + contentType = 'image/png'; + } + + // Return file + return { + content, + contentType, + contentUrl: attachment.contentUrl, + }; + } else { + return { + content: Buffer.from(attachment.content), + contentType: attachment.contentType, + contentUrl: attachment.contentUrl, + }; + } + } + + /** + * @private + */ + private async getAccessToken(): Promise { + const headers = { + 'Content-Type': 'application/x-www-form-urlencoded' + }; + const body = `grant_type=client_credentials&client_id=${encodeURI(this._options.botAppId)}&client_secret=${encodeURI(this._options.botAppPassword)}&scope=https%3A%2F%2Fapi.botframework.com%2F.default`; + const token = await this._httpClient.post('https://login.microsoftonline.com/botframework.com/oauth2/v2.0/token', body, { headers }); + return token.data.access_token; + } +} + +/** + * @private + */ +interface JWTToken { + token_type: string; + expires_in: number; + ext_expires_in: number; + access_token: string; +} diff --git a/js/packages/teams-ai/src/TurnState.ts b/js/packages/teams-ai/src/TurnState.ts index bacef182f..67de1d1ac 100644 --- a/js/packages/teams-ai/src/TurnState.ts +++ b/js/packages/teams-ai/src/TurnState.ts @@ -8,6 +8,7 @@ import { TurnContext, Storage, StoreItems } from 'botbuilder'; import { Memory } from './MemoryFork'; +import { InputFile } from './InputFileDownloader'; /** * @private @@ -50,14 +51,14 @@ export interface DefaultUserState {} */ export interface DefaultTempState { /** - * Input passed to an AI prompt + * Input passed from the user to the AI Library */ input: string; /** - * Formatted conversation history for embedding in an AI prompt + * Downloaded files passed by the user to the AI Library */ - history: string; + inputFiles: InputFile[]; /** * Output returned from the last executed action @@ -77,7 +78,7 @@ export interface DefaultTempState { /** * Flag indicating whether a token exchange event has already been processed */ - duplicateTokenExchange?: boolean + duplicateTokenExchange?: boolean; } /** diff --git a/js/packages/teams-ai/src/authentication/Authentication.spec.ts b/js/packages/teams-ai/src/authentication/Authentication.spec.ts index 2b8c0c361..2ec7c3c52 100644 --- a/js/packages/teams-ai/src/authentication/Authentication.spec.ts +++ b/js/packages/teams-ai/src/authentication/Authentication.spec.ts @@ -48,7 +48,7 @@ describe('Authentication', () => { await 
state.load(context); state.temp = { input: '', - history: '', + inputFiles: [], lastOutput: '', actionOutputs: {}, authTokens: {} @@ -85,7 +85,10 @@ describe('Authentication', () => { it('should call botAuth.authenticate() when activity type is message and the text is a non-empty string', async () => { const isUserSignedInStub = sinon.stub(auth, 'isUserSignedIn').returns(Promise.resolve(undefined)); - const [context, state] = await createTurnContextAndState({ type: ActivityTypes.Message, text: 'non empty' }); + const [context, state] = await createTurnContextAndState({ + type: ActivityTypes.Message, + text: 'non empty' + }); await auth.signInUser(context, state); @@ -291,7 +294,7 @@ describe('AuthenticationManager', () => { await state.load(context); state.temp = { input: '', - history: '', + inputFiles: [], lastOutput: '', actionOutputs: {}, authTokens: {} diff --git a/js/packages/teams-ai/src/authentication/BotAuthenticationBase.spec.ts b/js/packages/teams-ai/src/authentication/BotAuthenticationBase.spec.ts index 9465081c8..9a75edf6b 100644 --- a/js/packages/teams-ai/src/authentication/BotAuthenticationBase.spec.ts +++ b/js/packages/teams-ai/src/authentication/BotAuthenticationBase.spec.ts @@ -36,7 +36,7 @@ describe('BotAuthenticationBase.ts utility functions', () => { await state.load(context); state.temp = { input: '', - history: '', + inputFiles: [], lastOutput: '', actionOutputs: {}, authTokens: {} diff --git a/js/packages/teams-ai/src/authentication/OAuthAdaptiveCardAuthentication.spec.ts b/js/packages/teams-ai/src/authentication/OAuthAdaptiveCardAuthentication.spec.ts index b620d8f83..3d1997718 100644 --- a/js/packages/teams-ai/src/authentication/OAuthAdaptiveCardAuthentication.spec.ts +++ b/js/packages/teams-ai/src/authentication/OAuthAdaptiveCardAuthentication.spec.ts @@ -34,7 +34,7 @@ describe('AdaptiveCardAuthenticaion', () => { await state.load(context); state.temp = { input: '', - history: '', + inputFiles: [], lastOutput: '', actionOutputs: {}, authTokens: {} diff --git a/js/packages/teams-ai/src/authentication/OAuthBotAuthentication.spec.ts b/js/packages/teams-ai/src/authentication/OAuthBotAuthentication.spec.ts index 3e1166123..5b46b55a1 100644 --- a/js/packages/teams-ai/src/authentication/OAuthBotAuthentication.spec.ts +++ b/js/packages/teams-ai/src/authentication/OAuthBotAuthentication.spec.ts @@ -39,7 +39,7 @@ describe('BotAuthentication', () => { await state.load(context); state.temp = { input: '', - history: '', + inputFiles: [], lastOutput: '', actionOutputs: {}, authTokens: {} diff --git a/js/packages/teams-ai/src/authentication/OAuthMessageExtensionAuthentication.spec.ts b/js/packages/teams-ai/src/authentication/OAuthMessageExtensionAuthentication.spec.ts index 3af46f2bf..b4fa05782 100644 --- a/js/packages/teams-ai/src/authentication/OAuthMessageExtensionAuthentication.spec.ts +++ b/js/packages/teams-ai/src/authentication/OAuthMessageExtensionAuthentication.spec.ts @@ -33,7 +33,7 @@ describe('OAuthPromptMessageExtensionAuthentication', () => { await state.load(context); state.temp = { input: '', - history: '', + inputFiles: [], lastOutput: '', actionOutputs: {}, authTokens: {} diff --git a/js/packages/teams-ai/src/authentication/TeamsSsoBotAuthentication.spec.ts b/js/packages/teams-ai/src/authentication/TeamsSsoBotAuthentication.spec.ts index 2f204807d..de6dd7e46 100644 --- a/js/packages/teams-ai/src/authentication/TeamsSsoBotAuthentication.spec.ts +++ b/js/packages/teams-ai/src/authentication/TeamsSsoBotAuthentication.spec.ts @@ -42,7 +42,7 @@ 
describe('TeamsSsoBotAuthentication', () => { await state.load(context); state.temp = { input: '', - history: '', + inputFiles: [], lastOutput: '', actionOutputs: {}, authTokens: {} diff --git a/js/packages/teams-ai/src/authentication/TeamsSsoMessageExtensionAuthentication.spec.ts b/js/packages/teams-ai/src/authentication/TeamsSsoMessageExtensionAuthentication.spec.ts index 1e961c1a9..298c4e50e 100644 --- a/js/packages/teams-ai/src/authentication/TeamsSsoMessageExtensionAuthentication.spec.ts +++ b/js/packages/teams-ai/src/authentication/TeamsSsoMessageExtensionAuthentication.spec.ts @@ -46,7 +46,7 @@ describe('TeamsSsoMessageExtensionAuthentication', () => { await state.load(context); state.temp = { input: '', - history: '', + inputFiles: [], lastOutput: '', actionOutputs: {}, authTokens: {} diff --git a/js/packages/teams-ai/src/index.ts b/js/packages/teams-ai/src/index.ts index 2914c38a1..401bfc997 100644 --- a/js/packages/teams-ai/src/index.ts +++ b/js/packages/teams-ai/src/index.ts @@ -19,9 +19,11 @@ export * from './validators'; export * from './AdaptiveCards'; export * from './AI'; export * from './Application'; +export * from './InputFileDownloader'; export * from './MemoryFork'; export * from './MessageExtensions'; export * from './TaskModules'; +export * from './TeamsAttachmentDownloader'; export * from './TestTurnState'; export * from './TurnState'; export * from './Utilities'; diff --git a/js/packages/teams-ai/src/models/OpenAIModel.ts b/js/packages/teams-ai/src/models/OpenAIModel.ts index 6a1ba070b..cf65bddad 100644 --- a/js/packages/teams-ai/src/models/OpenAIModel.ts +++ b/js/packages/teams-ai/src/models/OpenAIModel.ts @@ -7,7 +7,7 @@ */ import axios, { AxiosInstance, AxiosResponse, AxiosRequestConfig } from 'axios'; -import { PromptFunctions, PromptTemplate } from "../prompts"; +import { Message, PromptFunctions, PromptTemplate } from "../prompts"; import { PromptCompletionModel, PromptResponse } from "./PromptCompletionModel"; import { ChatCompletionRequestMessage, CreateChatCompletionRequest, CreateChatCompletionResponse, CreateCompletionRequest, CreateCompletionResponse, OpenAICreateChatCompletionRequest, OpenAICreateCompletionRequest } from "../internals"; import { Tokenizer } from "../tokenizers"; @@ -175,11 +175,13 @@ export class OpenAIModel implements PromptCompletionModel { public async completePrompt(context: TurnContext, memory: Memory, functions: PromptFunctions, tokenizer: Tokenizer, template: PromptTemplate): Promise> { const startTime = Date.now(); const max_input_tokens = template.config.completion.max_input_tokens; - if (this.options.completion_type == 'text') { + const completion_type = template.config.completion.completion_type ?? this.options.completion_type; + const model = template.config.completion.model ?? (this._useAzure ? 
(this.options as AzureOpenAIModelOptions).azureDefaultDeployment : (this.options as OpenAIModelOptions).defaultModel); + if (completion_type == 'text') { // Render prompt const result = await template.prompt.renderAsText(context, memory, functions, tokenizer, max_input_tokens); if (result.tooLong) { - return { status: 'too_long', error: new Error(`The generated text completion prompt had a length of ${result.length} tokens which exceeded the max_input_tokens of ${max_input_tokens}.`) }; + return { status: 'too_long', input: undefined, error: new Error(`The generated text completion prompt had a length of ${result.length} tokens which exceeded the max_input_tokens of ${max_input_tokens}.`) }; } if (this.options.logRequests) { console.log(Colorize.title('PROMPT:')); @@ -189,8 +191,8 @@ export class OpenAIModel implements PromptCompletionModel { // Call text completion API const request: CreateCompletionRequest = this.copyOptionsToRequest({ prompt: result.output, - }, this.options, ['max_tokens', 'temperature', 'top_p', 'n', 'stream', 'logprobs', 'echo', 'stop', 'presence_penalty', 'frequency_penalty', 'best_of', 'logit_bias', 'user']); - const response = await this.createCompletion(request); + }, template.config.completion, ['max_tokens', 'temperature', 'top_p', 'n', 'stream', 'logprobs', 'echo', 'stop', 'presence_penalty', 'frequency_penalty', 'best_of', 'logit_bias', 'user']); + const response = await this.createCompletion(request, model); if (this.options.logRequests) { console.log(Colorize.title('RESPONSE:')); console.log(Colorize.value('status', response.status)); @@ -201,21 +203,21 @@ export class OpenAIModel implements PromptCompletionModel { // Process response if (response.status < 300) { const completion = response.data.choices[0]; - return { status: 'success', message: { role: 'assistant', content: completion.text ?? '' } }; + return { status: 'success', input: undefined, message: { role: 'assistant', content: completion.text ?? 
'' } }; } else if (response.status == 429) { if (this.options.logRequests) { console.log(Colorize.title('HEADERS:')); console.log(Colorize.output(response.headers)); } - return { status: 'rate_limited', error: new Error(`The text completion API returned a rate limit error.`) } + return { status: 'rate_limited', input: undefined, error: new Error(`The text completion API returned a rate limit error.`) } } else { - return { status: 'error', error: new Error(`The text completion API returned an error status of ${response.status}: ${response.statusText}`) }; + return { status: 'error', input: undefined, error: new Error(`The text completion API returned an error status of ${response.status}: ${response.statusText}`) }; } } else { // Render prompt const result = await template.prompt.renderAsMessages(context, memory, functions, tokenizer, max_input_tokens); if (result.tooLong) { - return { status: 'too_long', error: new Error(`The generated chat completion prompt had a length of ${result.length} tokens which exceeded the max_input_tokens of ${max_input_tokens}.`) }; + return { status: 'too_long', input: undefined, error: new Error(`The generated chat completion prompt had a length of ${result.length} tokens which exceeded the max_input_tokens of ${max_input_tokens}.`) }; } if (!this.options.useSystemMessages && result.output.length > 0 && result.output[0].role == 'system') { result.output[0].role = 'user'; @@ -225,11 +227,19 @@ export class OpenAIModel implements PromptCompletionModel { console.log(Colorize.output(result.output)); } + // Get input message + // - we're doing this here because the input message can be complex and include images. + let input: Message|undefined; + const last = result.output.length - 1; + if (last > 0 && result.output[last].role == 'user') { + input = result.output[last]; + } + // Call chat completion API const request: CreateChatCompletionRequest = this.copyOptionsToRequest({ messages: result.output as ChatCompletionRequestMessage[], - }, this.options, ['max_tokens', 'temperature', 'top_p', 'n', 'stream', 'logprobs', 'echo', 'stop', 'presence_penalty', 'frequency_penalty', 'best_of', 'logit_bias', 'user', 'functions', 'function_call']); - const response = await this.createChatCompletion(request); + }, template.config.completion, ['max_tokens', 'temperature', 'top_p', 'n', 'stream', 'logprobs', 'echo', 'stop', 'presence_penalty', 'frequency_penalty', 'best_of', 'logit_bias', 'user', 'functions', 'function_call']); + const response = await this.createChatCompletion(request, model); if (this.options.logRequests) { console.log(Colorize.title('CHAT RESPONSE:')); console.log(Colorize.value('status', response.status)); @@ -240,15 +250,15 @@ export class OpenAIModel implements PromptCompletionModel { // Process response if (response.status < 300) { const completion = response.data.choices[0]; - return { status: 'success', message: completion.message ?? { role: 'assistant', content: '' } }; + return { status: 'success', input, message: completion.message ?? 
{ role: 'assistant', content: '' } }; } else if (response.status == 429) { if (this.options.logRequests) { console.log(Colorize.title('HEADERS:')); console.log(Colorize.output(response.headers)); } - return { status: 'rate_limited', error: new Error(`The chat completion API returned a rate limit error.`) } + return { status: 'rate_limited', input: undefined, error: new Error(`The chat completion API returned a rate limit error.`) } } else { - return { status: 'error', error: new Error(`The chat completion API returned an error status of ${response.status}: ${response.statusText}`) }; + return { status: 'error', input: undefined, error: new Error(`The chat completion API returned an error status of ${response.status}: ${response.statusText}`) }; } } } @@ -269,15 +279,15 @@ export class OpenAIModel implements PromptCompletionModel { /** * @private */ - protected createCompletion(request: CreateCompletionRequest): Promise> { + protected createCompletion(request: CreateCompletionRequest, model: string): Promise> { if (this._useAzure) { const options = this.options as AzureOpenAIModelOptions; - const url = `${options.azureEndpoint}/openai/deployments/${options.azureDefaultDeployment}/completions?api-version=${options.azureApiVersion!}`; + const url = `${options.azureEndpoint}/openai/deployments/${model}/completions?api-version=${options.azureApiVersion!}`; return this.post(url, request); } else { const options = this.options as OpenAIModelOptions; const url = `${options.endpoint ?? 'https://api.openai.com'}/v1/completions`; - (request as OpenAICreateCompletionRequest).model = options.defaultModel; + (request as OpenAICreateCompletionRequest).model = model; return this.post(url, request); } } @@ -285,15 +295,15 @@ export class OpenAIModel implements PromptCompletionModel { /** * @private */ - protected createChatCompletion(request: CreateChatCompletionRequest): Promise> { + protected createChatCompletion(request: CreateChatCompletionRequest, model: string): Promise> { if (this._useAzure) { const options = this.options as AzureOpenAIModelOptions; - const url = `${options.azureEndpoint}/openai/deployments/${options.azureDefaultDeployment}/chat/completions?api-version=${options.azureApiVersion!}`; + const url = `${options.azureEndpoint}/openai/deployments/${model}/chat/completions?api-version=${options.azureApiVersion!}`; return this.post(url, request); } else { const options = this.options as OpenAIModelOptions; const url = `${options.endpoint ?? 'https://api.openai.com'}/v1/chat/completions`; - (request as OpenAICreateChatCompletionRequest).model = options.defaultModel; + (request as OpenAICreateChatCompletionRequest).model = model; return this.post(url, request); } } diff --git a/js/packages/teams-ai/src/models/PromptCompletionModel.ts b/js/packages/teams-ai/src/models/PromptCompletionModel.ts index 2167ca3d5..4c379336c 100644 --- a/js/packages/teams-ai/src/models/PromptCompletionModel.ts +++ b/js/packages/teams-ai/src/models/PromptCompletionModel.ts @@ -48,6 +48,11 @@ export interface PromptResponse { */ status: PromptResponseStatus; + /** + * User input message sent to the model. `undefined` if no input was sent. + */ + input?: Message; + /** * Message returned. 
* @remarks diff --git a/js/packages/teams-ai/src/models/TestModel.spec.ts b/js/packages/teams-ai/src/models/TestModel.spec.ts index 89e7665f8..5512dae52 100644 --- a/js/packages/teams-ai/src/models/TestModel.spec.ts +++ b/js/packages/teams-ai/src/models/TestModel.spec.ts @@ -20,6 +20,7 @@ describe("TestModel", () => { frequency_penalty: 0, include_history: true, include_input: true, + include_images: false, max_input_tokens: 100, max_tokens: 100, presence_penalty: 0, diff --git a/js/packages/teams-ai/src/models/TestModel.ts b/js/packages/teams-ai/src/models/TestModel.ts index 3013df831..f7e11cb96 100644 --- a/js/packages/teams-ai/src/models/TestModel.ts +++ b/js/packages/teams-ai/src/models/TestModel.ts @@ -55,9 +55,9 @@ export class TestModel implements PromptCompletionModel { */ public async completePrompt(context: TurnContext, memory: Memory, functions: PromptFunctions, tokenizer: Tokenizer, template: PromptTemplate): Promise> { if (this.error) { - return { status: this.status, error: this.error }; + return { status: this.status, input: undefined, error: this.error }; } else { - return { status: this.status, message: this.response }; + return { status: this.status, input: undefined, message: this.response }; } } } \ No newline at end of file diff --git a/js/packages/teams-ai/src/planners/LLMClient.ts b/js/packages/teams-ai/src/planners/LLMClient.ts index 08db9adbf..36b7e7139 100644 --- a/js/packages/teams-ai/src/planners/LLMClient.ts +++ b/js/packages/teams-ai/src/planners/LLMClient.ts @@ -271,23 +271,11 @@ export class LLMClient { * @param context Current turn context. * @param memory An interface for accessing state values. * @param functions Functions to use when rendering the prompt. - * @param input Optional. Input to use when completing the prompt. * @returns A `PromptResponse` with the status and message. */ - public async completePrompt(context: TurnContext, memory: Memory, functions: PromptFunctions, input?: string): Promise> { + public async completePrompt(context: TurnContext, memory: Memory, functions: PromptFunctions): Promise> { const { model, template, tokenizer, validator, max_repair_attempts, history_variable, input_variable } = this.options; - // Update/get user input - if (input_variable) { - if (typeof input === 'string') { - memory.setValue(input_variable, input); - } else { - input = memory.hasValue(input_variable) ? memory.getValue(input_variable) : '' - } - } else if (!input) { - input = ''; - } - try { // Ask client to complete prompt const response = await model.completePrompt(context, memory, functions, tokenizer, template) as PromptResponse; @@ -296,6 +284,13 @@ export class LLMClient { return response; } + // Get input message + let inputMsg = response.input; + if (!inputMsg && input_variable) { + const content = memory.getValue(input_variable) ?? ''; + inputMsg = { role: 'user', content }; + } + // Validate response const validation = await validator.validateResponse(context, memory, tokenizer, response as PromptResponse, max_repair_attempts); if (validation.valid) { @@ -305,7 +300,7 @@ export class LLMClient { } // Update history and return - this.addInputToHistory(memory, history_variable, input!); + this.addInputToHistory(memory, history_variable, inputMsg!); this.addResponseToHistory(memory, history_variable, response.message!); return response; } @@ -341,7 +336,7 @@ export class LLMClient { // - we never want to save an invalid response to conversation history. // - the caller can take further corrective action, including simply re-trying. 
if (repair.status === 'success') { - this.addInputToHistory(memory, history_variable, input!); + this.addInputToHistory(memory, history_variable, inputMsg!); this.addResponseToHistory(memory, history_variable, repair.message!); } @@ -349,6 +344,7 @@ export class LLMClient { } catch (err: unknown) { return { status: 'error', + input: undefined, error: err as Error }; } @@ -357,10 +353,10 @@ export class LLMClient { /** * @private */ - private addInputToHistory(memory: Memory, variable: string, input: string): void { - if (variable && input.length > 0) { + private addInputToHistory(memory: Memory, variable: string, input: Message): void { + if (variable) { const history: Message[] = memory.getValue(variable) ?? []; - history.push({ role: 'user', content: input }); + history.push(input); if (history.length > this.options.max_history_messages) { history.splice(0, history.length - this.options.max_history_messages); } @@ -391,7 +387,7 @@ export class LLMClient { // Add response and feedback to repair history const feedback = validation.feedback ?? 'The response was invalid. Try another strategy.'; this.addResponseToHistory(fork, `${history_variable}-repair`, response.message!); - this.addInputToHistory(fork, `${history_variable}-repair`, feedback); + this.addInputToHistory(fork, `${history_variable}-repair`, { role: 'user', content: feedback }); // Append repair history to prompt const repairTemplate = Object.assign({}, template, { @@ -429,6 +425,7 @@ export class LLMClient { if (remaining_attempts <= 0) { return { status: 'invalid_response', + input: undefined, error: new Error(validation.feedback ?? 'The response was invalid. Try another strategy.') }; } diff --git a/js/packages/teams-ai/src/prompts/ConversationHistory.ts b/js/packages/teams-ai/src/prompts/ConversationHistory.ts index 345d15561..76b9eee18 100644 --- a/js/packages/teams-ai/src/prompts/ConversationHistory.ts +++ b/js/packages/teams-ai/src/prompts/ConversationHistory.ts @@ -96,8 +96,14 @@ export class ConversationHistory extends PromptSectionBase { message.content = Utilities.toString(tokenizer, msg.content); } - // Get message length - const length = tokenizer.encode(PromptSectionBase.getMessageText(message)).length; + // Get text message length + let length = tokenizer.encode(PromptSectionBase.getMessageText(message)).length; + + // Add length of any image parts + // TODO: This accounts for low detail images but not high detail images. + if (Array.isArray(message.content)) { + length += message.content.filter((part) => part.type === 'image').length * 85; + } // Add initial message if required if (messages.length === 0 && this.required) { diff --git a/js/packages/teams-ai/src/prompts/Message.ts b/js/packages/teams-ai/src/prompts/Message.ts index 972e69555..96e73c0ec 100644 --- a/js/packages/teams-ai/src/prompts/Message.ts +++ b/js/packages/teams-ai/src/prompts/Message.ts @@ -46,3 +46,30 @@ export interface FunctionCall { */ arguments?: string; } + +export type MessageContentParts = TextContentPart | ImageContentPart; + +export interface TextContentPart { + /** + * Type of the message content. Should always be 'text'. + */ + type: 'text'; + + /** + * The text of the message. + */ + text: string; +} + +export interface ImageContentPart { + /** + * Type of the message content. Should always be 'image'. + */ + type: 'image'; + + /** + * The URL of the image. 
+ */ + image_url: string|{url: string}; +} + diff --git a/js/packages/teams-ai/src/prompts/PromptManager.ts b/js/packages/teams-ai/src/prompts/PromptManager.ts index 761efae71..3b3504aca 100644 --- a/js/packages/teams-ai/src/prompts/PromptManager.ts +++ b/js/packages/teams-ai/src/prompts/PromptManager.ts @@ -23,6 +23,7 @@ import { ConversationHistory } from "./ConversationHistory"; import { UserMessage } from "./UserMessage"; import { GroupSection } from "./GroupSection"; import { Prompt } from "./Prompt"; +import { UserInputMessage } from "./UserInputMessage"; /** * Options used to configure the prompt manager. @@ -319,7 +320,9 @@ export class PromptManager implements PromptFunctions { } // Include user input - if (template.config.completion.include_input) { + if (template.config.completion.include_images) { + sections.push(new UserInputMessage(this.options.max_input_tokens)); + } else if (template.config.completion.include_input) { sections.push(new UserMessage('{{$temp.input}}', this.options.max_input_tokens)); } @@ -364,6 +367,7 @@ export class PromptManager implements PromptFunctions { frequency_penalty: 0.0, include_history: true, include_input: true, + include_images: false, max_tokens: 150, max_input_tokens: 2048, presence_penalty: 0.0, diff --git a/js/packages/teams-ai/src/prompts/PromptSectionBase.ts b/js/packages/teams-ai/src/prompts/PromptSectionBase.ts index c0346a43d..b34499a38 100644 --- a/js/packages/teams-ai/src/prompts/PromptSectionBase.ts +++ b/js/packages/teams-ai/src/prompts/PromptSectionBase.ts @@ -6,7 +6,7 @@ * Licensed under the MIT License. */ -import { Message } from "./Message"; +import { Message, MessageContentParts } from "./Message"; import { PromptFunctions } from "./PromptFunctions"; import { PromptSection, RenderedPromptSection } from "./PromptSection"; import { TurnContext } from 'botbuilder'; @@ -85,7 +85,7 @@ export abstract class PromptSectionBase implements PromptSection { * @param maxTokens Maximum number of tokens allowed for the rendered prompt. * @returns The rendered prompt section. */ - public abstract renderAsMessages(context: TurnContext, memory: Memory, functions: PromptFunctions, tokenizer: Tokenizer, maxTokens: number): Promise>; + public abstract renderAsMessages(context: TurnContext, memory: Memory, functions: PromptFunctions, tokenizer: Tokenizer, maxTokens: number): Promise[]>>; /** * Calculates the token budget for the prompt section. @@ -135,8 +135,10 @@ export abstract class PromptSectionBase implements PromptSection { * @returns The message content as a string. */ public static getMessageText(message: Message): string { - let text = message.content ?? ''; - if (message.function_call) { + let text: MessageContentParts[]|string = message.content ?? ''; + if (Array.isArray(text)) { + text = text.filter((part) => part.type === 'text').map((part) => part.text).join(' '); + } else if (message.function_call) { text = JSON.stringify(message.function_call); } else if (message.name) { text = `${message.name} returned ${text}`; diff --git a/js/packages/teams-ai/src/prompts/PromptTemplate.ts b/js/packages/teams-ai/src/prompts/PromptTemplate.ts index fe594037e..a3315c129 100644 --- a/js/packages/teams-ai/src/prompts/PromptTemplate.ts +++ b/js/packages/teams-ai/src/prompts/PromptTemplate.ts @@ -114,6 +114,14 @@ export interface CompletionConfig { */ include_input: boolean; + /** + * If true, the prompt will be augmented with any images uploaded by the user. + * @remarks + * New in schema version 1.1. + * Defaults to false. 
+ */ + include_images: boolean; + /** * The maximum number of tokens to generate. * @remarks diff --git a/js/packages/teams-ai/src/prompts/UserInputMessage.ts b/js/packages/teams-ai/src/prompts/UserInputMessage.ts new file mode 100644 index 000000000..9e313396c --- /dev/null +++ b/js/packages/teams-ai/src/prompts/UserInputMessage.ts @@ -0,0 +1,98 @@ +/** + * @module teams-ai + */ +/** + * Copyright (c) Microsoft Corporation. All rights reserved. + * Licensed under the MIT License. + */ + +import { Message, MessageContentParts } from './Message'; +import { PromptFunctions } from './PromptFunctions'; +import { RenderedPromptSection } from './PromptSection'; +import { PromptSectionBase } from './PromptSectionBase'; +import { TurnContext } from 'botbuilder'; +import { Tokenizer } from '../tokenizers'; +import { Memory } from '../MemoryFork'; +import { InputFile } from '../InputFileDownloader'; + +/** + * A section capable of rendering user input text and images as a user message. + */ +export class UserInputMessage extends PromptSectionBase { + private readonly _inputVariable: string; + private readonly _filesVariable: string; + + /** + * Creates a new 'UserInputMessage' instance. + * @param tokens Optional. Sizing strategy for this section. Defaults to `auto`. + * @param inputVariable Optional. Name of the variable containing the user input text. Defaults to `input`. + * @param filesVariable Optional. Name of the variable containing the user input files. Defaults to `inputFiles`. + */ + public constructor(tokens: number = -1, inputVariable = 'input', filesVariable = 'inputFiles') { + super(tokens, true, '\n', 'user: '); + this._inputVariable = inputVariable; + this._filesVariable = filesVariable; + } + + /** + * @param context + * @param memory + * @param functions + * @param tokenizer + * @param maxTokens + * @private + */ + public async renderAsMessages( + context: TurnContext, + memory: Memory, + functions: PromptFunctions, + tokenizer: Tokenizer, + maxTokens: number + ): Promise[]>> { + // Get input text & images + const inputText: string = memory.getValue(this._inputVariable) ?? ''; + const inputFiles: InputFile[] = memory.getValue(this._filesVariable) ?? []; + + // Create message + const message: Message = { + role: 'user', + content: [] + }; + + // Append text content part + let length = 0; + let budget = this.getTokenBudget(maxTokens); + if (inputText.length > 0) { + const encoded = tokenizer.encode(inputText); + if (encoded.length <= budget) { + message.content!.push({ type: 'text', text: inputText }); + length += encoded.length; + budget -= encoded.length; + } else { + message.content!.push({ type: 'text', text: tokenizer.decode(encoded.slice(0, budget)) }); + length += budget; + budget = 0; + } + } + + // Append image content parts + const images = inputFiles.filter((f) => f.contentType.startsWith('image/')); + for (const image of images) { + // Check for budget to add image + // TODO: This accounts for low detail images but not high detail images. + // Additional work is needed to account for high detail images. 
+ if (budget < 85) { + break; + } + + // Add image + const url = `data:${image.contentType};base64,${image.content.toString('base64')}`; + message.content!.push({ type: 'image', image_url: { url } }); + length += 85; + budget -= 85; + } + + // Return output + return { output: [message], length, tooLong: false }; + } +} diff --git a/js/packages/teams-ai/src/prompts/index.ts b/js/packages/teams-ai/src/prompts/index.ts index b094feed4..1461ba172 100644 --- a/js/packages/teams-ai/src/prompts/index.ts +++ b/js/packages/teams-ai/src/prompts/index.ts @@ -24,4 +24,5 @@ export * from "./SystemMessage"; export * from "./TemplateSection"; export * from "./TestPromptManager"; export * from "./TextSection"; +export * from "./UserInputMessage"; export * from "./UserMessage"; diff --git a/js/samples/04.ai.a.teamsChefBot/package.json b/js/samples/04.ai.a.teamsChefBot/package.json index a24c8c6dc..443edf853 100644 --- a/js/samples/04.ai.a.teamsChefBot/package.json +++ b/js/samples/04.ai.a.teamsChefBot/package.json @@ -20,7 +20,7 @@ "url": "https://github.com" }, "dependencies": { - "@microsoft/teams-ai": "~1.0.0-preview", + "@microsoft/teams-ai": "~1.0.0-preview.2", "@microsoft/teamsfx": "^2.3.0", "botbuilder": "^4.21.2", "dotenv": "^16.3.1", diff --git a/js/samples/04.ai.a.teamsChefBot/src/index.ts b/js/samples/04.ai.a.teamsChefBot/src/index.ts index 418a80c88..f58301027 100644 --- a/js/samples/04.ai.a.teamsChefBot/src/index.ts +++ b/js/samples/04.ai.a.teamsChefBot/src/index.ts @@ -67,15 +67,7 @@ server.listen(process.env.port || process.env.PORT || 3978, () => { console.log('\nTo test your bot in Teams, sideload the app manifest.json within Teams Apps.'); }); -import { - AI, - Application, - ActionPlanner, - OpenAIModerator, - OpenAIModel, - PromptManager, - TurnState -} from '@microsoft/teams-ai'; +import { AI, Application, ActionPlanner, OpenAIModel, PromptManager, TurnState } from '@microsoft/teams-ai'; import { addResponseFormatter } from './responseFormatter'; import { VectraDataSource } from './VectraDataSource'; diff --git a/js/samples/04.ai.b.messageExtensions.AI-ME/src/index.ts b/js/samples/04.ai.b.messageExtensions.AI-ME/src/index.ts index 8dc95c865..76b7dcdb3 100644 --- a/js/samples/04.ai.b.messageExtensions.AI-ME/src/index.ts +++ b/js/samples/04.ai.b.messageExtensions.AI-ME/src/index.ts @@ -72,10 +72,7 @@ server.listen(process.env.port || process.env.PORT || 3978, () => { }); import { - AI, - Application, ActionPlanner, - OpenAIModerator, OpenAIModel, PromptManager, TurnState, diff --git a/js/samples/04.ai.f.vision.cardMaster/.eslintignore b/js/samples/04.ai.f.vision.cardMaster/.eslintignore new file mode 100644 index 000000000..542667af9 --- /dev/null +++ b/js/samples/04.ai.f.vision.cardMaster/.eslintignore @@ -0,0 +1,8 @@ +bin +build +demo-packages +dist +manifest +node_modules +package-lock.json +docs/assets/main.js \ No newline at end of file diff --git a/js/samples/04.ai.f.vision.cardMaster/.eslintrc b/js/samples/04.ai.f.vision.cardMaster/.eslintrc new file mode 100644 index 000000000..7f8ea4f1f --- /dev/null +++ b/js/samples/04.ai.f.vision.cardMaster/.eslintrc @@ -0,0 +1,68 @@ +{ + "parser": "@typescript-eslint/parser", + "root": true, + "env": { + "browser": true, + "node": true, + "es2015": true, + "mocha": true, + "jest": true + }, + "extends": [ + "eslint:recommended", + "plugin:@typescript-eslint/recommended", + "plugin:import/typescript", + "plugin:import/recommended", + "plugin:jsdoc/recommended", + + "plugin:security/recommended", + "plugin:prettier/recommended" // Recommended 
to be last + ], + "plugins": [ + "@typescript-eslint", + "jsdoc", + + "mocha", + "only-warn", + "prettier" + // "react" + ], + "parserOptions": { + "ecmaVersion": 2015, + // Allows for the parsing of modern ECMAScript features + "sourceType": "module" // Allows for the use of imports + // "ecmaFeatures": { + // "jsx": true + // } + }, + "rules": { + // Place to specify ESLint rules. Can be used to overwrite rules specified from the extended configs + "@typescript-eslint/ban-types": "off", + "@typescript-eslint/explicit-function-return-type": "off", + "@typescript-eslint/explicit-member-accessibility": "off", + "@typescript-eslint/explicit-module-boundary-types": "off", + "@typescript-eslint/interface-name-prefix": "off", + "@typescript-eslint/no-empty-function": "off", + "@typescript-eslint/no-explicit-any": "off", + "@typescript-eslint/no-namespace": "off", + "@typescript-eslint/no-non-null-assertion": "off", + "@typescript-eslint/no-unused-vars": "off", + "lodash/prefer-includes": "off", + "no-async-promise-executor": "off", + "no-constant-condition": "off", + "no-undef": "off", // Disabled due to conflicts with @typescript/eslint + "no-unused-vars": "off", // Disabled due to conflicts with @typescript/eslint + "prettier/prettier": "error" + }, + "overrides": [ + { + "files": ["bin/*.js", "lib/*.js"] + } + ], + "ignorePatterns": ["node_modules/*"], + "settings": { + // "react": { + // "version": "detect" + // } + } +} diff --git a/js/samples/04.ai.f.vision.cardMaster/.gitignore b/js/samples/04.ai.f.vision.cardMaster/.gitignore new file mode 100644 index 000000000..dc74ce75f --- /dev/null +++ b/js/samples/04.ai.f.vision.cardMaster/.gitignore @@ -0,0 +1,115 @@ +.zip + +# Logs +logs +*.log +npm-debug.log* +yarn-debug.log* +yarn-error.log* +lerna-debug.log* + +# Diagnostic reports (https://nodejs.org/api/report.html) +report.[0-9]*.[0-9]*.[0-9]*.[0-9]*.json + +# Runtime data +pids +*.pid +*.seed +*.pid.lock + +# Directory for instrumented libs generated by jscoverage/JSCover +lib-cov +/**/.vscode +/**/lib +lib + +# Coverage directory used by tools like istanbul +coverage +*.lcov + +# nyc test coverage +.nyc_output + +# Grunt intermediate storage (https://gruntjs.com/creating-plugins#storing-task-files) +.grunt + +# Bower dependency directory (https://bower.io/) +bower_components + +# node-waf configuration +.lock-wscript + +# Compiled binary addons (https://nodejs.org/api/addons.html) +build/Release + +# Dependency directories +node_modules/ +jspm_packages/ + +# TypeScript v1 declaration files +typings/ + +# TypeScript cache +*.tsbuildinfo + +# Optional npm cache directory +.npm + +# Optional eslint cache +.eslintcache + +# Microbundle cache +.rpt2_cache/ +.rts2_cache_cjs/ +.rts2_cache_es/ +.rts2_cache_umd/ + +# Optional REPL history +.node_repl_history + +# Output of 'npm pack' +*.tgz + +# Yarn Integrity file +.yarn-integrity + +# dotenv environment variables file +.env +.env.test + +# parcel-bundler cache (https://parceljs.org/) +.cache + +# Next.js build output +.next + +# Nuxt.js build / generate output +.nuxt +dist + +# Gatsby files +.cache/ +# Comment in the public line in if your project uses Gatsby and *not* Next.js +# https://nextjs.org/blog/next-9-1#public-directory-support +# public + +# vuepress build output +.vuepress/dist + +# Serverless directories +.serverless/ + +# FuseBox cache +.fusebox/ + +# DynamoDB Local files +.dynamodb/ + +# TernJS port file +.tern-port + +# Teams Toolkit +env/.env.*.user +env/.env.local +appPackage/build +.deployment diff --git 
a/js/samples/04.ai.f.vision.cardMaster/.prettierignore b/js/samples/04.ai.f.vision.cardMaster/.prettierignore new file mode 100644 index 000000000..39635917f --- /dev/null +++ b/js/samples/04.ai.f.vision.cardMaster/.prettierignore @@ -0,0 +1,7 @@ +bin +build +demo-packages +dist +manifest +node_modules +package-lock.json \ No newline at end of file diff --git a/js/samples/04.ai.f.vision.cardMaster/.prettierrc b/js/samples/04.ai.f.vision.cardMaster/.prettierrc new file mode 100644 index 000000000..180246217 --- /dev/null +++ b/js/samples/04.ai.f.vision.cardMaster/.prettierrc @@ -0,0 +1,11 @@ +{ + "arrowParens": "always", + "endOfLine": "auto", + "printWidth": 120, + "semi": true, + "singleAttributePerLine": false, + "singleQuote": true, + "tabWidth": 4, + "trailingComma": "none", + "useTabs": false +} diff --git a/js/samples/04.ai.f.vision.cardMaster/.vscode/launch.json b/js/samples/04.ai.f.vision.cardMaster/.vscode/launch.json new file mode 100644 index 000000000..063ae2e52 --- /dev/null +++ b/js/samples/04.ai.f.vision.cardMaster/.vscode/launch.json @@ -0,0 +1,95 @@ +{ + "version": "0.2.0", + "configurations": [ + { + "name": "Launch Remote (Edge)", + "type": "msedge", + "request": "launch", + "url": "https://teams.microsoft.com/l/app/${{TEAMS_APP_ID}}?installAppPackage=true&webjoin=true&${account-hint}", + "presentation": { + "group": "remote", + "order": 1 + }, + "internalConsoleOptions": "neverOpen" + }, + { + "name": "Launch Remote (Chrome)", + "type": "chrome", + "request": "launch", + "url": "https://teams.microsoft.com/l/app/${{TEAMS_APP_ID}}?installAppPackage=true&webjoin=true&${account-hint}", + "presentation": { + "group": "remote", + "order": 2 + }, + "internalConsoleOptions": "neverOpen" + }, + { + "name": "Launch App (Edge)", + "type": "msedge", + "request": "launch", + "url": "https://teams.microsoft.com/l/app/${{local:TEAMS_APP_ID}}?installAppPackage=true&webjoin=true&${account-hint}", + "cascadeTerminateToConfigurations": [ + "Attach to Local Service" + ], + "presentation": { + "group": "all", + "hidden": true + }, + "internalConsoleOptions": "neverOpen" + }, + { + "name": "Launch App (Chrome)", + "type": "chrome", + "request": "launch", + "url": "https://teams.microsoft.com/l/app/${{local:TEAMS_APP_ID}}?installAppPackage=true&webjoin=true&${account-hint}", + "cascadeTerminateToConfigurations": [ + "Attach to Local Service" + ], + "presentation": { + "group": "all", + "hidden": true + }, + "internalConsoleOptions": "neverOpen" + }, + { + "name": "Attach to Local Service", + "type": "node", + "request": "attach", + "port": 9239, + "restart": true, + "presentation": { + "group": "all", + "hidden": true + }, + "internalConsoleOptions": "neverOpen" + } + ], + "compounds": [ + { + "name": "Debug (Edge)", + "configurations": [ + "Launch App (Edge)", + "Attach to Local Service" + ], + "preLaunchTask": "Start Teams App Locally", + "presentation": { + "group": "all", + "order": 1 + }, + "stopAll": true + }, + { + "name": "Debug (Chrome)", + "configurations": [ + "Launch App (Chrome)", + "Attach to Local Service" + ], + "preLaunchTask": "Start Teams App Locally", + "presentation": { + "group": "all", + "order": 2 + }, + "stopAll": true + } + ] +} diff --git a/js/samples/04.ai.f.vision.cardMaster/.vscode/tasks.json b/js/samples/04.ai.f.vision.cardMaster/.vscode/tasks.json new file mode 100644 index 000000000..585f86ae9 --- /dev/null +++ b/js/samples/04.ai.f.vision.cardMaster/.vscode/tasks.json @@ -0,0 +1,105 @@ +// This file is automatically generated by Teams Toolkit. 
+// The teamsfx tasks defined in this file require Teams Toolkit version >= 5.0.0. +// See https://aka.ms/teamsfx-tasks for details on how to customize each task. +{ + "version": "2.0.0", + "tasks": [ + { + "label": "Start Teams App Locally", + "dependsOn": [ + "Validate prerequisites", + "Start local tunnel", + "Provision", + "Deploy", + "Start application" + ], + "dependsOrder": "sequence" + }, + { + // Check all required prerequisites. + // See https://aka.ms/teamsfx-tasks/check-prerequisites to know the details and how to customize the args. + "label": "Validate prerequisites", + "type": "teamsfx", + "command": "debug-check-prerequisites", + "args": { + "prerequisites": [ + "nodejs", // Validate if Node.js is installed. + "m365Account", // Sign-in prompt for Microsoft 365 account, then validate if the account enables the sideloading permission. + "portOccupancy" // Validate available ports to ensure those debug ones are not occupied. + ], + "portOccupancy": [ + 3978, // app service port + 9239 // app inspector port for Node.js debugger + ] + } + }, + { + // Start the local tunnel service to forward public URL to local port and inspect traffic. + // See https://aka.ms/teamsfx-tasks/local-tunnel for the detailed args definitions. + "label": "Start local tunnel", + "type": "teamsfx", + "command": "debug-start-local-tunnel", + "args": { + "type": "dev-tunnel", + "ports": [ + { + "portNumber": 3978, + "protocol": "http", + "access": "public", + "writeToEnvironmentFile": { + "endpoint": "BOT_ENDPOINT", // output tunnel endpoint as BOT_ENDPOINT + "domain": "BOT_DOMAIN" // output tunnel domain as BOT_DOMAIN + } + } + ], + "env": "local" + }, + "isBackground": true, + "problemMatcher": "$teamsfx-local-tunnel-watch" + }, + { + // Create the debug resources. + // See https://aka.ms/teamsfx-tasks/provision to know the details and how to customize the args. + "label": "Provision", + "type": "teamsfx", + "command": "provision", + "args": { + "env": "local" + } + }, + { + // Build project. + // See https://aka.ms/teamsfx-tasks/deploy to know the details and how to customize the args. + "label": "Deploy", + "type": "teamsfx", + "command": "deploy", + "args": { + "env": "local" + } + }, + { + "label": "Start application", + "type": "shell", + "command": "npm run dev:teamsfx", + "isBackground": true, + "options": { + "cwd": "${workspaceFolder}" + }, + "problemMatcher": { + "pattern": [ + { + "regexp": "^.*$", + "file": 0, + "location": 1, + "message": 2 + } + ], + "background": { + "activeOnStart": true, + "beginsPattern": "[nodemon] starting", + "endsPattern": "restify listening to|Bot/ME service listening at|[nodemon] app crashed" + } + } + } + ] +} \ No newline at end of file diff --git a/js/samples/04.ai.f.vision.cardMaster/README.md b/js/samples/04.ai.f.vision.cardMaster/README.md new file mode 100644 index 000000000..8e5ebf434 --- /dev/null +++ b/js/samples/04.ai.f.vision.cardMaster/README.md @@ -0,0 +1,165 @@ +# Microsoft Teams Vision Enabled Bot : Card Master +This is a conversational bot for Microsoft Teams with AI Vision support that is able to generate Adaptive Cards from uploaded images using `gpt-4-vision-preview`. 
+
+*Table of contents*
+
+- [Card Master Bot](#microsoft-teams-vision-enabled-bot--card-master)
+    - [Setting up the sample](#setting-up-the-sample)
+    - [Interacting with the bot](#interacting-with-the-bot)
+    - [Multiple ways to test](#multiple-ways-to-test)
+        - [Using Teams Toolkit for Visual Studio Code](#using-teams-toolkit-for-visual-studio-code)
+        - [Using Teams Toolkit CLI](#using-teams-toolkit-cli)
+        - [Manually upload the app to a Teams desktop client](#manually-upload-the-app-to-a-teams-desktop-client)
+    - [Deploy the bot to Azure](#deploy-the-bot-to-azure)
+    - [Further reading](#further-reading)
+
+## Setting up the sample
+
+1. Clone the repository
+
+    ```bash
+    git clone https://github.com/Microsoft/teams-ai.git
+    ```
+
+2. In the root JavaScript folder, install and build all dependencies
+
+    ```bash
+    cd teams-ai/js
+    yarn install
+    yarn build
+    ```
+
+3. In a terminal, navigate to the sample root.
+
+    ```bash
+    cd teams-ai/js/samples/04.ai.f.vision.cardMaster/
+    ```
+
+4. Duplicate the `sample.env` file in the `teams-ai/js/samples/04.ai.f.vision.cardMaster` folder and rename the copy to `.env`.
+
+5. Add your OpenAI key as the value of `OPENAI_KEY`.
+
+6. Update `config.json` and `index.ts` with your model deployment name (see the model configuration sketch just before [Further reading](#further-reading)).
+
+## Interacting with the bot
+
+You can interact with this bot by sending it a message with an image or a doodle.
+
+## Multiple ways to test
+
+The easiest and fastest way to get up and running is with Teams Toolkit as your development guide. To use Teams Toolkit to continue setup and debugging, please continue below.
+
+Otherwise, if you only want to run the bot locally and build manually, skip ahead to the [Manually upload the app to a Teams desktop client](#manually-upload-the-app-to-a-teams-desktop-client) section.
+
+### Using Teams Toolkit for Visual Studio Code
+
+The simplest way to run this sample in Teams is to use Teams Toolkit for Visual Studio Code.
+
+1. Ensure you have downloaded and installed [Visual Studio Code](https://code.visualstudio.com/docs/setup/setup-overview).
+1. Install the [Teams Toolkit extension](https://marketplace.visualstudio.com/items?itemName=TeamsDevApp.ms-teams-vscode-extension).
+    1. Log in with your Microsoft 365 account.
+    2. Log in with your Azure account. Ensure that you have a valid subscription and resource group; these are required to provision your bot.
+1. Select **File > Open Folder** in VS Code and choose this sample's directory from the repo.
+1. Using the extension, sign in with your Microsoft 365 account where you have permission to upload custom apps.
+1. Select **Debug > Start Debugging** or press **F5** to run the app in a Teams web client.
+1. In the browser that launches, select the **Add** button to install the app to Teams.
+
+> If you do not have permission to upload custom apps (sideloading), Teams Toolkit will recommend creating and using a Microsoft 365 Developer Program account - a free program to get your own dev environment sandbox that includes Teams.
+
+### Using Teams Toolkit CLI
+
+You can also use the Teams Toolkit CLI to run this sample.
+
+1. Install the CLI
+
+    ```bash
+    npm install -g @microsoft/teamsfx-cli
+    ```
+
+1. Open a second shell instance and run the ngrok tunneling service, pointing at port 3978
+
+    ```bash
+    ngrok http --host-header=rewrite 3978
+    ```
+
+1. Copy the ngrok URL and put the URL and domain in the `./env/.env.local` file
+
+    ```bash
+    BOT_ENDPOINT=https://{ngrok-url}.ngrok.io
+    BOT_DOMAIN={ngrok-url}.ngrok.io
+    ```
+
+1. In the repository directory, run the Teams Toolkit CLI commands to automate the setup needed for the app
+
+    ```bash
+    cd teams-ai/js/samples/04.ai.f.vision.cardMaster/
+    teamsfx provision --env local
+    ```
+
+1. Next, use the CLI to validate and create an app package
+
+    ```bash
+    teamsfx deploy --env local
+    ```
+
+1. Finally, use the CLI to preview the app in Teams
+
+    ```bash
+    teamsfx preview --env local
+    ```
+
+### Manually upload the app to a Teams desktop client
+
+> If you used Teams Toolkit in the above steps, you can [upload a custom app](https://learn.microsoft.com/en-us/microsoftteams/platform/concepts/deploy-and-publish/apps-upload) to a desktop client using the `/appPackage/appPackage.local.zip` file created by the tools and skip to step 6.
+
+1. In a terminal, navigate to `teams-ai/js/samples/04.ai.f.vision.cardMaster/`
+
+    ```bash
+    cd teams-ai/js/samples/04.ai.f.vision.cardMaster/
+    ```
+
+1. Run the ngrok tunneling service, pointing at port 3978
+
+    ```bash
+    ngrok http --host-header=rewrite 3978
+    ```
+
+1. Create a [Bot Framework registration resource](https://docs.microsoft.com/en-us/azure/bot-service/bot-service-quickstart-registration) in Azure
+
+    - Use the current `https` URL you were given by running ngrok and append the path `/api/messages` used by this sample.
+    - Ensure that you've [enabled the Teams Channel](https://docs.microsoft.com/en-us/azure/bot-service/channel-connect-teams?view=azure-bot-service-4.0)
+
+1. Update the `.env` configuration for the bot to use the Microsoft App Id and App Password from the Bot Framework registration. (Note: the App Password is referred to as the "client secret" in the Azure Portal, and you can always create a new client secret.)
+
+1. **_This step is specific to Teams._**
+
+    - **Edit** the `manifest.json` contained in the `appPackage` folder to replace your Microsoft App Id (created when you registered your bot earlier) _everywhere_ you see the placeholder string `${{TEAMS_APP_ID}}` (depending on the scenario, the Microsoft App Id may occur multiple times in the `manifest.json`). If you haven't created an Azure app service yet, you can use your bot ID for the above. Your bot ID should be pasted in where you see `${{BOT_ID}}`. Replace everywhere you see `${{BOT_DOMAIN}}` with the domain part of the URL created by your tunneling solution.
+    - **Zip** up the contents of the `appPackage` folder to create a `manifest.zip`
+
+1. Run your app from the command line:
+
+    ```bash
+    yarn start
+    ```
+
+1. [Upload the app](https://learn.microsoft.com/en-us/microsoftteams/platform/concepts/deploy-and-publish/apps-upload) file (the `manifest.zip` created in the previous step) in Teams.
+
+## Deploy the bot to Azure
+
+You can use Teams Toolkit for VS Code or the CLI to host the bot in Azure. The sample includes Bicep templates in the `/infra` directory which are used by the tools to create resources in Azure. Use the **Provision** and **Deploy** menus of the Teams Toolkit extension, or run `teamsfx provision` and `teamsfx deploy` from the CLI. [Visit the documentation](https://learn.microsoft.com/en-us/microsoftteams/platform/toolkit/provision) for more info on hosting your app in Azure with Teams Toolkit.
+
+Alternatively, you can learn more about deploying a bot to Azure manually in the [Deploy your bot to Azure](https://aka.ms/azuredeployment) documentation.
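+
+For reference when completing steps 5 and 6 of the setup above, the model wiring lives in `src/index.ts` (added later in this change) and supports both OpenAI and Azure OpenAI. Here is a trimmed sketch; only one of the two key sets needs to be provided, and the Azure values apply only if you use an Azure OpenAI deployment of `gpt-4-vision-preview`.
+
+```typescript
+import { OpenAIModel } from '@microsoft/teams-ai';
+
+// Trimmed from src/index.ts: fill in either the OpenAI or the Azure OpenAI settings.
+const model = new OpenAIModel({
+    // OpenAI
+    apiKey: process.env.OPENAI_KEY!,
+    defaultModel: 'gpt-4-vision-preview',
+
+    // Azure OpenAI (only needed if you use an Azure OpenAI deployment)
+    azureApiKey: process.env.AZURE_OPENAI_KEY!,
+    azureDefaultDeployment: 'gpt-4-vision-preview',
+    azureEndpoint: process.env.AZURE_OPENAI_ENDPOINT!,
+    azureApiVersion: '2023-03-15-preview',
+
+    // The sample enables system messages for the vision model and logs requests while developing.
+    useSystemMessages: true,
+    logRequests: true
+});
+```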
+ +## Further reading + +- [Teams Toolkit overview](https://learn.microsoft.com/en-us/microsoftteams/platform/toolkit/teams-toolkit-fundamentals) +- [How Microsoft Teams bots work](https://docs.microsoft.com/en-us/azure/bot-service/bot-builder-basics-teams?view=azure-bot-service-4.0&tabs=javascript) \ No newline at end of file diff --git a/js/samples/04.ai.f.vision.cardMaster/appPackage/color.png b/js/samples/04.ai.f.vision.cardMaster/appPackage/color.png new file mode 100644 index 000000000..4ab158588 --- /dev/null +++ b/js/samples/04.ai.f.vision.cardMaster/appPackage/color.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:67c7c063ba4dc41c977080c1f1fa17c897e1c72ec4a6412ed5e681b5d4cb9680 +size 1066 diff --git a/js/samples/04.ai.f.vision.cardMaster/appPackage/manifest.json b/js/samples/04.ai.f.vision.cardMaster/appPackage/manifest.json new file mode 100644 index 000000000..6c32b7593 --- /dev/null +++ b/js/samples/04.ai.f.vision.cardMaster/appPackage/manifest.json @@ -0,0 +1,48 @@ +{ + "$schema": "https://developer.microsoft.com/json-schemas/teams/v1.15/MicrosoftTeams.schema.json", + "version": "1.1.0", + "manifestVersion": "1.15", + "id": "${{TEAMS_APP_ID}}", + "packageName": "com.package.name", + "name": { + "short": "CardMaster-${{TEAMSFX_ENV}}", + "full": "Teams Card Master" + }, + "developer": { + "name": "CardMaster", + "mpnId": "", + "websiteUrl": "https://microsoft.com", + "privacyUrl": "https://privacy.microsoft.com/privacystatement", + "termsOfUseUrl": "https://www.microsoft.com/legal/terms-of-use" + }, + "description": { + "short": "A vision enabled bot capable of generating Adaptive Cards using uploaded images.", + "full": "A vision enabled bot capable of generating Adaptive Cards using uploaded images." + }, + "icons": { + "outline": "outline.png", + "color": "color.png" + }, + "accentColor": "#FFFFFF", + "staticTabs": [ + { + "entityId": "conversations", + "scopes": ["personal"] + }, + { + "entityId": "about", + "scopes": ["personal"] + } + ], + "bots": [ + { + "botId": "${{BOT_ID}}", + "scopes": ["personal", "team", "groupChat"], + "isNotificationOnly": false, + "supportsCalling": false, + "supportsVideo": false, + "supportsFiles": false + } + ], + "validDomains": [] +} diff --git a/js/samples/04.ai.f.vision.cardMaster/appPackage/outline.png b/js/samples/04.ai.f.vision.cardMaster/appPackage/outline.png new file mode 100644 index 000000000..458549f6d --- /dev/null +++ b/js/samples/04.ai.f.vision.cardMaster/appPackage/outline.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b1ddc76f79027d9c0300689721649ce1f1950271a5fc4ca50ae56545228fb566 +size 249 diff --git a/js/samples/04.ai.f.vision.cardMaster/env/.env.local b/js/samples/04.ai.f.vision.cardMaster/env/.env.local new file mode 100644 index 000000000..46c721863 --- /dev/null +++ b/js/samples/04.ai.f.vision.cardMaster/env/.env.local @@ -0,0 +1,11 @@ +# This file includes environment variables that can be committed to git. It's gitignored by default because it represents your local development environment. + +# Built-in environment variables +TEAMSFX_ENV=local + +# Generated during provision, you can also add your own variables. 
+BOT_ID= +TEAMS_APP_ID= +BOT_DOMAIN= +BOT_ENDPOINT= +TEAMS_APP_TENANT_ID= \ No newline at end of file diff --git a/js/samples/04.ai.f.vision.cardMaster/env/.env.local.user b/js/samples/04.ai.f.vision.cardMaster/env/.env.local.user new file mode 100644 index 000000000..cb64125e8 --- /dev/null +++ b/js/samples/04.ai.f.vision.cardMaster/env/.env.local.user @@ -0,0 +1,2 @@ +SECRET_BOT_PASSWORD= +TEAMS_APP_UPDATE_TIME= \ No newline at end of file diff --git a/js/samples/04.ai.f.vision.cardMaster/env/.env.staging b/js/samples/04.ai.f.vision.cardMaster/env/.env.staging new file mode 100644 index 000000000..de9e39aee --- /dev/null +++ b/js/samples/04.ai.f.vision.cardMaster/env/.env.staging @@ -0,0 +1,15 @@ +# This file includes environment variables that will be committed to git by default. + +# Built-in environment variables +TEAMSFX_ENV=staging + +# Updating AZURE_SUBSCRIPTION_ID or AZURE_RESOURCE_GROUP_NAME after provision may also require an update to RESOURCE_SUFFIX, because some services require a globally unique name across subscriptions/resource groups. +AZURE_SUBSCRIPTION_ID= +AZURE_RESOURCE_GROUP_NAME= +RESOURCE_SUFFIX= + +# Generated during provision, you can also add your own variables. +BOT_ID= +TEAMS_APP_ID= +BOT_AZURE_APP_SERVICE_RESOURCE_ID= +BOT_DOMAIN= diff --git a/js/samples/04.ai.f.vision.cardMaster/infra/azure.bicep b/js/samples/04.ai.f.vision.cardMaster/infra/azure.bicep new file mode 100644 index 000000000..41cf99a69 --- /dev/null +++ b/js/samples/04.ai.f.vision.cardMaster/infra/azure.bicep @@ -0,0 +1,82 @@ +@maxLength(20) +@minLength(4) +@description('Used to generate names for all resources in this file') +param resourceBaseName string + +@description('Required when create Azure Bot service') +param botAadAppClientId string + +@secure() +@description('Required by Bot Framework package in your bot project') +param botAadAppClientSecret string + +param webAppSKU string + +@maxLength(42) +param botDisplayName string + +param serverfarmsName string = resourceBaseName +param webAppName string = resourceBaseName +param location string = resourceGroup().location + +// Compute resources for your Web App +resource serverfarm 'Microsoft.Web/serverfarms@2021-02-01' = { + kind: 'app' + location: location + name: serverfarmsName + sku: { + name: webAppSKU + } +} + +// Web App that hosts your bot +resource webApp 'Microsoft.Web/sites@2021-02-01' = { + kind: 'app' + location: location + name: webAppName + properties: { + serverFarmId: serverfarm.id + httpsOnly: true + siteConfig: { + alwaysOn: true + appSettings: [ + { + name: 'WEBSITE_RUN_FROM_PACKAGE' + value: '1' // Run Azure APP Service from a package file + } + { + name: 'WEBSITE_NODE_DEFAULT_VERSION' + value: '~18' // Set NodeJS version to 18.x for your site + } + { + name: 'RUNNING_ON_AZURE' + value: '1' + } + { + name: 'BOT_ID' + value: botAadAppClientId + } + { + name: 'BOT_PASSWORD' + value: botAadAppClientSecret + } + ] + ftpsState: 'FtpsOnly' + } + } +} + +// Register your web service as a bot with the Bot Framework +module azureBotRegistration './botRegistration/azurebot.bicep' = { + name: 'Azure-Bot-registration' + params: { + resourceBaseName: resourceBaseName + botAadAppClientId: botAadAppClientId + botAppDomain: webApp.properties.defaultHostName + botDisplayName: botDisplayName + } +} + +// The output will be persisted in .env.{envName}. Visit https://aka.ms/teamsfx-actions/arm-deploy for more details. 
+output BOT_AZURE_APP_SERVICE_RESOURCE_ID string = webApp.id
+output BOT_DOMAIN string = webApp.properties.defaultHostName
diff --git a/js/samples/04.ai.f.vision.cardMaster/infra/azure.parameters.json b/js/samples/04.ai.f.vision.cardMaster/infra/azure.parameters.json
new file mode 100644
index 000000000..e5437d630
--- /dev/null
+++ b/js/samples/04.ai.f.vision.cardMaster/infra/azure.parameters.json
@@ -0,0 +1,21 @@
+{
+  "$schema": "https://schema.management.azure.com/schemas/2015-01-01/deploymentParameters.json#",
+  "contentVersion": "1.0.0.0",
+  "parameters": {
+    "resourceBaseName": {
+      "value": "CardMasterbot${{RESOURCE_SUFFIX}}"
+    },
+    "botAadAppClientId": {
+      "value": "${{BOT_ID}}"
+    },
+    "botAadAppClientSecret": {
+      "value": "${{SECRET_BOT_PASSWORD}}"
+    },
+    "webAppSKU": {
+      "value": "B1"
+    },
+    "botDisplayName": {
+      "value": "CardMaster"
+    }
+  }
+}
\ No newline at end of file
diff --git a/js/samples/04.ai.f.vision.cardMaster/infra/botRegistration/azurebot.bicep b/js/samples/04.ai.f.vision.cardMaster/infra/botRegistration/azurebot.bicep
new file mode 100644
index 000000000..ab67c7a56
--- /dev/null
+++ b/js/samples/04.ai.f.vision.cardMaster/infra/botRegistration/azurebot.bicep
@@ -0,0 +1,37 @@
+@maxLength(20)
+@minLength(4)
+@description('Used to generate names for all resources in this file')
+param resourceBaseName string
+
+@maxLength(42)
+param botDisplayName string
+
+param botServiceName string = resourceBaseName
+param botServiceSku string = 'F0'
+param botAadAppClientId string
+param botAppDomain string
+
+// Register your web service as a bot with the Bot Framework
+resource botService 'Microsoft.BotService/botServices@2021-03-01' = {
+  kind: 'azurebot'
+  location: 'global'
+  name: botServiceName
+  properties: {
+    displayName: botDisplayName
+    endpoint: 'https://${botAppDomain}/api/messages'
+    msaAppId: botAadAppClientId
+  }
+  sku: {
+    name: botServiceSku
+  }
+}
+
+// Connect the bot service to Microsoft Teams
+resource botServiceMsTeamsChannel 'Microsoft.BotService/botServices/channels@2021-03-01' = {
+  parent: botService
+  location: 'global'
+  name: 'MsTeamsChannel'
+  properties: {
+    channelName: 'MsTeamsChannel'
+  }
+}
diff --git a/js/samples/04.ai.f.vision.cardMaster/infra/botRegistration/readme.md b/js/samples/04.ai.f.vision.cardMaster/infra/botRegistration/readme.md
new file mode 100644
index 000000000..d5416243c
--- /dev/null
+++ b/js/samples/04.ai.f.vision.cardMaster/infra/botRegistration/readme.md
@@ -0,0 +1 @@
+The `azurebot.bicep` module is provided to help you create the Azure Bot service when you don't use Azure to host your app. If you use Azure as the infrastructure for your app, `azure.bicep` under the infra folder already leverages this module to create the Azure Bot service for you. You don't need to deploy `azurebot.bicep` again.
\ No newline at end of file diff --git a/js/samples/04.ai.f.vision.cardMaster/package.json b/js/samples/04.ai.f.vision.cardMaster/package.json new file mode 100644 index 000000000..85d0df8a6 --- /dev/null +++ b/js/samples/04.ai.f.vision.cardMaster/package.json @@ -0,0 +1,39 @@ +{ + "name": "CardMaster-demo", + "version": "1.0.0", + "description": "A vision enabled bot capable of generating Adaptive Cards using uploaded images.", + "author": "Microsoft", + "license": "MIT", + "main": "./lib/index.js", + "scripts": { + "dev:teamsfx": "env-cmd --silent -f .localSettings yarn dev", + "dev": "nodemon --watch ./src --exec node --inspect=9239 --signal SIGINT -r ts-node/register ./src/index.ts", + "build": "tsc --build", + "clean": "rimraf node_modules lib", + "lint": "eslint **/src/**/*.{j,t}s{,x} --fix --no-error-on-unmatched-pattern", + "start": "tsc --build && node ./lib/index.js", + "test": "echo \"Error: no test specified\" && exit 1", + "watch": "nodemon --watch ./src -e ts --exec \"yarn start\"" + }, + "repository": { + "type": "git", + "url": "https://github.com" + }, + "dependencies": { + "@microsoft/teams-ai": "~1.0.0-preview.2", + "@microsoft/teamsfx": "^2.3.0", + "botbuilder": "^4.21.0", + "dotenv": "^16.3.1", + "replace": "~1.2.0", + "restify": "~11.1.0" + }, + "devDependencies": { + "@types/dotenv": "6.1.1", + "@types/restify": "8.5.9", + "@types/node": "^20.9.0", + "ts-node": "^10.9.1", + "env-cmd": "^10.1.0", + "nodemon": "~1.19.4", + "typescript": "^5.2.2" + } +} diff --git a/js/samples/04.ai.f.vision.cardMaster/sample.env b/js/samples/04.ai.f.vision.cardMaster/sample.env new file mode 100644 index 000000000..8a75c2544 --- /dev/null +++ b/js/samples/04.ai.f.vision.cardMaster/sample.env @@ -0,0 +1,2 @@ +# This is an example file of how to set up environment variables. You can duplicate this file and add the appropriate keys. +OPENAI_KEY= \ No newline at end of file diff --git a/js/samples/04.ai.f.vision.cardMaster/src/index.ts b/js/samples/04.ai.f.vision.cardMaster/src/index.ts new file mode 100644 index 000000000..653d36986 --- /dev/null +++ b/js/samples/04.ai.f.vision.cardMaster/src/index.ts @@ -0,0 +1,157 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// Import required packages +import { config } from 'dotenv'; +import * as path from 'path'; +import * as restify from 'restify'; + +// Import required bot services. +// See https://aka.ms/bot-services to learn more about the different parts of a bot. +import { + CardFactory, + CloudAdapter, + ConfigurationBotFrameworkAuthentication, + ConfigurationServiceClientCredentialFactory, + MemoryStorage, + MessageFactory, + TurnContext +} from 'botbuilder'; + +// Read botFilePath and botFileSecret from .env file. +const ENV_FILE = path.join(__dirname, '..', '.env'); +config({ path: ENV_FILE }); + +const botFrameworkAuthentication = new ConfigurationBotFrameworkAuthentication( + {}, + new ConfigurationServiceClientCredentialFactory({ + MicrosoftAppId: process.env.BOT_ID, + MicrosoftAppPassword: process.env.BOT_PASSWORD, + MicrosoftAppType: 'MultiTenant' + }) +); + +// Create adapter. +// See https://aka.ms/about-bot-adapter to learn more about how bots work. +const adapter = new CloudAdapter(botFrameworkAuthentication); + +// Catch-all for errors. +const onTurnErrorHandler = async (context: TurnContext, error: any) => { + // This check writes out errors to console log .vs. app insights. 
+ // NOTE: In production environment, you should consider logging this to Azure + // application insights. + console.error(`\n [onTurnError] unhandled error: ${error}`); + console.log(error); + + // Send a trace activity, which will be displayed in Bot Framework Emulator + await context.sendTraceActivity( + 'OnTurnError Trace', + `${error}`, + 'https://www.botframework.com/schemas/error', + 'TurnError' + ); + + // Send a message to the user + await context.sendActivity('The bot encountered an error or bug.'); + await context.sendActivity('To continue to run this bot, please fix the bot source code.'); +}; + +// Set the onTurnError for the singleton CloudAdapter. +adapter.onTurnError = onTurnErrorHandler; + +// Create HTTP server. +const server = restify.createServer(); +server.use(restify.plugins.bodyParser()); + +server.listen(process.env.port || process.env.PORT || 3978, () => { + console.log(`\n${server.name} listening to ${server.url}`); + console.log('\nGet Bot Framework Emulator: https://aka.ms/botframework-emulator'); + console.log('\nTo test your bot in Teams, sideload the app manifest.json within Teams Apps.'); +}); + +import { + Application, + ActionPlanner, + OpenAIModel, + PromptManager, + TurnState, + TeamsAttachmentDownloader +} from '@microsoft/teams-ai'; + +// eslint-disable-next-line @typescript-eslint/no-empty-interface +interface ConversationState {} +type ApplicationTurnState = TurnState; + +if (!process.env.OPENAI_KEY && !process.env.AZURE_OPENAI_KEY) { + throw new Error('Missing environment variables - please check that OPENAI_KEY or AZURE_OPENAI_KEY is set.'); +} + +// Create AI components +const model = new OpenAIModel({ + // OpenAI Support + apiKey: process.env.OPENAI_KEY!, + defaultModel: 'gpt-4-vision-preview', + + // Azure OpenAI Support + azureApiKey: process.env.AZURE_OPENAI_KEY!, + azureDefaultDeployment: 'gpt-4-vision-preview', + azureEndpoint: process.env.AZURE_OPENAI_ENDPOINT!, + azureApiVersion: '2023-03-15-preview', + + // Use system messages for vision models + useSystemMessages: true, + + // Request logging + logRequests: true +}); + +const prompts = new PromptManager({ + promptsFolder: path.join(__dirname, '../src/prompts') +}); + +const planner = new ActionPlanner({ + model, + prompts, + defaultPrompt: 'chat', +}); + +// Create an attachment downloader +const downloader = new TeamsAttachmentDownloader({ + botAppId: process.env.BOT_ID!, + botAppPassword: process.env.BOT_PASSWORD!, +}); + +// Define storage and application +const storage = new MemoryStorage(); +const app = new Application({ + storage, + ai: { + planner + }, + fileDownloaders: [downloader], +}); + +interface SendCardParams { + card: any; +} + +app.ai.action('SendCard', async (context, state, params) => { + const attachment = CardFactory.adaptiveCard(params.card); + await context.sendActivity(MessageFactory.attachment(attachment)); + return 'card sent'; +}); + +app.ai.action('ShowCardJSON', async (context, state, params) => { + const json = JSON.stringify(params.card, null, 2); + await context.sendActivity(`
<pre>${json}</pre>
`); + return 'card displayed'; +}); + +// Listen for incoming server requests. +server.post('/api/messages', async (req, res) => { + // Route received a request to adapter for processing + await adapter.process(req, res as any, async (context) => { + // Dispatch to application for routing + await app.run(context); + }); +}); diff --git a/js/samples/04.ai.f.vision.cardMaster/src/prompts/chat/actions.json b/js/samples/04.ai.f.vision.cardMaster/src/prompts/chat/actions.json new file mode 100644 index 000000000..2b705dffc --- /dev/null +++ b/js/samples/04.ai.f.vision.cardMaster/src/prompts/chat/actions.json @@ -0,0 +1,34 @@ +[ + { + "name": "SendCard", + "description": "Sends an adaptive card to the user", + "parameters": { + "type": "object", + "properties": { + "card": { + "type": "object", + "description": "The adaptive card to send" + } + }, + "required": [ + "card" + ] + } + }, + { + "name": "ShowCardJSON", + "description": "Shows the user the JSON for an adaptive card", + "parameters": { + "type": "object", + "properties": { + "card": { + "type": "object", + "description": "The adaptive card JSON to show" + } + }, + "required": [ + "card" + ] + } + } +] \ No newline at end of file diff --git a/js/samples/04.ai.f.vision.cardMaster/src/prompts/chat/config.json b/js/samples/04.ai.f.vision.cardMaster/src/prompts/chat/config.json new file mode 100644 index 000000000..b2054be07 --- /dev/null +++ b/js/samples/04.ai.f.vision.cardMaster/src/prompts/chat/config.json @@ -0,0 +1,22 @@ +{ + "schema": 1.1, + "description": "Vision Bot", + "type": "completion", + "completion": { + "model": "gpt-4-vision-preview", + "completion_type": "chat", + "include_history": true, + "include_input": true, + "include_images": true, + "max_input_tokens": 2800, + "max_tokens": 1000, + "temperature": 0.2, + "top_p": 0.0, + "presence_penalty": 0.0, + "frequency_penalty": 0.0, + "stop_sequences": [] + }, + "augmentation": { + "augmentation_type": "sequence" + } + } \ No newline at end of file diff --git a/js/samples/04.ai.f.vision.cardMaster/src/prompts/chat/skprompt.txt b/js/samples/04.ai.f.vision.cardMaster/src/prompts/chat/skprompt.txt new file mode 100644 index 000000000..85bc992f0 --- /dev/null +++ b/js/samples/04.ai.f.vision.cardMaster/src/prompts/chat/skprompt.txt @@ -0,0 +1,4 @@ +You are a friendly assistant for Microsoft Teams with vision support. +You are an expert on converting doodles and images to Adaptive Cards for Microsoft Teams. +When shown an image try to convert it to an Adaptive Card and send it using SendCard. +For Adaptive Cards with Image placeholders use ShowCardJSON instead. \ No newline at end of file diff --git a/js/samples/04.ai.f.vision.cardMaster/teamsapp.local.yml b/js/samples/04.ai.f.vision.cardMaster/teamsapp.local.yml new file mode 100644 index 000000000..1703190d3 --- /dev/null +++ b/js/samples/04.ai.f.vision.cardMaster/teamsapp.local.yml @@ -0,0 +1,49 @@ +# Visit https://aka.ms/teamsfx-v5.0-guide for details on this file +# Visit https://aka.ms/teamsfx-actions for details on actions +version: 1.0.0 + +provision: + - uses: teamsApp/create # Creates a Teams app + with: + name: CardMaster-${{TEAMSFX_ENV}} # Teams app name + writeToEnvironmentFile: # Write the information of installed dependencies into environment file for the specified environment variable(s). 
+ teamsAppId: TEAMS_APP_ID + + - uses: botAadApp/create # Creates a new AAD app for bot if BOT_ID environment variable is empty + with: + name: CardMaster + writeToEnvironmentFile: + botId: BOT_ID + botPassword: SECRET_BOT_PASSWORD + + - uses: botFramework/create # Create or update the bot registration on dev.botframework.com + with: + botId: ${{BOT_ID}} + name: CardMaster + messagingEndpoint: ${{BOT_ENDPOINT}}/api/messages + description: "" + channels: + - name: msteams + + - uses: teamsApp/validateManifest # Validate using manifest schema + with: + manifestPath: ./appPackage/manifest.json # Path to manifest template + + - uses: teamsApp/zipAppPackage # Build Teams app package with latest env value + with: + manifestPath: ./appPackage/manifest.json # Path to manifest template + outputZipPath: ./build/appPackage/appPackage.${{TEAMSFX_ENV}}.zip + outputJsonPath: ./build/appPackage/manifest.${{TEAMSFX_ENV}}.json + + - uses: teamsApp/update # Apply the Teams app manifest to an existing Teams app in Teams Developer Portal. Will use the app id in manifest file to determine which Teams app to update. + with: + appPackagePath: ./build/appPackage/appPackage.${{TEAMSFX_ENV}}.zip # Relative path to this file. This is the path for built zip file. + +deploy: +# Provides the Teams Toolkit .env file values to the apps runtime so they can be accessed with `process.env`. + - uses: file/createOrUpdateEnvironmentFile + with: + target: ./.env + envs: + BOT_ID: ${{BOT_ID}} + BOT_PASSWORD: ${{SECRET_BOT_PASSWORD}} \ No newline at end of file diff --git a/js/samples/04.ai.f.vision.cardMaster/teamsapp.yml b/js/samples/04.ai.f.vision.cardMaster/teamsapp.yml new file mode 100644 index 000000000..21fc817b4 --- /dev/null +++ b/js/samples/04.ai.f.vision.cardMaster/teamsapp.yml @@ -0,0 +1,79 @@ +# Visit https://aka.ms/teamsfx-v5.0-guide for details on this file +# Visit https://aka.ms/teamsfx-actions for details on actions +version: 1.0.0 + +environmentFolderPath: ./env + +# Triggered when 'teamsfx provision' is executed +provision: + - uses: teamsApp/create # Creates a Teams app + with: + name: CardMaster-${{TEAMSFX_ENV}} # Teams app name + writeToEnvironmentFile: + # Write the information of installed dependencies into environment file for the specified environment variable(s). + teamsAppId: TEAMS_APP_ID + + - uses: botAadApp/create # Creates a new AAD app for Bot Registration. + with: + name: CardMaster + writeToEnvironmentFile: + botId: BOT_ID + botPassword: SECRET_BOT_PASSWORD + + - uses: arm/deploy # Deploy given ARM templates parallelly. + with: + subscriptionId: ${{AZURE_SUBSCRIPTION_ID}} # The AZURE_SUBSCRIPTION_ID is a built-in environment variable. TeamsFx will ask you select one subscription if its value is empty. You're free to reference other environment varialbe here, but TeamsFx will not ask you to select subscription if it's empty in this case. + resourceGroupName: ${{AZURE_RESOURCE_GROUP_NAME}} # The AZURE_RESOURCE_GROUP_NAME is a built-in environment variable. TeamsFx will ask you to select or create one resource group if its value is empty. You're free to reference other environment varialbe here, but TeamsFx will not ask you to select or create resource grouop if it's empty in this case. + templates: + - path: ./infra/azure.bicep + parameters: ./infra/azure.parameters.json + deploymentName: Create-resources-for-bot + bicepCliVersion: v0.9.1 # Teams Toolkit will download this bicep CLI version from github for you, will use bicep CLI in PATH if you remove this config. 
+ # Output: every bicep output will be persisted in current environment's .env file with certain naming conversion. Refer https://aka.ms/teamsfx-actions/arm-deploy for more details on the naming conversion rule. + + - uses: teamsApp/validateManifest # Validate using manifest schema + with: + manifestPath: ./teamsAppManifest/manifest.json # Path to manifest template + - uses: teamsApp/zipAppPackage # Build Teams app package with latest env value + with: + manifestPath: ./teamsAppManifest/manifest.json # Path to manifest template + outputZipPath: ./build/teamsAppManifest/appPackage.${{TEAMSFX_ENV}}.zip + outputJsonPath: ./build/teamsAppManifest/manifest.${{TEAMSFX_ENV}}.json + - uses: teamsApp/update # Apply the Teams app manifest to an existing Teams app in Teams Developer Portal. Will use the app id in manifest file to determine which Teams app to update. + with: + appPackagePath: ./build/teamsAppManifest/appPackage.${{TEAMSFX_ENV}}.zip # Relative path to this file. This is the path for built zip file. + writeToEnvironmentFile: + # Write the information of installed dependencies into environment file for the specified environment variable(s). + teamsAppId: TEAMS_APP_ID + +# Triggered when 'teamsfx deploy' is executed +deploy: + - uses: azureAppService/deploy # Deploy bits to Azure App Serivce + with: + distributionPath: . # Deploy base folder + ignoreFile: ./.appserviceignore # Can be changed to any ignore file location, leave blank will ignore nothing + resourceId: ${{BOT_AZURE_APP_SERVICE_RESOURCE_ID}} # The resource id of the cloud resource to be deployed to. This key will be generated by arm/deploy action automatically. You can replace it with your existing Azure Resource id or add it to your environment variable file. + +# Triggered when 'teamsfx publish' is executed +publish: + - uses: teamsApp/validateManifest # Validate using manifest schema + with: + manifestPath: ./teamsAppManifest/manifest.json # Path to manifest template + - uses: teamsApp/zipAppPackage + with: + manifestPath: ./teamsAppManifest/manifest.json # Path to manifest template + outputZipPath: ./build/teamsAppManifest/appPackage.${{TEAMSFX_ENV}}.zip + outputJsonPath: ./build/teamsAppManifest/manifest.${{TEAMSFX_ENV}}.json + - uses: teamsApp/update # Apply the Teams app manifest to an existing Teams app in Teams Developer Portal. Will use the app id in manifest file to determine which Teams app to update. + with: + appPackagePath: ./build/teamsAppManifest/appPackage.${{TEAMSFX_ENV}}.zip # Relative path to this file. This is the path for built zip file. + writeToEnvironmentFile: + # Write the information of installed dependencies into environment file for the specified environment variable(s). + teamsAppId: TEAMS_APP_ID + - uses: teamsApp/publishAppPackage # Publish the app to Teams Admin Center (https://admin.teams.microsoft.com/policies/manage-apps) for review and approval + with: + appPackagePath: ./build/teamsAppManifest/appPackage.${{TEAMSFX_ENV}}.zip + writeToEnvironmentFile: + # Write the information of installed dependencies into environment file for the specified environment variable(s). 
+ publishedAppId: TEAMS_APP_PUBLISHED_APP_ID +projectId: f76f76ab-fa47-46f5-a401-5d5deb608718 diff --git a/js/samples/04.ai.f.vision.cardMaster/tsconfig.json b/js/samples/04.ai.f.vision.cardMaster/tsconfig.json new file mode 100644 index 000000000..21c92a851 --- /dev/null +++ b/js/samples/04.ai.f.vision.cardMaster/tsconfig.json @@ -0,0 +1,15 @@ +{ + "compilerOptions": { + "declaration": true, + "forceConsistentCasingInFileNames": true, + "incremental": true, + "module": "commonjs", + "outDir": "./lib", + "rootDir": "./src", + "sourceMap": true, + "strict": true, + "target": "es2017", + "tsBuildInfoFile": "./lib/.tsbuildinfo", + "esModuleInterop": true + } +} diff --git a/js/samples/04.ai.f.vision.cardMaster/web.config b/js/samples/04.ai.f.vision.cardMaster/web.config new file mode 100644 index 000000000..0226660da --- /dev/null +++ b/js/samples/04.ai.f.vision.cardMaster/web.config @@ -0,0 +1,61 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/js/samples/06.assistants.a.mathBot/src/bot.ts b/js/samples/06.assistants.a.mathBot/src/bot.ts index fbc6411bc..95b225282 100644 --- a/js/samples/06.assistants.a.mathBot/src/bot.ts +++ b/js/samples/06.assistants.a.mathBot/src/bot.ts @@ -11,10 +11,10 @@ const { AssistantsPlanner } = preview; if (!process.env.ASSISTANT_ID) { (async () => { const assistant = await AssistantsPlanner.createAssistant(process.env.OPENAI_KEY!, { - name: "Math Tutor", - instructions: "You are a personal math tutor. Write and run code to answer math questions.", - tools: [{ type: "code_interpreter" }], - model: "gpt-4-1106-preview" + name: 'Math Tutor', + instructions: 'You are a personal math tutor. Write and run code to answer math questions.', + tools: [{ type: 'code_interpreter' }], + model: 'gpt-4-1106-preview' }); console.log(`Created a new assistant with an ID of: ${assistant.id}`); diff --git a/js/samples/06.auth.teamsSSO.messageExtension/package.json b/js/samples/06.auth.teamsSSO.messageExtension/package.json index b86fd3496..f5481fc92 100644 --- a/js/samples/06.auth.teamsSSO.messageExtension/package.json +++ b/js/samples/06.auth.teamsSSO.messageExtension/package.json @@ -22,6 +22,7 @@ "@microsoft/microsoft-graph-client": "^3.0.7", "@microsoft/teams-ai": "~1.0.0-preview.1", "botbuilder": "^4.21.2", + "botbuilder-azure-blobs": "^4.21.2", "dotenv": "^16.3.1", "isomorphic-fetch": "^3.0.0", "replace": "~1.2.0", diff --git a/js/samples/06.auth.teamsSSO.messageExtension/src/index.ts b/js/samples/06.auth.teamsSSO.messageExtension/src/index.ts index 2d89f056b..31a9ca0a0 100644 --- a/js/samples/06.auth.teamsSSO.messageExtension/src/index.ts +++ b/js/samples/06.auth.teamsSSO.messageExtension/src/index.ts @@ -71,7 +71,7 @@ server.listen(process.env.port || process.env.PORT || 3978, () => { }); import axios from 'axios'; -import { ApplicationBuilder, AuthError, TurnState } from '@microsoft/teams-ai'; +import { ApplicationBuilder, TurnState } from '@microsoft/teams-ai'; import { createNpmPackageCard, createNpmSearchResultCard, createSignOutCard } from './cards'; import { GraphClient } from './graphClient'; diff --git a/js/yarn.lock b/js/yarn.lock index 2375504ea..c90f586a6 100644 --- a/js/yarn.lock +++ b/js/yarn.lock @@ -1192,7 +1192,7 @@ "@types/node" "*" form-data "^4.0.0" -"@types/node@*", "@types/node@^20.10.3": +"@types/node@*", "@types/node@^20.10.3", "@types/node@^20.9.0": version "20.10.3" resolved "https://registry.yarnpkg.com/@types/node/-/node-20.10.3.tgz#4900adcc7fc189d5af5bb41da8f543cea6962030" integrity 
sha512-XJavIpZqiXID5Yxnxv3RUDKTN5b81ddNC3ecsA0SoFXz/QU8OGBwZGMomiq0zw+uuqbL/krztv/DINAQ/EV4gg== @@ -1226,6 +1226,16 @@ "@types/node" "*" "@types/spdy" "*" +"@types/restify@8.5.9": + version "8.5.9" + resolved "https://registry.yarnpkg.com/@types/restify/-/restify-8.5.9.tgz#9c3753093724bef7131d0322c71cd86b58db1672" + integrity sha512-4zXKw10NhlgDsVXcSJXfwjlQQwagITwogEVDM6ouyCOLRsiHfZdAuOkT9sRHBZHd2118TJBBItGx3vqLiGntjQ== + dependencies: + "@types/bunyan" "*" + "@types/formidable" "^1" + "@types/node" "*" + "@types/spdy" "*" + "@types/semver@^7.5.0": version "7.5.0" resolved "https://registry.yarnpkg.com/@types/semver/-/semver-7.5.0.tgz#591c1ce3a702c45ee15f47a42ade72c2fd78978a" @@ -2143,7 +2153,7 @@ botbuilder-stdlib@4.21.2-internal: resolved "https://registry.yarnpkg.com/botbuilder-stdlib/-/botbuilder-stdlib-4.21.2-internal.tgz#6a317ca8e960f73ba3f57947ab93192bc5de9dea" integrity sha512-M7n0RcmjlCipKS92E7ddxpBcRF0A85btQRNOd3+gpzGptk6YU0FMvE70+besVANxFHQQTMPwuwAdxVWBD2e4MA== -"botbuilder@>=4.18.0 <5.0.0", botbuilder@^4.21.2: +"botbuilder@>=4.18.0 <5.0.0", botbuilder@^4.21.0, botbuilder@^4.21.2: version "4.21.2" resolved "https://registry.yarnpkg.com/botbuilder/-/botbuilder-4.21.2.tgz#3458064c451c4316b35b3a957778119824d1517c" integrity sha512-3o4y+b7a2D+CnYYIlCctFwrz1isHK32tKw9I37k6+p47q4CeWyXf2pgnooJjkNz10HcdN1uE2hgEH8c6xY+AQw==