From 8b28322ce562c2f68294646635ba844780daf82c Mon Sep 17 00:00:00 2001 From: Lily Du Date: Wed, 9 Oct 2024 11:10:08 -0700 Subject: [PATCH] [C#] feat: Streaming + Sample (#2078) ## Linked issues closes: #1968 ## Details Added streaming support for C# with an associated sample bot. #### Change details - created a separate `IPromptCompletionStreamingModel` (not possible to have optional members for an interface). As a result, OpenAIModel declaration had to be updated. - converted `StreamingChannelData` into a class ## Attestation Checklist - [x] My code follows the style guidelines of this project - I have checked for/fixed spelling, linting, and other errors - I have commented my code for clarity - I have made corresponding changes to the documentation (updating the doc strings in the code is sufficient) - My changes generate no new warnings - I have added tests that validates my changes, and provides sufficient test coverage. I have tested with: - Local testing - E2E testing in Teams - New and existing unit tests pass locally with my changes --- .../AITests/ActionPlannerTests.cs | 72 ++++- .../AITests/ChatMessageTests.cs | 27 ++ .../AITests/LLMClientTests.cs | 144 +++++++++- .../AITests/Models/OpenAIModelTests.cs | 81 +++++- .../Application/StreamingResponseTests.cs | 238 +++++++++++++++++ .../TestUtils/OpenAIModelFactory.cs | 23 +- .../Microsoft.TeamsAI/AI/Clients/LLMClient.cs | 99 +++++++ .../AI/Clients/LLMClientOptions.cs | 11 + .../AI/Models/BaseOpenAIModelOptions.cs | 5 + .../AI/Models/ChatCompletionToolCall.cs | 10 + .../AI/Models/ChatMessage.cs | 60 ++++- .../AI/Models/IPromptCompletionModelEvents.cs | 163 ++++++++++++ .../Models/IPromptCompletionStreamingModel.cs | 14 + .../AI/Models/OpenAIModel.cs | 140 ++++++++-- .../AI/Models/PromptChunk.cs | 13 + .../AI/Models/PromptCompletionModelEmitter.cs | 52 ++++ .../AI/Planners/ActionPlanner.cs | 23 +- .../AI/Planners/ActionPlannerOptions.cs | 11 + .../Application/StreamType.cs | 23 ++ 
.../Application/StreamingChannelData.cs | 38 +++ .../Application/StreamingResponse.cs | 251 ++++++++++++++++++ .../.editorconfig | 240 +++++++++++++++++ .../04.ai.g.teamsChefBot-streaming/.gitignore | 25 ++ .../ActionHandlers.cs | 25 ++ .../AdapterWithErrorHandler.cs | 26 ++ .../04.ai.g.teamsChefBot-streaming/Config.cs | 29 ++ .../Controllers/BotController.cs | 32 +++ .../KernelMemoryDataSource.cs | 120 +++++++++ .../04.ai.g.teamsChefBot-streaming/Program.cs | 187 +++++++++++++ .../Prompts/Chat/config.json | 24 ++ .../Prompts/Chat/skprompt.txt | 4 + .../Properties/launchSettings.json | 27 ++ .../04.ai.g.teamsChefBot-streaming/README.md | 209 +++++++++++++++ .../TeamsChefBot.csproj | 62 +++++ .../TeamsChefBot.sln | 25 ++ .../appPackage/color.png | 3 + .../appPackage/manifest.json | 48 ++++ .../appPackage/outline.png | 3 + .../appsettings.Development.json | 21 ++ .../appsettings.json | 20 ++ .../assets/TeamsChefBot.png | 3 + .../env/.env.dev | 18 ++ .../env/.env.local | 12 + .../files/README.md | 1 + .../files/action-planner.txt | 28 ++ .../files/actions.txt | 62 +++++ .../files/ai-system.txt | 7 + .../files/application.txt | 69 +++++ .../files/augmentations.txt | 193 ++++++++++++++ .../files/c#-migration.txt | 244 +++++++++++++++++ .../files/concepts.txt | 33 +++ .../files/data-sources.txt | 47 ++++ .../files/getting started.txt | 31 +++ .../files/github.txt | 19 ++ .../files/js-migration.txt | 162 +++++++++++ .../files/migration.txt | 29 ++ .../files/moderator.txt | 68 +++++ .../files/planner.txt | 85 ++++++ .../files/prompts.txt | 192 ++++++++++++++ .../files/quickstart.txt | 153 +++++++++++ .../files/samples.txt | 77 ++++++ .../files/turns.txt | 105 ++++++++ .../infra/azure.bicep | 113 ++++++++ .../infra/azure.parameters.json | 36 +++ .../infra/botRegistration/azurebot.bicep | 37 +++ .../infra/botRegistration/readme.md | 1 + .../teamsapp.local.yml | 87 ++++++ .../teamsapp.yml | 98 +++++++ 68 files changed, 4593 insertions(+), 45 deletions(-) create mode 100644 
dotnet/packages/Microsoft.TeamsAI/Microsoft.TeamsAI.Tests/Application/StreamingResponseTests.cs create mode 100644 dotnet/packages/Microsoft.TeamsAI/Microsoft.TeamsAI/AI/Models/IPromptCompletionModelEvents.cs create mode 100644 dotnet/packages/Microsoft.TeamsAI/Microsoft.TeamsAI/AI/Models/IPromptCompletionStreamingModel.cs create mode 100644 dotnet/packages/Microsoft.TeamsAI/Microsoft.TeamsAI/AI/Models/PromptChunk.cs create mode 100644 dotnet/packages/Microsoft.TeamsAI/Microsoft.TeamsAI/AI/Models/PromptCompletionModelEmitter.cs create mode 100644 dotnet/packages/Microsoft.TeamsAI/Microsoft.TeamsAI/Application/StreamType.cs create mode 100644 dotnet/packages/Microsoft.TeamsAI/Microsoft.TeamsAI/Application/StreamingChannelData.cs create mode 100644 dotnet/packages/Microsoft.TeamsAI/Microsoft.TeamsAI/Application/StreamingResponse.cs create mode 100644 dotnet/samples/04.ai.g.teamsChefBot-streaming/.editorconfig create mode 100644 dotnet/samples/04.ai.g.teamsChefBot-streaming/.gitignore create mode 100644 dotnet/samples/04.ai.g.teamsChefBot-streaming/ActionHandlers.cs create mode 100644 dotnet/samples/04.ai.g.teamsChefBot-streaming/AdapterWithErrorHandler.cs create mode 100644 dotnet/samples/04.ai.g.teamsChefBot-streaming/Config.cs create mode 100644 dotnet/samples/04.ai.g.teamsChefBot-streaming/Controllers/BotController.cs create mode 100644 dotnet/samples/04.ai.g.teamsChefBot-streaming/KernelMemoryDataSource.cs create mode 100644 dotnet/samples/04.ai.g.teamsChefBot-streaming/Program.cs create mode 100644 dotnet/samples/04.ai.g.teamsChefBot-streaming/Prompts/Chat/config.json create mode 100644 dotnet/samples/04.ai.g.teamsChefBot-streaming/Prompts/Chat/skprompt.txt create mode 100644 dotnet/samples/04.ai.g.teamsChefBot-streaming/Properties/launchSettings.json create mode 100644 dotnet/samples/04.ai.g.teamsChefBot-streaming/README.md create mode 100644 dotnet/samples/04.ai.g.teamsChefBot-streaming/TeamsChefBot.csproj create mode 100644 
dotnet/samples/04.ai.g.teamsChefBot-streaming/TeamsChefBot.sln create mode 100644 dotnet/samples/04.ai.g.teamsChefBot-streaming/appPackage/color.png create mode 100644 dotnet/samples/04.ai.g.teamsChefBot-streaming/appPackage/manifest.json create mode 100644 dotnet/samples/04.ai.g.teamsChefBot-streaming/appPackage/outline.png create mode 100644 dotnet/samples/04.ai.g.teamsChefBot-streaming/appsettings.Development.json create mode 100644 dotnet/samples/04.ai.g.teamsChefBot-streaming/appsettings.json create mode 100644 dotnet/samples/04.ai.g.teamsChefBot-streaming/assets/TeamsChefBot.png create mode 100644 dotnet/samples/04.ai.g.teamsChefBot-streaming/env/.env.dev create mode 100644 dotnet/samples/04.ai.g.teamsChefBot-streaming/env/.env.local create mode 100644 dotnet/samples/04.ai.g.teamsChefBot-streaming/files/README.md create mode 100644 dotnet/samples/04.ai.g.teamsChefBot-streaming/files/action-planner.txt create mode 100644 dotnet/samples/04.ai.g.teamsChefBot-streaming/files/actions.txt create mode 100644 dotnet/samples/04.ai.g.teamsChefBot-streaming/files/ai-system.txt create mode 100644 dotnet/samples/04.ai.g.teamsChefBot-streaming/files/application.txt create mode 100644 dotnet/samples/04.ai.g.teamsChefBot-streaming/files/augmentations.txt create mode 100644 dotnet/samples/04.ai.g.teamsChefBot-streaming/files/c#-migration.txt create mode 100644 dotnet/samples/04.ai.g.teamsChefBot-streaming/files/concepts.txt create mode 100644 dotnet/samples/04.ai.g.teamsChefBot-streaming/files/data-sources.txt create mode 100644 dotnet/samples/04.ai.g.teamsChefBot-streaming/files/getting started.txt create mode 100644 dotnet/samples/04.ai.g.teamsChefBot-streaming/files/github.txt create mode 100644 dotnet/samples/04.ai.g.teamsChefBot-streaming/files/js-migration.txt create mode 100644 dotnet/samples/04.ai.g.teamsChefBot-streaming/files/migration.txt create mode 100644 dotnet/samples/04.ai.g.teamsChefBot-streaming/files/moderator.txt create mode 100644 
dotnet/samples/04.ai.g.teamsChefBot-streaming/files/planner.txt create mode 100644 dotnet/samples/04.ai.g.teamsChefBot-streaming/files/prompts.txt create mode 100644 dotnet/samples/04.ai.g.teamsChefBot-streaming/files/quickstart.txt create mode 100644 dotnet/samples/04.ai.g.teamsChefBot-streaming/files/samples.txt create mode 100644 dotnet/samples/04.ai.g.teamsChefBot-streaming/files/turns.txt create mode 100644 dotnet/samples/04.ai.g.teamsChefBot-streaming/infra/azure.bicep create mode 100644 dotnet/samples/04.ai.g.teamsChefBot-streaming/infra/azure.parameters.json create mode 100644 dotnet/samples/04.ai.g.teamsChefBot-streaming/infra/botRegistration/azurebot.bicep create mode 100644 dotnet/samples/04.ai.g.teamsChefBot-streaming/infra/botRegistration/readme.md create mode 100644 dotnet/samples/04.ai.g.teamsChefBot-streaming/teamsapp.local.yml create mode 100644 dotnet/samples/04.ai.g.teamsChefBot-streaming/teamsapp.yml diff --git a/dotnet/packages/Microsoft.TeamsAI/Microsoft.TeamsAI.Tests/AITests/ActionPlannerTests.cs b/dotnet/packages/Microsoft.TeamsAI/Microsoft.TeamsAI.Tests/AITests/ActionPlannerTests.cs index a67966f0d..38be75e91 100644 --- a/dotnet/packages/Microsoft.TeamsAI/Microsoft.TeamsAI.Tests/AITests/ActionPlannerTests.cs +++ b/dotnet/packages/Microsoft.TeamsAI/Microsoft.TeamsAI.Tests/AITests/ActionPlannerTests.cs @@ -174,7 +174,8 @@ public async Task Test_ContinueTaskAsync_PlanNull() var modelMock = new Mock(); var response = new PromptResponse() { - Status = PromptResponseStatus.Success + Status = PromptResponseStatus.Success, + Message = new(ChatRole.System), }; modelMock.Setup(model => model.CompletePromptAsync( It.IsAny(), @@ -230,7 +231,8 @@ public async Task Test_ContinueTaskAsync() var modelMock = new Mock(); var response = new PromptResponse() { - Status = PromptResponseStatus.Success + Status = PromptResponseStatus.Success, + Message = new(ChatRole.System), }; modelMock.Setup(model => model.CompletePromptAsync( It.IsAny(), @@ -279,6 +281,64 @@ 
public async Task Test_ContinueTaskAsync() Assert.Equal(planMock.Object, result); } + [Fact] + public async Task Test_ContinueTaskAsync_Streaming() + { + // Arrange + var modelMock = new Mock(); + var response = new PromptResponse() + { + Status = PromptResponseStatus.Success, + }; + modelMock.Setup(model => model.CompletePromptAsync( + It.IsAny(), + It.IsAny(), + It.IsAny>>(), + It.IsAny(), + It.IsAny(), + It.IsAny())).ReturnsAsync(response); + var promptTemplate = new PromptTemplate( + "prompt", + new(new() { }) + ); + var augmentationMock = new Mock(); + var planMock = new Plan(); + augmentationMock.Setup(augmentation => augmentation.CreatePlanFromResponseAsync( + It.IsAny(), + It.IsAny(), + It.IsAny(), + It.IsAny())).ReturnsAsync(planMock); + augmentationMock.Setup(augmentation => augmentation.ValidateResponseAsync( + It.IsAny(), + It.IsAny(), + It.IsAny(), + It.IsAny(), + It.IsAny(), + It.IsAny())).ReturnsAsync(new Validation { Valid = true }); + promptTemplate.Augmentation = augmentationMock.Object; + var prompts = new PromptManager(); + prompts.AddPrompt("prompt", promptTemplate); + var options = new ActionPlannerOptions( + modelMock.Object, + prompts, + (context, state, planner) => Task.FromResult(promptTemplate) + ); + var turnContext = TurnStateConfig.CreateConfiguredTurnContext(); + var state = new TurnState(); + await state.LoadStateAsync(null, turnContext); + state.Temp.Input = "test"; + var planner = new ActionPlanner(options, new TestLoggerFactory()); + var ai = new AI(new(planner)); + + // Act + var result = await planner.ContinueTaskAsync(turnContext, state, ai); + + // Assert + Assert.Equal(planMock.Type, result.Type); + Assert.Equal(planMock.Commands, result.Commands); + } + + [Fact] public async Task Test_BeginTaskAsync_PromptResponseStatusError() { @@ -369,7 +429,8 @@ public async Task Test_BeginTaskAsync_PlanNull() var modelMock = new Mock(); var response = new PromptResponse() { - Status = PromptResponseStatus.Success + Status = 
PromptResponseStatus.Success, + Message = new(ChatRole.System), }; modelMock.Setup(model => model.CompletePromptAsync( It.IsAny(), @@ -425,7 +486,8 @@ public async Task Test_BeginTaskAsync() var modelMock = new Mock(); var response = new PromptResponse() { - Status = PromptResponseStatus.Success + Status = PromptResponseStatus.Success, + Message = new(ChatRole.System), }; modelMock.Setup(model => model.CompletePromptAsync( It.IsAny(), @@ -571,4 +633,4 @@ public void SetValue(string path, object value) } } } -} +} \ No newline at end of file diff --git a/dotnet/packages/Microsoft.TeamsAI/Microsoft.TeamsAI.Tests/AITests/ChatMessageTests.cs b/dotnet/packages/Microsoft.TeamsAI/Microsoft.TeamsAI.Tests/AITests/ChatMessageTests.cs index 5fc35139f..4c7b9a0fa 100644 --- a/dotnet/packages/Microsoft.TeamsAI/Microsoft.TeamsAI.Tests/AITests/ChatMessageTests.cs +++ b/dotnet/packages/Microsoft.TeamsAI/Microsoft.TeamsAI.Tests/AITests/ChatMessageTests.cs @@ -73,6 +73,33 @@ public void Test_Initialization_From_OpenAISdk_ChatMessage() Assert.Equal("test-content", context.Citations[0].Content); } + [Fact] + public void Test_Initialization_From_OpenAISdk_StreamingChatCompletionUpdate() + { + // Arrange + var chatCompletion = ModelReaderWriter.Read(BinaryData.FromString(@$"{{ + ""choices"": [ + {{ + ""finish_reason"": null, + ""delta"": {{ + ""role"": ""assistant"", + ""content"": ""hello"" + }} + }} + ] + }}")); + + // Act + var message = new ChatMessage(chatCompletion!); + + // Assert + Assert.Equal("hello", message.Content); + Assert.Equal(ChatRole.Assistant, message.Role); + + var context = message.Context; + Assert.Null(context); + } + [Fact] public void Test_InvalidRole_ToOpenAISdkChatMessage() { diff --git a/dotnet/packages/Microsoft.TeamsAI/Microsoft.TeamsAI.Tests/AITests/LLMClientTests.cs b/dotnet/packages/Microsoft.TeamsAI/Microsoft.TeamsAI.Tests/AITests/LLMClientTests.cs index ecb4d2340..7c1a51de6 100644 --- 
a/dotnet/packages/Microsoft.TeamsAI/Microsoft.TeamsAI.Tests/AITests/LLMClientTests.cs +++ b/dotnet/packages/Microsoft.TeamsAI/Microsoft.TeamsAI.Tests/AITests/LLMClientTests.cs @@ -1,13 +1,18 @@ -using Microsoft.Bot.Builder; +using System.ClientModel.Primitives; +using Microsoft.Bot.Builder; using Microsoft.Teams.AI.AI.Clients; using Microsoft.Teams.AI.AI.Models; using Microsoft.Teams.AI.AI.Prompts; using Microsoft.Teams.AI.AI.Tokenizers; using Microsoft.Teams.AI.AI.Validators; +using Microsoft.Teams.AI.Application; using Microsoft.Teams.AI.Exceptions; using Microsoft.Teams.AI.State; using Microsoft.Teams.AI.Tests.TestUtils; using Moq; +using OpenAI.Chat; +using static Microsoft.Teams.AI.AI.Models.IPromptCompletionModelEvents; +using ChatMessage = Microsoft.Teams.AI.AI.Models.ChatMessage; namespace Microsoft.Teams.AI.Tests.AITests { @@ -149,6 +154,44 @@ public async Task Test_CompletePromptAsync_PromptResponse_Success() Assert.Equal(2, ((List)memory.Values[options.HistoryVariable]).Count); } + [Fact] + public async Task Test_CompletePromptAsync_Streaming_Success() + { + // Arrange + List chunks = new(); + chunks.Add("h"); + chunks.Add("i"); + var promptCompletionModel = TestPromptCompletionStreamingModel.StreamTextChunks(chunks); + var promptTemplate = new PromptTemplate( + "prompt", + new(new() { }) + ); + + ResponseReceivedHandler handler = new((object sender, ResponseReceivedEventArgs args) => + { + Assert.Equal("hi", args.Streamer.Message); + }); + + LLMClientOptions options = new(promptCompletionModel, promptTemplate) + { + StartStreamingMessage = "Begin streaming", + EndStreamHandler = handler, + }; + LLMClient client = new(options, null); + TestMemory memory = new(); + + // Act + var response = await client.CompletePromptAsync(new Mock().Object, memory, new PromptManager()); + + // Assert + Assert.NotNull(response); + Assert.Equal(PromptResponseStatus.Success, response.Status); + Assert.Null(response.Error); + Assert.NotNull(response.Message); + 
Assert.Equal(ChatRole.Assistant, response.Message.Role); + Assert.Equal("hi", response.Message.Content); + } + [Fact] public async Task Test_CompletePromptAsync_PromptResponse_Exception() { @@ -483,6 +526,105 @@ public Task CompletePromptAsync(ITurnContext turnContext, IMemor } } + private sealed class TestPromptCompletionStreamingModel : IPromptCompletionStreamingModel + { + public delegate Task Handler(TestPromptCompletionStreamingModel model, ITurnContext turnContext, IMemory memory, IPromptFunctions> promptFunctions, ITokenizer tokenizer, PromptTemplate promptTemplate); + + public event Handler handler; + + public PromptCompletionModelEmitter? Events { get; set; } = new(); + + public TestPromptCompletionStreamingModel(Handler handler) + { + this.handler = handler; + } + + public static TestPromptCompletionStreamingModel StreamTextChunks(IList chunks, int delay = 0) + { + Handler handler = new(async (TestPromptCompletionStreamingModel model, ITurnContext turnContext, IMemory memory, IPromptFunctions> promptFunctions, ITokenizer tokenizer, PromptTemplate promptTemplate) => + { + BeforeCompletionEventArgs args = new(turnContext, memory, promptFunctions, tokenizer, promptTemplate, true); + + model.Events = new(); + + model.Events.OnBeforeCompletion(args); + + string content = ""; + + for (int i = 0; i < chunks.Count; i++) + { + await Task.Delay(TimeSpan.FromSeconds(0)); + string text = chunks[i]; + content += text; + if (i == 0) + { + var update = ModelReaderWriter.Read(BinaryData.FromString(@$"{{ + ""choices"": [ + {{ + ""finish_reason"": null, + ""delta"": {{ + ""role"": ""assistant"", + ""content"": ""${content}"" + }} + }} + ] + }}")); + + ChatMessage currDeltaMessage = new(update!); + PromptChunk chunk = new() { delta = currDeltaMessage }; + + ChunkReceivedEventArgs firstChunkArgs = new(turnContext, memory, chunk); + + model.Events.OnChunkReceived(firstChunkArgs); + } + else + { + var update = ModelReaderWriter.Read(BinaryData.FromString(@$"{{ + ""choices"": [ 
+ {{ + ""finish_reason"": null, + ""delta"": {{ + ""content"": ""${content}"" + }} + }} + ] + }}")); + + ChatMessage currDeltaMessage = new(update!); + PromptChunk chunk = new() { delta = currDeltaMessage }; + + ChunkReceivedEventArgs secondChunkArgs = new(turnContext, memory, chunk); + + model.Events.OnChunkReceived(secondChunkArgs); + } + + } + + await Task.Delay(TimeSpan.FromSeconds(delay)); + PromptResponse response = new() + { + Status = PromptResponseStatus.Success, + Message = new(ChatRole.Assistant) + { + Content = content, + } + }; + StreamingResponse streamer = new(turnContext); + ResponseReceivedEventArgs responseReceivedEventArgs = new(turnContext, memory, response, streamer); + + model.Events.OnResponseReceived(responseReceivedEventArgs); + return response; + }); + + return new TestPromptCompletionStreamingModel(handler); + } + + public Task CompletePromptAsync(ITurnContext turnContext, IMemory memory, IPromptFunctions> promptFunctions, ITokenizer tokenizer, PromptTemplate promptTemplate, CancellationToken cancellationToken) + { + return this.handler(this, turnContext, memory, promptFunctions, tokenizer, promptTemplate); + } + } + private sealed class TestValidator : IPromptResponseValidator { diff --git a/dotnet/packages/Microsoft.TeamsAI/Microsoft.TeamsAI.Tests/AITests/Models/OpenAIModelTests.cs b/dotnet/packages/Microsoft.TeamsAI/Microsoft.TeamsAI.Tests/AITests/Models/OpenAIModelTests.cs index 62c0a8e10..1e9e1bb47 100644 --- a/dotnet/packages/Microsoft.TeamsAI/Microsoft.TeamsAI.Tests/AITests/Models/OpenAIModelTests.cs +++ b/dotnet/packages/Microsoft.TeamsAI/Microsoft.TeamsAI.Tests/AITests/Models/OpenAIModelTests.cs @@ -16,6 +16,8 @@ using ChatRole = Microsoft.Teams.AI.AI.Models.ChatRole; using Azure.Identity; using Microsoft.Teams.AI.AI.Augmentations; +using Microsoft.Teams.AI.Application; +using Microsoft.Bot.Schema; namespace Microsoft.Teams.AI.Tests.AITests.Models { @@ -223,11 +225,12 @@ public async Task 
Test_CompletePromptAsync_AzureOpenAI_Chat_WithTools() { Actions = new List() { new ChatCompletionAction() { Name = "testAction" } }, Augmentation = new ToolsAugmentation(), - Configuration = new PromptTemplateConfiguration() - { - Augmentation = new AugmentationConfiguration() { + Configuration = new PromptTemplateConfiguration() + { + Augmentation = new AugmentationConfiguration() + { Type = AugmentationType.Tools - } + } } }; var options = new AzureOpenAIModelOptions("test-key", "test-deployment", "https://test.openai.azure.com/") @@ -273,14 +276,78 @@ public async Task Test_CompletePromptAsync_AzureOpenAI_Chat_WithTools() // Assert Assert.Equal(PromptResponseStatus.Success, result.Status); Assert.NotNull(result.Message); - + Assert.NotNull(result.Message.ActionCalls); Assert.Single(result.Message.ActionCalls); Assert.Equal("testAction", result.Message.ActionCalls[0].Function.Name); - + Assert.Null(result.Error); Assert.Equal(ChatRole.Assistant, result.Message.Role); Assert.Null(result.Message.Content); } + + [Fact] + public async Task Test_CompletePromptAsync_AzureOpenAI_Streaming() + { + // Arrange + ITurnContext turnContext = new TurnContext(new NotImplementedAdapter(), new Activity( + text: "hello", + channelId: "channelId", + recipient: new() { Id = "recipientId" }, + conversation: new() { Id = "conversationId" }, + from: new() { Id = "fromId" } + )); + var streamer = new StreamingResponse(turnContext); + var state = new TurnState(); + await state.LoadStateAsync(null, turnContext); + state.SetValue("temp.streamer", streamer); + var renderedPrompt = new RenderedPromptSection>(new List(), length: 256, tooLong: false); + var promptMock = new Mock(new List(), -1, true, "\n\n"); + promptMock.Setup((prompt) => prompt.RenderAsMessagesAsync( + It.IsAny(), It.IsAny(), It.IsAny>>(), + It.IsAny(), It.IsAny(), It.IsAny())).ReturnsAsync(renderedPrompt); + var promptTemplate = new PromptTemplate("test-prompt", promptMock.Object); + var options = new 
AzureOpenAIModelOptions("test-key", "test-deployment", "https://test.openai.azure.com/") + { + CompletionType = CompletionConfiguration.CompletionType.Chat, + LogRequests = true, + Stream = true, + }; + var clientMock = new Mock(); + var update = ModelReaderWriter.Read(BinaryData.FromString(@$"{{ + ""choices"": [ + {{ + ""finish_reason"": null, + ""delta"": {{ + ""role"": ""assistant"", + ""content"": ""chunk one"" + }} + }} + ] + }}")); + + TestAsyncResultCollection updates = new(update!, Mock.Of()); + + var response = new TestResponse(200, string.Empty); + clientMock.Setup((client) => + client + .GetChatClient(It.IsAny()) + .CompleteChatStreamingAsync(It.IsAny>(), It.IsAny(), It.IsAny()) + ).Returns(ClientResult.FromValue(updates, response)); + + var openAIModel = new OpenAIModel(options, loggerFactory: new TestLoggerFactory()); + openAIModel.GetType().GetField("_openAIClient", BindingFlags.Instance | BindingFlags.NonPublic)!.SetValue(openAIModel, clientMock.Object); + + // Act + var result = await openAIModel.CompletePromptAsync(turnContext, state, new PromptManager(), new GPTTokenizer(), promptTemplate); + + // Assert + Assert.Equal(PromptResponseStatus.Success, result.Status); + Assert.NotNull(result.Message); + Assert.Null(result.Error); + Assert.Equal(ChatRole.Assistant, result.Message.Role); + Assert.Equal("chunk one", result.Message.Content); + } + } -} +} \ No newline at end of file diff --git a/dotnet/packages/Microsoft.TeamsAI/Microsoft.TeamsAI.Tests/Application/StreamingResponseTests.cs b/dotnet/packages/Microsoft.TeamsAI/Microsoft.TeamsAI.Tests/Application/StreamingResponseTests.cs new file mode 100644 index 000000000..5e8119b87 --- /dev/null +++ b/dotnet/packages/Microsoft.TeamsAI/Microsoft.TeamsAI.Tests/Application/StreamingResponseTests.cs @@ -0,0 +1,238 @@ + +using AdaptiveCards; +using Microsoft.Bot.Builder; +using Microsoft.Bot.Schema; +using Microsoft.Teams.AI.Application; +using Microsoft.Teams.AI.Exceptions; +using 
Microsoft.Teams.AI.Tests.TestUtils; + +namespace Microsoft.Teams.AI.Tests.Application +{ + public class StreamingResponseTests + { + [Fact] + public async Task Test_Informative_Update_SingleUpdate() + { + + // Arrange + Activity[]? activitiesToSend = null; + void CaptureSend(Activity[] arg) + { + activitiesToSend = arg; + } + var adapter = new SimpleAdapter(CaptureSend); + ITurnContext turnContext = new TurnContext(adapter, new Activity( + text: "hello", + channelId: "channelId", + recipient: new() { Id = "recipientId" }, + conversation: new() { Id = "conversationId" }, + from: new() { Id = "fromId" } + )); + StreamingResponse streamer = new(turnContext); + streamer.QueueInformativeUpdate("starting"); + await streamer.WaitForQueue(); + + Assert.Equal(1, streamer.UpdatesSent()); + } + + [Fact] + public async Task Test_Informative_Update_DoubleUpdate() + { + // Arrange + Activity[]? activitiesToSend = null; + void CaptureSend(Activity[] arg) + { + activitiesToSend = arg; + } + var adapter = new SimpleAdapter(CaptureSend); + ITurnContext turnContext = new TurnContext(adapter, new Activity( + text: "hello", + channelId: "channelId", + recipient: new() { Id = "recipientId" }, + conversation: new() { Id = "conversationId" }, + from: new() { Id = "fromId" } + )); + StreamingResponse streamer = new(turnContext); + streamer.QueueInformativeUpdate("first"); + streamer.QueueInformativeUpdate("second"); + await streamer.WaitForQueue(); + + Assert.Equal(2, streamer.UpdatesSent()); + } + + [Fact] + public async Task Test_Informative_Update_AssertThrows() + { + // Arrange + Activity[]? 
activitiesToSend = null; + void CaptureSend(Activity[] arg) + { + activitiesToSend = arg; + } + var adapter = new SimpleAdapter(CaptureSend); + ITurnContext turnContext = new TurnContext(adapter, new Activity( + text: "hello", + channelId: "channelId", + recipient: new() { Id = "recipientId" }, + conversation: new() { Id = "conversationId" }, + from: new() { Id = "fromId" } + )); + StreamingResponse streamer = new(turnContext); + await streamer.EndStream(); + + // Act + var func = () => streamer.QueueInformativeUpdate("first"); + + // Assert + Exception ex = Assert.Throws(() => func()); + + Assert.Equal("The stream has already ended.", ex.Message); + } + + [Fact] + public async Task Test_SendTextChunk() + { + // Arrange + Activity[]? activitiesToSend = null; + void CaptureSend(Activity[] arg) + { + activitiesToSend = arg; + } + var adapter = new SimpleAdapter(CaptureSend); + ITurnContext turnContext = new TurnContext(adapter, new Activity( + text: "hello", + channelId: "channelId", + recipient: new() { Id = "recipientId" }, + conversation: new() { Id = "conversationId" }, + from: new() { Id = "fromId" } + )); + StreamingResponse streamer = new(turnContext); + streamer.QueueTextChunk("first"); + await streamer.WaitForQueue(); + streamer.QueueTextChunk("second"); + await streamer.WaitForQueue(); + Assert.Equal(2, streamer.UpdatesSent()); + } + + [Fact] + public async Task Test_SendTextChunk_AssertThrows() + { + // Arrange + Activity[]? 
activitiesToSend = null; + void CaptureSend(Activity[] arg) + { + activitiesToSend = arg; + } + var adapter = new SimpleAdapter(CaptureSend); + ITurnContext turnContext = new TurnContext(adapter, new Activity( + text: "hello", + channelId: "channelId", + recipient: new() { Id = "recipientId" }, + conversation: new() { Id = "conversationId" }, + from: new() { Id = "fromId" } + )); + StreamingResponse streamer = new(turnContext); + streamer.QueueTextChunk("first"); + await streamer.WaitForQueue(); + streamer.QueueTextChunk("second"); + await streamer.WaitForQueue(); + await streamer.EndStream(); + + // Act + var func = () => streamer.QueueTextChunk("third"); + + // Assert + Exception ex = Assert.Throws(() => func()); + + Assert.Equal("The stream has already ended.", ex.Message); + Assert.Equal(2, streamer.UpdatesSent()); + } + + [Fact] + public async Task Test_SendTextChunk_EndStreamImmediately() + { + // Arrange + Activity[]? activitiesToSend = null; + void CaptureSend(Activity[] arg) + { + activitiesToSend = arg; + } + var adapter = new SimpleAdapter(CaptureSend); + ITurnContext turnContext = new TurnContext(adapter, new Activity( + text: "hello", + channelId: "channelId", + recipient: new() { Id = "recipientId" }, + conversation: new() { Id = "conversationId" }, + from: new() { Id = "fromId" } + )); + StreamingResponse streamer = new(turnContext); + await streamer.EndStream(); + Assert.Equal(0, streamer.UpdatesSent()); + } + + [Fact] + public async Task Test_SendTextChunk_SendsFinalMessage() + { + // Arrange + Activity[]? 
activitiesToSend = null; + void CaptureSend(Activity[] arg) + { + activitiesToSend = arg; + } + var adapter = new SimpleAdapter(CaptureSend); + ITurnContext turnContext = new TurnContext(adapter, new Activity( + text: "hello", + channelId: "channelId", + recipient: new() { Id = "recipientId" }, + conversation: new() { Id = "conversationId" }, + from: new() { Id = "fromId" } + )); + StreamingResponse streamer = new(turnContext); + streamer.QueueTextChunk("first"); + await streamer.WaitForQueue(); + streamer.QueueTextChunk("second"); + await streamer.WaitForQueue(); + await streamer.EndStream(); + Assert.Equal(2, streamer.UpdatesSent()); + } + + [Fact] + public async Task Test_SendTextChunk_SendsFinalMessageWithAttachments() + { + // Arrange + Activity[]? activitiesToSend = null; + void CaptureSend(Activity[] arg) + { + activitiesToSend = arg; + } + var adapter = new SimpleAdapter(CaptureSend); + ITurnContext turnContext = new TurnContext(adapter, new Activity( + text: "hello", + channelId: "channelId", + recipient: new() { Id = "recipientId" }, + conversation: new() { Id = "conversationId" }, + from: new() { Id = "fromId" } + )); + StreamingResponse streamer = new(turnContext); + streamer.QueueTextChunk("first"); + await streamer.WaitForQueue(); + streamer.QueueTextChunk("second"); + + AdaptiveCard adaptiveCard = new(); + + var adaptiveCardAttachment = new Attachment() + { + ContentType = "application/vnd.microsoft.card.adaptive", + Content = adaptiveCard, + }; + + + streamer.Attachments = new List(); + streamer.Attachments.Add(adaptiveCardAttachment); + await streamer.WaitForQueue(); + await streamer.EndStream(); + Assert.Equal(2, streamer.UpdatesSent()); + Assert.Single(streamer.Attachments); + } + } +} diff --git a/dotnet/packages/Microsoft.TeamsAI/Microsoft.TeamsAI.Tests/TestUtils/OpenAIModelFactory.cs b/dotnet/packages/Microsoft.TeamsAI/Microsoft.TeamsAI.Tests/TestUtils/OpenAIModelFactory.cs index 50f84ab84..fdff163f4 100644 --- 
a/dotnet/packages/Microsoft.TeamsAI/Microsoft.TeamsAI.Tests/TestUtils/OpenAIModelFactory.cs +++ b/dotnet/packages/Microsoft.TeamsAI/Microsoft.TeamsAI.Tests/TestUtils/OpenAIModelFactory.cs @@ -1,5 +1,4 @@ -using Microsoft.VisualStudio.TestPlatform.CommunicationUtilities; -using OpenAI.Assistants; +using OpenAI.Assistants; using OpenAI.Files; using System.ClientModel; using System.ClientModel.Primitives; @@ -219,4 +218,24 @@ public ValueTask MoveNextAsync() } } + + internal sealed class TestAsyncResultCollection : AsyncCollectionResult where T : class + { + public List Items = new(); + + internal PipelineResponse _pipelineResponse; + + public TestAsyncResultCollection(T item, PipelineResponse response) + { + Items.Add(item); + _pipelineResponse = response; + } + +#pragma warning disable CS1998 // Async method lacks 'await' operators and will run synchronously + public override async IAsyncEnumerator GetAsyncEnumerator(CancellationToken cancellationToken = default) +#pragma warning restore CS1998 // Async method lacks 'await' operators and will run synchronously + { + yield return FromValue(Items[0], _pipelineResponse); + } + } } diff --git a/dotnet/packages/Microsoft.TeamsAI/Microsoft.TeamsAI/AI/Clients/LLMClient.cs b/dotnet/packages/Microsoft.TeamsAI/Microsoft.TeamsAI/AI/Clients/LLMClient.cs index 8b76b4144..e0097d935 100644 --- a/dotnet/packages/Microsoft.TeamsAI/Microsoft.TeamsAI/AI/Clients/LLMClient.cs +++ b/dotnet/packages/Microsoft.TeamsAI/Microsoft.TeamsAI/AI/Clients/LLMClient.cs @@ -5,7 +5,9 @@ using Microsoft.Teams.AI.AI.Prompts; using Microsoft.Teams.AI.AI.Prompts.Sections; using Microsoft.Teams.AI.AI.Validators; +using Microsoft.Teams.AI.Application; using Microsoft.Teams.AI.State; +using static Microsoft.Teams.AI.AI.Models.IPromptCompletionModelEvents; namespace Microsoft.Teams.AI.AI.Clients { @@ -62,6 +64,9 @@ public class LLMClient private readonly ILogger _logger; + private readonly string? _startStreamingMessage; + private ResponseReceivedHandler? 
_endStreamHandler; + /// /// Creates a new `LLMClient` instance. /// @@ -76,6 +81,9 @@ public LLMClient(LLMClientOptions options, ILoggerFactory? loggerFacto { throw new ArgumentException($"`{nameof(loggerFactory)}` parameter cannot be null if `LogRepairs` option is set to true"); } + + this._startStreamingMessage = Options.StartStreamingMessage; + this._endStreamHandler = Options.EndStreamHandler; } /// @@ -144,6 +152,64 @@ public async Task CompletePromptAsync( CancellationToken cancellationToken = default ) { + // Define event handlers + bool isStreaming = false; + StreamingResponse? streamer = null; + + BeforeCompletionHandler handleBeforeCompletion = new((object sender, BeforeCompletionEventArgs args) => + { + // Ignore events for other contexts + if (args.TurnContext != context) + { + return; + } + + if (args.Streaming) + { + isStreaming = true; + + // Create streamer and send initial message + streamer = new StreamingResponse(context); + memory.SetValue("temp.streamer", streamer); + if (!string.IsNullOrEmpty(this._startStreamingMessage)) + { + streamer.QueueInformativeUpdate(this._startStreamingMessage!); + } + } + }); + + ChunkReceivedHandler handleChunkReceived = new((object sender, ChunkReceivedEventArgs args) => + { + if (args.TurnContext != context || streamer == null) + { + return; + } + + // Send chunk to client + string text = args.Chunk.delta?.GetContent() ?? 
""; + if (text.Length > 0) + { + streamer.QueueTextChunk(text); + } + }); + + // Subscribe to model events + if (this.Options.Model is IPromptCompletionStreamingModel) + { + IPromptCompletionStreamingModel model = (IPromptCompletionStreamingModel)Options.Model; + + if (model.Events != null) + { + model.Events.BeforeCompletion += handleBeforeCompletion; + model.Events.ChunkReceived += handleChunkReceived; + + if (this._endStreamHandler != null) + { + model.Events.ResponseReceived += this._endStreamHandler; + } + } + } + try { PromptResponse response = await this.Options.Model.CompletePromptAsync( @@ -159,6 +225,20 @@ public async Task CompletePromptAsync( { return response; } + else + { + if (isStreaming) + { + // Delete the message from the response to avoid sending it twice. + response.Message = null; + } + } + + // End the stream + if (streamer != null) + { + await streamer.EndStream(); + } // Get input message/s string inputVariable = Options.InputVariable; @@ -244,6 +324,25 @@ public async Task CompletePromptAsync( Error = new(ex.Message ?? 
string.Empty) }; } + finally + { + + // Unsubscribe to model events + if (this.Options.Model is IPromptCompletionStreamingModel) + { + IPromptCompletionStreamingModel model = (IPromptCompletionStreamingModel)Options.Model; + + if (model.Events != null) + { + model.Events.BeforeCompletion -= handleBeforeCompletion; + model.Events.ChunkReceived -= handleChunkReceived; + if (this._endStreamHandler != null) + { + model.Events.ResponseReceived -= this._endStreamHandler; + } + } + } + } } private async Task RepairResponseAsync( diff --git a/dotnet/packages/Microsoft.TeamsAI/Microsoft.TeamsAI/AI/Clients/LLMClientOptions.cs b/dotnet/packages/Microsoft.TeamsAI/Microsoft.TeamsAI/AI/Clients/LLMClientOptions.cs index ee8b397c0..e9e47050b 100644 --- a/dotnet/packages/Microsoft.TeamsAI/Microsoft.TeamsAI/AI/Clients/LLMClientOptions.cs +++ b/dotnet/packages/Microsoft.TeamsAI/Microsoft.TeamsAI/AI/Clients/LLMClientOptions.cs @@ -2,6 +2,7 @@ using Microsoft.Teams.AI.AI.Prompts; using Microsoft.Teams.AI.AI.Tokenizers; using Microsoft.Teams.AI.AI.Validators; +using static Microsoft.Teams.AI.AI.Models.IPromptCompletionModelEvents; namespace Microsoft.Teams.AI.AI.Clients { @@ -62,6 +63,16 @@ public class LLMClientOptions /// public bool LogRepairs { get; set; } = false; + /// + /// Optional message to send a client at the start of a streaming response. + /// + public string? StartStreamingMessage { get; set; } + + /// + /// Optional handler to run when a stream is about to conclude. + /// + public ResponseReceivedHandler? 
EndStreamHandler; + /// /// Creates an instance of `LLMClientOptions` /// diff --git a/dotnet/packages/Microsoft.TeamsAI/Microsoft.TeamsAI/AI/Models/BaseOpenAIModelOptions.cs b/dotnet/packages/Microsoft.TeamsAI/Microsoft.TeamsAI/AI/Models/BaseOpenAIModelOptions.cs index 5112628ee..e1622f060 100644 --- a/dotnet/packages/Microsoft.TeamsAI/Microsoft.TeamsAI/AI/Models/BaseOpenAIModelOptions.cs +++ b/dotnet/packages/Microsoft.TeamsAI/Microsoft.TeamsAI/AI/Models/BaseOpenAIModelOptions.cs @@ -42,5 +42,10 @@ public abstract class BaseOpenAIModelOptions /// prompt to be sent as `user` messages instead. /// public bool? UseSystemMessages { get; set; } + + /// + /// Optional. Whether the model's responses should be streamed back using Server Sent Events (SSE). + /// + public bool? Stream { get; set; } = false; } } diff --git a/dotnet/packages/Microsoft.TeamsAI/Microsoft.TeamsAI/AI/Models/ChatCompletionToolCall.cs b/dotnet/packages/Microsoft.TeamsAI/Microsoft.TeamsAI/AI/Models/ChatCompletionToolCall.cs index 80152ef5e..d3a484d8c 100644 --- a/dotnet/packages/Microsoft.TeamsAI/Microsoft.TeamsAI/AI/Models/ChatCompletionToolCall.cs +++ b/dotnet/packages/Microsoft.TeamsAI/Microsoft.TeamsAI/AI/Models/ChatCompletionToolCall.cs @@ -65,6 +65,16 @@ internal static ChatCompletionsToolCall FromChatToolCall(ChatToolCall toolCall) throw new TeamsAIException($"Invalid ChatCompletionsToolCall type: {toolCall.GetType().Name}"); } + + internal static ChatCompletionsToolCall FromStreamingChatToolCall(StreamingChatToolCallUpdate toolCall) + { + if (toolCall.Kind == ChatToolCallKind.Function) + { + return new ChatCompletionsFunctionToolCall(toolCall.Id, toolCall.FunctionName, toolCall.FunctionArgumentsUpdate); + } + + throw new TeamsAIException($"Invalid ChatCompletionsToolCall type: {toolCall.GetType().Name}"); + } } /// diff --git a/dotnet/packages/Microsoft.TeamsAI/Microsoft.TeamsAI/AI/Models/ChatMessage.cs b/dotnet/packages/Microsoft.TeamsAI/Microsoft.TeamsAI/AI/Models/ChatMessage.cs index 
de994d6fa..871ddfc49 100644 --- a/dotnet/packages/Microsoft.TeamsAI/Microsoft.TeamsAI/AI/Models/ChatMessage.cs +++ b/dotnet/packages/Microsoft.TeamsAI/Microsoft.TeamsAI/AI/Models/ChatMessage.cs @@ -1,4 +1,5 @@ -using Azure.AI.OpenAI; +using System.Diagnostics; +using Azure.AI.OpenAI; using Azure.AI.OpenAI.Chat; using Microsoft.Bot.Schema; using Microsoft.Teams.AI.Exceptions; @@ -138,6 +139,47 @@ internal ChatMessage(ChatCompletion chatCompletion) } } + /// + /// Initializes a new instance of ChatMessage using OpenAI.Chat.StreamingChatCompletionUpdate. + /// + /// The streaming chat completion update. + internal ChatMessage(StreamingChatCompletionUpdate streamingChatCompletionUpdate) + { + this.Role = ChatRole.Assistant; + + if (streamingChatCompletionUpdate.ContentUpdate.Count > 0) + { + this.Content = streamingChatCompletionUpdate.ContentUpdate[0].Text; + } + + if (streamingChatCompletionUpdate.FunctionCallUpdate != null && streamingChatCompletionUpdate.FunctionCallUpdate.FunctionName != string.Empty) + { + this.Name = streamingChatCompletionUpdate.FunctionCallUpdate.FunctionName; + this.FunctionCall = new FunctionCall(streamingChatCompletionUpdate.FunctionCallUpdate.FunctionName, streamingChatCompletionUpdate.FunctionCallUpdate.FunctionArgumentsUpdate); + } + + if (streamingChatCompletionUpdate.ToolCallUpdates != null && streamingChatCompletionUpdate.ToolCallUpdates.Count > 0) + { + this.ActionCalls = new List(); + foreach (StreamingChatToolCallUpdate toolCall in streamingChatCompletionUpdate.ToolCallUpdates) + { + this.ActionCalls.Add(new ActionCall(toolCall)); + } + } + +#pragma warning disable AOAI001 // Type is for evaluation purposes only and is subject to change or removal in future updates. Suppress this diagnostic to proceed. + AzureChatMessageContext? azureContext = streamingChatCompletionUpdate.GetAzureMessageContext(); +#pragma warning restore AOAI001 // Type is for evaluation purposes only and is subject to change or removal in future updates. 
Suppress this diagnostic to proceed. + if (azureContext != null) + { + MessageContext? context = new(azureContext); + if (context != null) + { + this.Context = context; + } + } + } + internal OAI.Chat.ChatMessage ToOpenAIChatMessage() { Verify.NotNull(this.Role); @@ -355,6 +397,22 @@ public ActionCall(ChatToolCall toolCall) Function = new ActionFunction(toolCall.FunctionName, toolCall.FunctionArguments); } + /// + /// Creates an instance of from + /// + /// + /// Thrown if `toolCall` has an invalid type + public ActionCall(StreamingChatToolCallUpdate toolCall) + { + if (toolCall.Kind != ChatToolCallKind.Function) + { + throw new TeamsAIException($"Invalid ActionCall type: {toolCall.GetType().Name}"); + } + + Id = toolCall.Id; + Function = new ActionFunction(toolCall.FunctionName, toolCall.FunctionArgumentsUpdate); + } + internal ChatToolCall ToChatToolCall() { if (this.Type == ActionCallType.Function) diff --git a/dotnet/packages/Microsoft.TeamsAI/Microsoft.TeamsAI/AI/Models/IPromptCompletionModelEvents.cs b/dotnet/packages/Microsoft.TeamsAI/Microsoft.TeamsAI/AI/Models/IPromptCompletionModelEvents.cs new file mode 100644 index 000000000..9619c8a17 --- /dev/null +++ b/dotnet/packages/Microsoft.TeamsAI/Microsoft.TeamsAI/AI/Models/IPromptCompletionModelEvents.cs @@ -0,0 +1,163 @@ +using Microsoft.Bot.Builder; +using Microsoft.Teams.AI.AI.Prompts; +using Microsoft.Teams.AI.AI.Tokenizers; +using Microsoft.Teams.AI.Application; +using Microsoft.Teams.AI.State; + +namespace Microsoft.Teams.AI.AI.Models +{ + /// + /// Events emitted by a IPromptCompletionStreamingModel. + /// + public interface IPromptCompletionModelEvents + { + + /// + /// Defines the method that is triggered before the model is called to complete a prompt. + /// + /// + public delegate void BeforeCompletionHandler(object sender, BeforeCompletionEventArgs args); + + /// + /// Defines the method that is triggered when a chunk is received from the model via streaming. 
+ /// + /// + public delegate void ChunkReceivedHandler(object sender, ChunkReceivedEventArgs args); + + /// + /// Defines the method that is triggered after the model finishes returning a response. + /// + /// + public delegate void ResponseReceivedHandler(object sender, ResponseReceivedEventArgs args); + } + + /// + /// Defines the arguments for a BeforeCompletion event. + /// + public class BeforeCompletionEventArgs : EventArgs + { + /// + /// Current turn context. + /// + public ITurnContext TurnContext { get; set; } + + /// + /// An interface for accessing state values. + /// + public IMemory Memory { get; set; } + + /// + /// Functions to use when rendering the prompt. + /// + public IPromptFunctions> PromptFunctions { get; set; } + + /// + /// Tokenizer to use when rendering the prompt. + /// + public ITokenizer Tokenizer { get; set; } + + /// + /// Prompt template being completed. + /// + public PromptTemplate PromptTemplate { get; set; } + + /// + /// Returns 'true' if the prompt response is being streamed. + /// + public bool Streaming { get; set; } + + /// + /// Creates a new instance of the BeforeCompletionEventArgs. + /// + /// Current turn context. + /// An interface for accessing state. + /// Functions to use when rendering the prompt. + /// Tokenizer to ue when rendering the prompt. + /// Prompt template being configured. + /// Returns true if streaming is enabled. + public BeforeCompletionEventArgs(ITurnContext turnContext, IMemory memory, IPromptFunctions> promptFunctions, ITokenizer tokenizer, PromptTemplate promptTemplate, bool streaming) + { + this.TurnContext = turnContext; + this.Memory = memory; + this.PromptFunctions = promptFunctions; + this.Tokenizer = tokenizer; + this.PromptTemplate = promptTemplate; + this.Streaming = streaming; + } + } + + /// + /// Defines the arguments for a ChunkReceived event. + /// + public class ChunkReceivedEventArgs : EventArgs + { + /// + /// Current turn context. 
+ /// + public ITurnContext TurnContext { get; set; } + + /// + /// An interface for accessing state values. + /// + public IMemory Memory { get; set; } + + /// + /// Message delta received from the model. + /// + public PromptChunk Chunk { get; set; } + + /// + /// Creates a new instance of ChunkReceivedEventArgs. + /// + /// Current turn context. + /// An interface for accessing state. + /// Message delta received from the model. + public ChunkReceivedEventArgs(ITurnContext turnContext, IMemory memory, PromptChunk chunk) + { + this.TurnContext = turnContext; + this.Memory = memory; + this.Chunk = chunk; + } + } + + /// + /// Defines the arguments for a ResponseReceived event. + /// + public class ResponseReceivedEventArgs : EventArgs + { + /// + /// Current turn context. + /// + public ITurnContext TurnContext { get; set; } + + /// + /// An interface for accessing state values. + /// + public IMemory Memory { get; set; } + + /// + /// Final response returned by the model. + /// + public PromptResponse Response { get; set; } + + /// + /// Streamer object instance. + /// + public StreamingResponse Streamer { get; set; } + + /// + /// Creates a new instance of ResponseReceivedEventArgs. + /// + /// Current turn context. + /// An interface for accessing state. + /// Response returned by the model. + /// Streamer instance. 
+ public ResponseReceivedEventArgs(ITurnContext turnContext, IMemory memory, PromptResponse response, StreamingResponse streamer) + { + this.TurnContext = turnContext; + this.Memory = memory; + this.Response = response; + this.Streamer = streamer; + } + } +} diff --git a/dotnet/packages/Microsoft.TeamsAI/Microsoft.TeamsAI/AI/Models/IPromptCompletionStreamingModel.cs b/dotnet/packages/Microsoft.TeamsAI/Microsoft.TeamsAI/AI/Models/IPromptCompletionStreamingModel.cs new file mode 100644 index 000000000..5eafbd013 --- /dev/null +++ b/dotnet/packages/Microsoft.TeamsAI/Microsoft.TeamsAI/AI/Models/IPromptCompletionStreamingModel.cs @@ -0,0 +1,14 @@ + +namespace Microsoft.Teams.AI.AI.Models +{ + /// + /// An AI model that can be used to complete streaming prompts. + /// + public interface IPromptCompletionStreamingModel : IPromptCompletionModel + { + /// + /// Optional. Events emitted by the model. + /// + PromptCompletionModelEmitter? Events { get; set; } + } +} diff --git a/dotnet/packages/Microsoft.TeamsAI/Microsoft.TeamsAI/AI/Models/OpenAIModel.cs b/dotnet/packages/Microsoft.TeamsAI/Microsoft.TeamsAI/AI/Models/OpenAIModel.cs index 43937307f..b3fbad3c0 100644 --- a/dotnet/packages/Microsoft.TeamsAI/Microsoft.TeamsAI/AI/Models/OpenAIModel.cs +++ b/dotnet/packages/Microsoft.TeamsAI/Microsoft.TeamsAI/AI/Models/OpenAIModel.cs @@ -19,13 +19,14 @@ using Azure.AI.OpenAI.Chat; using OpenAI.Chat; using Microsoft.Recognizers.Text.NumberWithUnit.Dutch; +using Microsoft.Teams.AI.Application; namespace Microsoft.Teams.AI.AI.Models { /// /// A `PromptCompletionModel` for calling OpenAI and Azure OpenAI hosted models. /// - public class OpenAIModel : IPromptCompletionModel + public class OpenAIModel : IPromptCompletionStreamingModel { private readonly BaseOpenAIModelOptions _options; private readonly ILogger _logger; @@ -41,6 +42,11 @@ public class OpenAIModel : IPromptCompletionModel private static readonly string _userAgent = "AlphaWave"; + /// + /// Events emitted by the model. 
+ /// + public PromptCompletionModelEmitter? Events { get; set; } = new(); + /// /// Initializes a new instance of the class. /// @@ -148,6 +154,13 @@ public async Task CompletePromptAsync(ITurnContext turnContext, CompletionConfiguration completion = promptTemplate.Configuration.Completion; int maxInputTokens = completion.MaxInputTokens; + if (this._options.Stream == true && Events != null) + { + // Signal start of completion + BeforeCompletionEventArgs beforeCompletionEventArgs = new(turnContext, memory, promptFunctions, tokenizer, promptTemplate, this._options.Stream ?? false); + Events.OnBeforeCompletion(beforeCompletionEventArgs); + } + // Setup tools if enabled bool isToolsAugmentation = promptTemplate.Configuration.Augmentation.Type == Augmentations.AugmentationType.Tools; List tools = new(); @@ -176,7 +189,7 @@ public async Task CompletePromptAsync(ITurnContext turnContext, { prompt.Output[0].Role = ChatRole.User; } - + if (_options.LogRequests!.Value) { _logger.LogTrace("CHAT PROMPT:"); @@ -220,13 +233,72 @@ public async Task CompletePromptAsync(ITurnContext turnContext, string model = promptTemplate.Configuration.Completion.Model ?? _deploymentName; - PipelineResponse? rawResponse; + PipelineResponse? rawResponse = null; ClientResult? 
chatCompletionsResponse = null; PromptResponse promptResponse = new(); try { - chatCompletionsResponse = await _openAIClient.GetChatClient(model).CompleteChatAsync(chatMessages, chatCompletionOptions, cancellationToken); - rawResponse = chatCompletionsResponse.GetRawResponse(); + if (this._options.Stream == true) + { + if (_options.LogRequests!.Value) + { + // TODO: Colorize + _logger.LogTrace("STREAM STARTED:"); + } + + // Enumerate the stream chunks + ChatMessage message = new(ChatRole.Assistant) + { + Content = "" + }; + AsyncCollectionResult streamCompletion = _openAIClient.GetChatClient(_deploymentName).CompleteChatStreamingAsync(chatMessages, chatCompletionOptions, cancellationToken); + + await foreach (StreamingChatCompletionUpdate delta in streamCompletion) + { + if (delta.Role != null) + { + string role = delta.Role.ToString(); + message.Role = new ChatRole(role); + } + + if (delta.ContentUpdate.Count > 0) + { + message.Content += delta.ContentUpdate[0].Text; + } + + // TODO: Handle tool calls + + ChatMessage currDeltaMessage = new(delta); + PromptChunk chunk = new() + { + delta = currDeltaMessage + }; + + ChunkReceivedEventArgs args = new(turnContext, memory, chunk); + + // Signal chunk received + if (_options.LogRequests!.Value) + { + _logger.LogTrace("CHUNK", delta); + } + + Events!.OnChunkReceived(args); + } + + promptResponse.Message = message; + + // Log stream completion + if (_options.LogRequests!.Value) + { + _logger.LogTrace("STREAM COMPLETED"); + } + } + else { + chatCompletionsResponse = await _openAIClient.GetChatClient(model).CompleteChatAsync(chatMessages, chatCompletionOptions, cancellationToken); + rawResponse = chatCompletionsResponse.GetRawResponse(); + promptResponse.Message = new ChatMessage(chatCompletionsResponse.Value); + } + promptResponse.Status = PromptResponseStatus.Success; } catch (ClientResultException e) @@ -249,37 +321,61 @@ public async Task CompletePromptAsync(ITurnContext turnContext, { // TODO: Colorize 
_logger.LogTrace("RESPONSE:"); - _logger.LogTrace($"status {rawResponse!.Status}"); _logger.LogTrace($"duration {(DateTime.UtcNow - startTime).TotalMilliseconds} ms"); - if (promptResponse.Status == PromptResponseStatus.Success) + if (promptResponse.Status == PromptResponseStatus.Success && chatCompletionsResponse != null) { - _logger.LogTrace(JsonSerializer.Serialize(chatCompletionsResponse!.Value, _serializerOptions)); + _logger.LogTrace(JsonSerializer.Serialize(chatCompletionsResponse.Value, _serializerOptions)); } - if (promptResponse.Status == PromptResponseStatus.RateLimited) + + if (rawResponse != null) { - _logger.LogTrace("HEADERS:"); - _logger.LogTrace(JsonSerializer.Serialize(rawResponse.Headers, _serializerOptions)); + _logger.LogTrace($"status {rawResponse!.Status}"); + if (promptResponse.Status == PromptResponseStatus.RateLimited) + { + _logger.LogTrace("HEADERS:"); + _logger.LogTrace(JsonSerializer.Serialize(rawResponse.Headers, _serializerOptions)); + } } } // Returns if the unsuccessful response - if (promptResponse.Status != PromptResponseStatus.Success || chatCompletionsResponse == null) + if (promptResponse.Status != PromptResponseStatus.Success || (chatCompletionsResponse == null && this._options.Stream == false)) { return promptResponse; } - // Process response - ChatCompletion chatCompletion = chatCompletionsResponse.Value; - List actionCalls = new(); - IReadOnlyList toolsCalls = chatCompletion.ToolCalls; - if (isToolsAugmentation && toolsCalls.Count > 0) + if (chatCompletionsResponse != null) { - foreach(ChatToolCall toolCall in toolsCalls) + // Process response + ChatCompletion chatCompletion = chatCompletionsResponse.Value; + List actionCalls = new(); + IReadOnlyList toolsCalls = chatCompletion.ToolCalls; + if (isToolsAugmentation && toolsCalls.Count > 0) { - actionCalls.Add(new ActionCall(toolCall)); + foreach (ChatToolCall toolCall in toolsCalls) + { + actionCalls.Add(new ActionCall(toolCall)); + } + } + } + + if (this._options.Stream == 
true) + { + StreamingResponse? streamer = (StreamingResponse?)memory.GetValue("temp.streamer"); + + if (streamer == null) + { + throw new TeamsAIException("The streaming object is empty"); } + + ResponseReceivedEventArgs responseReceivedEventArgs = new(turnContext, memory, promptResponse, streamer); + Events!.OnResponseReceived(responseReceivedEventArgs); + + // Let any pending events flush before returning + await Task.Delay(TimeSpan.FromSeconds(0)); } + List? inputs = new(); int lastMessage = prompt.Output.Count - 1; @@ -299,13 +395,11 @@ public async Task CompletePromptAsync(ITurnContext turnContext, break; } } - int firstMessage = i+1; + int firstMessage = i + 1; inputs = prompt.Output.GetRange(firstMessage, prompt.Output.Count - firstMessage); } } - promptResponse.Input = inputs; - promptResponse.Message = new ChatMessage(chatCompletionsResponse.Value); return promptResponse; @@ -352,4 +446,4 @@ private void AddAzureChatExtensionConfigurations(ChatCompletionOptions options, } } } -} +} \ No newline at end of file diff --git a/dotnet/packages/Microsoft.TeamsAI/Microsoft.TeamsAI/AI/Models/PromptChunk.cs b/dotnet/packages/Microsoft.TeamsAI/Microsoft.TeamsAI/AI/Models/PromptChunk.cs new file mode 100644 index 000000000..5488bbbb4 --- /dev/null +++ b/dotnet/packages/Microsoft.TeamsAI/Microsoft.TeamsAI/AI/Models/PromptChunk.cs @@ -0,0 +1,13 @@ +namespace Microsoft.Teams.AI.AI.Models +{ + /// + /// Streaming chunk passed in the `ChunkReceived` event. + /// + public class PromptChunk + { + /// + /// Delta for the response message being buffered up. + /// + public ChatMessage? 
delta { get; set; } + } +} diff --git a/dotnet/packages/Microsoft.TeamsAI/Microsoft.TeamsAI/AI/Models/PromptCompletionModelEmitter.cs b/dotnet/packages/Microsoft.TeamsAI/Microsoft.TeamsAI/AI/Models/PromptCompletionModelEmitter.cs new file mode 100644 index 000000000..2454d24f5 --- /dev/null +++ b/dotnet/packages/Microsoft.TeamsAI/Microsoft.TeamsAI/AI/Models/PromptCompletionModelEmitter.cs @@ -0,0 +1,52 @@ +using static Microsoft.Teams.AI.AI.Models.IPromptCompletionModelEvents; + +namespace Microsoft.Teams.AI.AI.Models +{ + /// + /// Emitter class that handles the subscription of streaming events. + /// + public class PromptCompletionModelEmitter + { + /// + /// Triggered before the model is called to complete a prompt. + /// + public event BeforeCompletionHandler? BeforeCompletion; + + /// + /// Triggered when a chunk is received from the model via streaming. + /// + public event ChunkReceivedHandler? ChunkReceived; + + /// + /// Triggered after the model finishes returning a response. + /// + public event ResponseReceivedHandler? ResponseReceived; + + /// + /// Invokes the BeforeCompletionHandler. + /// + /// + public virtual void OnBeforeCompletion(BeforeCompletionEventArgs args) + { + BeforeCompletion?.Invoke(this, args); + } + + /// + /// Invokes the ChunkReceivedHandler. + /// + /// + public virtual void OnChunkReceived(ChunkReceivedEventArgs args) + { + ChunkReceived?.Invoke(this, args); + } + + /// + /// Invokes the ResponseReceivedHandler. 
+ /// + /// + public virtual void OnResponseReceived(ResponseReceivedEventArgs args) + { + ResponseReceived?.Invoke(this, args); + } + } +} diff --git a/dotnet/packages/Microsoft.TeamsAI/Microsoft.TeamsAI/AI/Planners/ActionPlanner.cs b/dotnet/packages/Microsoft.TeamsAI/Microsoft.TeamsAI/AI/Planners/ActionPlanner.cs index e34196516..f3751f6b4 100644 --- a/dotnet/packages/Microsoft.TeamsAI/Microsoft.TeamsAI/AI/Planners/ActionPlanner.cs +++ b/dotnet/packages/Microsoft.TeamsAI/Microsoft.TeamsAI/AI/Planners/ActionPlanner.cs @@ -111,14 +111,23 @@ public async Task ContinueTaskAsync(ITurnContext context, TState state, AI throw new Exception(response.Error?.Message ?? "[Action Planner]: an error has occurred"); } - Plan? plan = await template.Augmentation.CreatePlanFromResponseAsync(context, state, response, cancellationToken); + // Check to see if we have a response + // When a streaming response is used, the response message is undefined. + if (response.Message != null) + { + Plan? plan = await template.Augmentation.CreatePlanFromResponseAsync(context, state, response, cancellationToken); + + if (plan == null) + { + throw new Exception("[Action Planner]: failed to create plan"); + } - if (plan == null) + return plan; + } + else { - throw new Exception("[Action Planner]: failed to create plan"); + return new Plan(); } - - return plan; } /// @@ -164,7 +173,9 @@ public async Task CompletePromptAsync( Tokenizer = this.Options.Tokenizer, MaxHistoryMessages = this.Prompts.Options.MaxHistoryMessages, MaxRepairAttempts = this.Options.MaxRepairAttempts, - LogRepairs = this.Options.LogRepairs + LogRepairs = this.Options.LogRepairs, + StartStreamingMessage = this.Options.StartStreamingMessage, + EndStreamHandler = this.Options.EndStreamHandler, }, this._logger); return await client.CompletePromptAsync(context, memory, this.Prompts, cancellationToken); diff --git a/dotnet/packages/Microsoft.TeamsAI/Microsoft.TeamsAI/AI/Planners/ActionPlannerOptions.cs 
b/dotnet/packages/Microsoft.TeamsAI/Microsoft.TeamsAI/AI/Planners/ActionPlannerOptions.cs index 042258b29..25a25e281 100644 --- a/dotnet/packages/Microsoft.TeamsAI/Microsoft.TeamsAI/AI/Planners/ActionPlannerOptions.cs +++ b/dotnet/packages/Microsoft.TeamsAI/Microsoft.TeamsAI/AI/Planners/ActionPlannerOptions.cs @@ -3,6 +3,7 @@ using Microsoft.Teams.AI.AI.Prompts; using Microsoft.Teams.AI.AI.Tokenizers; using Microsoft.Teams.AI.State; +using static Microsoft.Teams.AI.AI.Models.IPromptCompletionModelEvents; namespace Microsoft.Teams.AI.AI.Planners { @@ -69,6 +70,16 @@ public delegate Task ActionPlannerPromptFactory( ActionPlanner planner ); + /// + /// Optional message to send a client at the start of a streaming response. + /// + public string? StartStreamingMessage { get; set; } + + /// + /// Optional handler to run when a stream is about to conclude. + /// + public ResponseReceivedHandler? EndStreamHandler; + /// /// Creates an instance of `ActionPlannerOptions`. /// diff --git a/dotnet/packages/Microsoft.TeamsAI/Microsoft.TeamsAI/Application/StreamType.cs b/dotnet/packages/Microsoft.TeamsAI/Microsoft.TeamsAI/Application/StreamType.cs new file mode 100644 index 000000000..d8bb264fd --- /dev/null +++ b/dotnet/packages/Microsoft.TeamsAI/Microsoft.TeamsAI/Application/StreamType.cs @@ -0,0 +1,23 @@ +namespace Microsoft.Teams.AI.Application +{ + /// + /// The type of streaming message being sent. + /// + public enum StreamType + { + /// + /// An informative update. + /// + Informative, + + /// + /// A chunk of partial message text. + /// + Streaming, + + /// + /// The final message. 
+ /// + Final + } +} diff --git a/dotnet/packages/Microsoft.TeamsAI/Microsoft.TeamsAI/Application/StreamingChannelData.cs b/dotnet/packages/Microsoft.TeamsAI/Microsoft.TeamsAI/Application/StreamingChannelData.cs new file mode 100644 index 000000000..3c79fa397 --- /dev/null +++ b/dotnet/packages/Microsoft.TeamsAI/Microsoft.TeamsAI/Application/StreamingChannelData.cs @@ -0,0 +1,38 @@ +using Newtonsoft.Json; +using Newtonsoft.Json.Converters; +using Newtonsoft.Json.Serialization; + +namespace Microsoft.Teams.AI.Application +{ + /// + /// Structure of the outgoing channelData field for streaming responses. + /// + /// The expected sequence of streamTypes is: + /// `informative`, `streaming`, `streaming`, ..., `final`. + /// + /// Once a `final` message is sent, the stream is considered ended. + /// + public class StreamingChannelData + { + /// + /// The type of message being sent. + /// + [JsonConverter(typeof(StringEnumConverter), typeof(CamelCaseNamingStrategy))] + [JsonProperty(PropertyName = "streamType")] + public StreamType StreamType { get; set; } + + /// + /// Sequence number of the message in the stream. + /// Starts at 1 for the first message and increments from there. + /// + [JsonProperty(PropertyName = "streamSequence")] + public int? StreamSequence { get; set; } = 1; + + /// + /// ID of the stream. + /// Assigned after the initial update is sent. + /// + [JsonProperty(PropertyName = "streamId")] + public string? 
streamId { get; set; } + } +} diff --git a/dotnet/packages/Microsoft.TeamsAI/Microsoft.TeamsAI/Application/StreamingResponse.cs b/dotnet/packages/Microsoft.TeamsAI/Microsoft.TeamsAI/Application/StreamingResponse.cs new file mode 100644 index 000000000..f324118e7 --- /dev/null +++ b/dotnet/packages/Microsoft.TeamsAI/Microsoft.TeamsAI/Application/StreamingResponse.cs @@ -0,0 +1,251 @@ +using Microsoft.Bot.Builder; +using Microsoft.Bot.Schema; +using Microsoft.Teams.AI.Exceptions; + +namespace Microsoft.Teams.AI.Application +{ + + /// + /// A helper class for streaming responses to the client. + /// This class is used to send a series of updates to the client in a single response. The expected + /// sequence of calls is: + /// + /// `QueueInformativeUpdate()`, `QueueTextChunk()`, `QueueTextChunk()`, ..., `EndStream()`. + /// + /// Once `EndStream()` is called, the stream is considered ended and no further updates can be sent. + /// + public class StreamingResponse + { + private readonly ITurnContext _context; + private int _nextSequence = 1; + private bool _ended = false; + + // Queue for outgoing activities + private IList> _queue = []; + private Task? _queueSync; + private bool _chunkQueued = false; + + /// + /// Fluent interface for accessing the attachments. + /// + public IList? Attachments { get; set; } = []; + + /// + /// Gets the stream ID of the current response. + /// Assigned after the initial update is sent. + /// + public string? StreamId { get; private set; } + + /// + /// Fluent interface for accessing the message. + /// + public string Message { get; private set; } = ""; + + /// + /// Gets the number of updates sent for the stream. + /// + /// Number of updates sent so far. + public int UpdatesSent() => this._nextSequence - 1; + + /// + /// Creates a new instance of the class. + /// + /// Context for the current turn of conversation with the user. 
+ public StreamingResponse(ITurnContext context) + { + this._context = context; + } + + /// + /// Waits for the outgoing activity queue to be empty. + /// + /// + public Task WaitForQueue() + { + return this._queueSync != null ? this._queueSync : Task.CompletedTask; + } + + /// + /// Queues an informative update to be sent to the client. + /// + /// Text of the update to send. + /// Throws if the stream has already ended. + public void QueueInformativeUpdate(string text) + { + if (this._ended) + { + throw new TeamsAIException("The stream has already ended."); + } + + QueueActivity(() => new Activity + { + Type = ActivityTypes.Typing, + Text = text, + ChannelData = new StreamingChannelData + { + StreamType = StreamType.Informative, + StreamSequence = this._nextSequence++, + } + }); + } + + /// + /// Queues a chunk of partial message text to be sent to the client. + /// + /// Partial text of the message to send. + /// Throws if the stream has already ended. + public void QueueTextChunk(string text) + { + if (this._ended) + { + throw new TeamsAIException("The stream has already ended."); + } + + Message += text; + QueueNextChunk(); + } + + /// + /// Ends the stream by sending the final message to the client. + /// + /// A Task representing the async operation + /// Throws if the stream has already ended. + public Task EndStream() + { + if (this._ended) + { + throw new TeamsAIException("The stream has already ended."); + } + + this._ended = true; + QueueNextChunk(); + + // Wait for the queue to drain + return this._queueSync!; + } + + /// + /// Queue an activity to be sent to the client. + /// + /// + private void QueueActivity(Func factory) + { + this._queue.Add(factory); + + // If there's no sync in progress, start one + if (this._queueSync == null) + { + this._queueSync = DrainQueue(); + } + } + + /// + /// Queue the next chunk of text to be sent to the client. 
+ /// + private void QueueNextChunk() + { + // Check if we are already waiting to send a chunk + if (this._chunkQueued) + { + return; + } + + // Queue a chunk of text to be sent + this._chunkQueued = true; + QueueActivity(() => + { + this._chunkQueued = false; + + if (this._ended) + { + // Send final message + return new Activity + { + Type = ActivityTypes.Message, + Text = Message, + Attachments = Attachments != null ? Attachments : [], + ChannelData = new StreamingChannelData + { + StreamType = StreamType.Final, + } + }; + } + else + { + // Send typing activity + return new Activity + { + Type = ActivityTypes.Typing, + Text = Message, + ChannelData = new StreamingChannelData + { + StreamType = StreamType.Streaming, + StreamSequence = this._nextSequence++, + } + }; + + } + }); + } + + /// + /// Sends any queued activities to the client until the queue is empty. + /// + private async Task DrainQueue() + { + await Task.Run(async () => + { + try + { + while (this._queue.Count > 0) + { + // Get next activity from queue + Activity activity = _queue[0](); + await SendActivity(activity).ConfigureAwait(false); + _queue.RemoveAt(0); + } + } + + finally + { + // Queue is empty, mark as idle + this._queueSync = null; + } + }).ConfigureAwait(false); + } + + /// + /// Sends an activity to the client and saves the stream ID returned. + /// + /// The activity to send. + /// A Task representing the async operation. 
+ private async Task SendActivity(Activity activity) + { + // Set activity ID to the assigned stream ID + if (!string.IsNullOrEmpty(StreamId)) + { + StreamingChannelData oldChannelData = activity.GetChannelData(); + StreamingChannelData updatedChannelData = new() + { + streamId = StreamId, + StreamType = oldChannelData.StreamType, + }; + + if (oldChannelData.StreamSequence != null) + { + updatedChannelData.StreamSequence = oldChannelData.StreamSequence; + } + + activity.ChannelData = updatedChannelData; + } + + ResourceResponse response = await this._context.SendActivityAsync(activity).ConfigureAwait(false); + + // Save assigned stream ID + if (string.IsNullOrEmpty(StreamId)) + { + StreamId = response.Id; + } + } + } +} diff --git a/dotnet/samples/04.ai.g.teamsChefBot-streaming/.editorconfig b/dotnet/samples/04.ai.g.teamsChefBot-streaming/.editorconfig new file mode 100644 index 000000000..755bfa6c1 --- /dev/null +++ b/dotnet/samples/04.ai.g.teamsChefBot-streaming/.editorconfig @@ -0,0 +1,240 @@ +# Remove the line below if you want to inherit .editorconfig settings from higher directories +root = true + +# C# files +[*.cs] + +#### Core EditorConfig Options #### + +# Indentation and spacing +indent_size = 4 +indent_style = space +tab_width = 4 + +# New line preferences +end_of_line = crlf +insert_final_newline = false + +#### .NET Coding Conventions #### + +# Organize usings +dotnet_separate_import_directive_groups = false +dotnet_sort_system_directives_first = false +file_header_template = unset + +# this. and Me. 
preferences +dotnet_style_qualification_for_event = false +dotnet_style_qualification_for_field = false +dotnet_style_qualification_for_method = false +dotnet_style_qualification_for_property = false + +# Language keywords vs BCL types preferences +dotnet_style_predefined_type_for_locals_parameters_members = true +dotnet_style_predefined_type_for_member_access = true + +# Parentheses preferences +dotnet_style_parentheses_in_arithmetic_binary_operators = always_for_clarity +dotnet_style_parentheses_in_other_binary_operators = always_for_clarity +dotnet_style_parentheses_in_other_operators = never_if_unnecessary +dotnet_style_parentheses_in_relational_binary_operators = always_for_clarity + +# Modifier preferences +dotnet_style_require_accessibility_modifiers = for_non_interface_members + +# Expression-level preferences +dotnet_style_coalesce_expression = true +dotnet_style_collection_initializer = true +dotnet_style_explicit_tuple_names = true +dotnet_style_namespace_match_folder = true +dotnet_style_null_propagation = true +dotnet_style_object_initializer = true +dotnet_style_operator_placement_when_wrapping = beginning_of_line +dotnet_style_prefer_auto_properties = true +dotnet_style_prefer_collection_expression = when_types_loosely_match +dotnet_style_prefer_compound_assignment = true +dotnet_style_prefer_conditional_expression_over_assignment = true +dotnet_style_prefer_conditional_expression_over_return = true +dotnet_style_prefer_foreach_explicit_cast_in_source = when_strongly_typed +dotnet_style_prefer_inferred_anonymous_type_member_names = true +dotnet_style_prefer_inferred_tuple_names = true +dotnet_style_prefer_is_null_check_over_reference_equality_method = true +dotnet_style_prefer_simplified_boolean_expressions = true +dotnet_style_prefer_simplified_interpolation = true + +# Field preferences +dotnet_style_readonly_field = true + +# Parameter preferences +dotnet_code_quality_unused_parameters = all + +# Suppression preferences 
+dotnet_remove_unnecessary_suppression_exclusions = none + +# New line preferences +dotnet_style_allow_multiple_blank_lines_experimental = true +dotnet_style_allow_statement_immediately_after_block_experimental = true + +#### C# Coding Conventions #### + +# var preferences +csharp_style_var_elsewhere = false +csharp_style_var_for_built_in_types = false +csharp_style_var_when_type_is_apparent = false + +# Expression-bodied members +csharp_style_expression_bodied_accessors = true:silent +csharp_style_expression_bodied_constructors = false:silent +csharp_style_expression_bodied_indexers = true:silent +csharp_style_expression_bodied_lambdas = true:silent +csharp_style_expression_bodied_local_functions = false:silent +csharp_style_expression_bodied_methods = false:silent +csharp_style_expression_bodied_operators = false:silent +csharp_style_expression_bodied_properties = true:silent + +# Pattern matching preferences +csharp_style_pattern_matching_over_as_with_null_check = true +csharp_style_pattern_matching_over_is_with_cast_check = true +csharp_style_prefer_extended_property_pattern = true +csharp_style_prefer_not_pattern = true +csharp_style_prefer_pattern_matching = true +csharp_style_prefer_switch_expression = true + +# Null-checking preferences +csharp_style_conditional_delegate_call = true + +# Modifier preferences +csharp_prefer_static_anonymous_function = true +csharp_prefer_static_local_function = true +csharp_preferred_modifier_order = public,private,protected,internal,file,static,extern,new,virtual,abstract,sealed,override,readonly,unsafe,required,volatile,async +csharp_style_prefer_readonly_struct = true +csharp_style_prefer_readonly_struct_member = true + +# Code-block preferences +csharp_prefer_braces = true:silent +csharp_prefer_simple_using_statement = true:suggestion +csharp_style_namespace_declarations = block_scoped:silent +csharp_style_prefer_method_group_conversion = true:silent +csharp_style_prefer_primary_constructors = true:suggestion 
+csharp_style_prefer_top_level_statements = true:silent + +# Expression-level preferences +csharp_prefer_simple_default_expression = true +csharp_style_deconstructed_variable_declaration = true +csharp_style_implicit_object_creation_when_type_is_apparent = true +csharp_style_inlined_variable_declaration = true +csharp_style_prefer_index_operator = true +csharp_style_prefer_local_over_anonymous_function = true +csharp_style_prefer_null_check_over_type_check = true +csharp_style_prefer_range_operator = true +csharp_style_prefer_tuple_swap = true +csharp_style_prefer_utf8_string_literals = true +csharp_style_throw_expression = true +csharp_style_unused_value_assignment_preference = discard_variable +csharp_style_unused_value_expression_statement_preference = discard_variable + +# 'using' directive preferences +csharp_using_directive_placement = outside_namespace:silent + +# New line preferences +csharp_style_allow_blank_line_after_colon_in_constructor_initializer_experimental = true +csharp_style_allow_blank_line_after_token_in_arrow_expression_clause_experimental = true +csharp_style_allow_blank_line_after_token_in_conditional_expression_experimental = true +csharp_style_allow_blank_lines_between_consecutive_braces_experimental = true +csharp_style_allow_embedded_statements_on_same_line_experimental = true + +#### C# Formatting Rules #### + +# New line preferences +csharp_new_line_before_catch = true +csharp_new_line_before_else = true +csharp_new_line_before_finally = true +csharp_new_line_before_members_in_anonymous_types = true +csharp_new_line_before_members_in_object_initializers = true +csharp_new_line_before_open_brace = all +csharp_new_line_between_query_expression_clauses = true + +# Indentation preferences +csharp_indent_block_contents = true +csharp_indent_braces = false +csharp_indent_case_contents = true +csharp_indent_case_contents_when_block = true +csharp_indent_labels = one_less_than_current +csharp_indent_switch_labels = true + +# Space preferences 
+csharp_space_after_cast = false +csharp_space_after_colon_in_inheritance_clause = true +csharp_space_after_comma = true +csharp_space_after_dot = false +csharp_space_after_keywords_in_control_flow_statements = true +csharp_space_after_semicolon_in_for_statement = true +csharp_space_around_binary_operators = before_and_after +csharp_space_around_declaration_statements = false +csharp_space_before_colon_in_inheritance_clause = true +csharp_space_before_comma = false +csharp_space_before_dot = false +csharp_space_before_open_square_brackets = false +csharp_space_before_semicolon_in_for_statement = false +csharp_space_between_empty_square_brackets = false +csharp_space_between_method_call_empty_parameter_list_parentheses = false +csharp_space_between_method_call_name_and_opening_parenthesis = false +csharp_space_between_method_call_parameter_list_parentheses = false +csharp_space_between_method_declaration_empty_parameter_list_parentheses = false +csharp_space_between_method_declaration_name_and_open_parenthesis = false +csharp_space_between_method_declaration_parameter_list_parentheses = false +csharp_space_between_parentheses = false +csharp_space_between_square_brackets = false + +# Wrapping preferences +csharp_preserve_single_line_blocks = true +csharp_preserve_single_line_statements = true + +#### Naming styles #### + +# Naming rules + +dotnet_naming_rule.interface_should_be_begins_with_i.severity = suggestion +dotnet_naming_rule.interface_should_be_begins_with_i.symbols = interface +dotnet_naming_rule.interface_should_be_begins_with_i.style = begins_with_i + +dotnet_naming_rule.types_should_be_pascal_case.severity = suggestion +dotnet_naming_rule.types_should_be_pascal_case.symbols = types +dotnet_naming_rule.types_should_be_pascal_case.style = pascal_case + +dotnet_naming_rule.non_field_members_should_be_pascal_case.severity = suggestion +dotnet_naming_rule.non_field_members_should_be_pascal_case.symbols = non_field_members 
+dotnet_naming_rule.non_field_members_should_be_pascal_case.style = pascal_case + +# Symbol specifications + +dotnet_naming_symbols.interface.applicable_kinds = interface +dotnet_naming_symbols.interface.applicable_accessibilities = public, internal, private, protected, protected_internal, private_protected +dotnet_naming_symbols.interface.required_modifiers = + +dotnet_naming_symbols.types.applicable_kinds = class, struct, interface, enum +dotnet_naming_symbols.types.applicable_accessibilities = public, internal, private, protected, protected_internal, private_protected +dotnet_naming_symbols.types.required_modifiers = + +dotnet_naming_symbols.non_field_members.applicable_kinds = property, event, method +dotnet_naming_symbols.non_field_members.applicable_accessibilities = public, internal, private, protected, protected_internal, private_protected +dotnet_naming_symbols.non_field_members.required_modifiers = + +# Naming styles + +dotnet_naming_style.pascal_case.required_prefix = +dotnet_naming_style.pascal_case.required_suffix = +dotnet_naming_style.pascal_case.word_separator = +dotnet_naming_style.pascal_case.capitalization = pascal_case + +dotnet_naming_style.begins_with_i.required_prefix = I +dotnet_naming_style.begins_with_i.required_suffix = +dotnet_naming_style.begins_with_i.word_separator = +dotnet_naming_style.begins_with_i.capitalization = pascal_case + +[*.{cs,vb}] +dotnet_style_operator_placement_when_wrapping = beginning_of_line +tab_width = 4 +indent_size = 4 +end_of_line = crlf +dotnet_style_coalesce_expression = true:suggestion +dotnet_style_null_propagation = true:suggestion \ No newline at end of file diff --git a/dotnet/samples/04.ai.g.teamsChefBot-streaming/.gitignore b/dotnet/samples/04.ai.g.teamsChefBot-streaming/.gitignore new file mode 100644 index 000000000..d9db69b0e --- /dev/null +++ b/dotnet/samples/04.ai.g.teamsChefBot-streaming/.gitignore @@ -0,0 +1,25 @@ +# TeamsFx files +build +appPackage/build +env/.env.*.user +env/.env.local 
+appsettings.Development.json +.deployment + +# User-specific files +*.user + +# Build results +[Dd]ebug/ +[Dd]ebugPublic/ +[Rr]elease/ +[Rr]eleases/ +x64/ +x86/ +bld/ +[Bb]in/ +[Oo]bj/ +[Ll]og/ + +# VS files +.vs/ diff --git a/dotnet/samples/04.ai.g.teamsChefBot-streaming/ActionHandlers.cs b/dotnet/samples/04.ai.g.teamsChefBot-streaming/ActionHandlers.cs new file mode 100644 index 000000000..af369ff4f --- /dev/null +++ b/dotnet/samples/04.ai.g.teamsChefBot-streaming/ActionHandlers.cs @@ -0,0 +1,25 @@ +using Microsoft.Bot.Builder; +using Microsoft.Teams.AI.AI.Action; +using Microsoft.Teams.AI.AI; +using System.Text.Json; + +namespace TeamsChefBot +{ + public class ActionHandlers + { + [Action(AIConstants.FlaggedInputActionName)] + public async Task OnFlaggedInput([ActionTurnContext] ITurnContext turnContext, [ActionParameters] Dictionary entities) + { + string entitiesJsonString = JsonSerializer.Serialize(entities); + await turnContext.SendActivityAsync($"I'm sorry your message was flagged: {entitiesJsonString}"); + return ""; + } + + [Action(AIConstants.FlaggedOutputActionName)] + public async Task OnFlaggedOutput([ActionTurnContext] ITurnContext turnContext) + { + await turnContext.SendActivityAsync("I'm not allowed to talk about such things."); + return ""; + } + } +} diff --git a/dotnet/samples/04.ai.g.teamsChefBot-streaming/AdapterWithErrorHandler.cs b/dotnet/samples/04.ai.g.teamsChefBot-streaming/AdapterWithErrorHandler.cs new file mode 100644 index 000000000..761f1df38 --- /dev/null +++ b/dotnet/samples/04.ai.g.teamsChefBot-streaming/AdapterWithErrorHandler.cs @@ -0,0 +1,26 @@ +using Microsoft.Bot.Builder.TraceExtensions; +using Microsoft.Teams.AI; + +namespace TeamsChefBot +{ + public class AdapterWithErrorHandler : TeamsAdapter + { + public AdapterWithErrorHandler(IConfiguration configuration, ILogger logger) + : base(configuration, null, logger) + { + OnTurnError = async (turnContext, exception) => + { + // Log any leaked exception from the application. 
+ // NOTE: In production environment, you should consider logging this to + // Azure Application Insights. Visit https://aka.ms/bottelemetry to see how + // to add telemetry capture to your bot. + logger.LogError(exception, $"[OnTurnError] unhandled error : {exception.Message}"); + // Send a message to the user + await turnContext.SendActivityAsync($"The bot encountered an unhandled error: {exception.Message}"); + await turnContext.SendActivityAsync("To continue to run this bot, please fix the bot source code."); + // Send a trace activity + await turnContext.TraceActivityAsync("OnTurnError Trace", exception.Message, "https://www.botframework.com/schemas/error", "TurnError"); + }; + } + } +} diff --git a/dotnet/samples/04.ai.g.teamsChefBot-streaming/Config.cs b/dotnet/samples/04.ai.g.teamsChefBot-streaming/Config.cs new file mode 100644 index 000000000..8d54a4faa --- /dev/null +++ b/dotnet/samples/04.ai.g.teamsChefBot-streaming/Config.cs @@ -0,0 +1,29 @@ +namespace TeamsChefBot +{ + public class ConfigOptions + { + public string? BOT_ID { get; set; } + public string? BOT_PASSWORD { get; set; } + public OpenAIConfigOptions? OpenAI { get; set; } + public AzureConfigOptions? Azure { get; set; } + } + + /// + /// Options for Open AI + /// + public class OpenAIConfigOptions + { + public string? ApiKey { get; set; } + } + + /// + /// Options for Azure OpenAI and Azure Content Safety + /// + public class AzureConfigOptions + { + public string? OpenAIApiKey { get; set; } + public string? OpenAIEndpoint { get; set; } + public string? ContentSafetyApiKey { get; set; } + public string? 
ContentSafetyEndpoint { get; set; } + } +} diff --git a/dotnet/samples/04.ai.g.teamsChefBot-streaming/Controllers/BotController.cs b/dotnet/samples/04.ai.g.teamsChefBot-streaming/Controllers/BotController.cs new file mode 100644 index 000000000..8a59d58fe --- /dev/null +++ b/dotnet/samples/04.ai.g.teamsChefBot-streaming/Controllers/BotController.cs @@ -0,0 +1,32 @@ +using Microsoft.AspNetCore.Mvc; +using Microsoft.Bot.Builder; +using Microsoft.Teams.AI; + +namespace TeamsChefBot.Controllers +{ + [Route("api/messages")] + [ApiController] + public class BotController : ControllerBase + { + private readonly TeamsAdapter _adapter; + private readonly IBot _bot; + + public BotController(TeamsAdapter adapter, IBot bot) + { + _adapter = adapter; + _bot = bot; + } + + [HttpPost] + public async Task PostAsync(CancellationToken cancellationToken = default) + { + await _adapter.ProcessAsync + ( + Request, + Response, + _bot, + cancellationToken + ); + } + } +} diff --git a/dotnet/samples/04.ai.g.teamsChefBot-streaming/KernelMemoryDataSource.cs b/dotnet/samples/04.ai.g.teamsChefBot-streaming/KernelMemoryDataSource.cs new file mode 100644 index 000000000..5d93a5953 --- /dev/null +++ b/dotnet/samples/04.ai.g.teamsChefBot-streaming/KernelMemoryDataSource.cs @@ -0,0 +1,120 @@ +using Microsoft.Bot.Builder; +using Microsoft.KernelMemory; +using Microsoft.Teams.AI.AI.DataSources; +using Microsoft.Teams.AI.AI.Prompts.Sections; +using Microsoft.Teams.AI.AI.Tokenizers; +using Microsoft.Teams.AI.State; +using System.Text; + +namespace TeamsChefBot +{ + /// + /// The class connects the Kernel Memory library data source to the bot. + /// Kernel Memory is a library that allows you to index and query any data using LLM and natural language, + /// tracking sources and showing citations (https://github.com/microsoft/kernel-memory). + /// + public class KernelMemoryDataSource : IDataSource + { + private readonly IKernelMemory _kernelMemory; + private readonly Task? 
_ingestTask; + + public KernelMemoryDataSource(string name, IKernelMemory memoryInstance) + { + ArgumentNullException.ThrowIfNull(memoryInstance); + + this._kernelMemory = memoryInstance; + this.Name = name; + this._ingestTask = this.IngestAsync(); + } + + public string Name { get; } + + /// + /// Loads documents from the 'files' folder into Kernel Memory's in-memory vector database. + /// + /// + private async Task IngestAsync() + { + Console.WriteLine("Loading documents from the 'files' folder into Kernel Memory's in-memory vector database"); + + var importTasks = new List(); + string[] Documents = Directory.GetFiles("files"); + + int i = 0; + foreach (string doc in Documents) + { + importTasks.Add(this._kernelMemory.ImportDocumentAsync(doc, documentId: $"doc-{i}")); + i++; + } + + await Task.WhenAll(importTasks); + } + + public async Task> RenderDataAsync(ITurnContext context, IMemory memory, ITokenizer tokenizer, int maxTokens, CancellationToken cancellationToken = default) + { + if (this._ingestTask?.IsCompleted == false) + { + // Wait for ingestion to complete + await _ingestTask; + } + + string? 
ask = memory.GetValue("temp.input") as string; + + if (ask == null) + { + return new RenderedPromptSection(string.Empty, 0); + } + + // Query index for all relevant documents + SearchResult result = await this._kernelMemory.SearchAsync(ask); + + if (result.NoResult) + { + Console.WriteLine("No results when querying Kernel Memory found"); + return new RenderedPromptSection(string.Empty, 0); + } + + List citations = result.Results; + + // Add documents until you run out of tokens + int length = 0; + StringBuilder output = new(); + string connector = ""; + bool maxTokensReached = false; + foreach (Citation citation in citations) + { + // Start a new doc + StringBuilder doc = new(); + doc.Append($"{connector}\n"); + length += tokenizer.Encode($"{connector}\n").Count; + // Add ending tag count to token count + length += tokenizer.Encode("\n").Count; + + foreach (var partition in citation.Partitions) + { + // Add the partition to the doc + int partitionLength = tokenizer.Encode(partition.Text).Count; + int remainingTokens = maxTokens - (length + partitionLength); + if (remainingTokens < 0) + { + maxTokensReached = true; + break; + } + length += partitionLength; + doc.Append($"{partition.Text}\n"); + } + + doc.Append("\n"); + output.Append(doc.ToString()); + connector = "\n\n"; + + if (maxTokensReached) + { + break; + } + } + + return new RenderedPromptSection(output.ToString(), length, length > maxTokens); + } + } +} diff --git a/dotnet/samples/04.ai.g.teamsChefBot-streaming/Program.cs b/dotnet/samples/04.ai.g.teamsChefBot-streaming/Program.cs new file mode 100644 index 000000000..3df8d3f53 --- /dev/null +++ b/dotnet/samples/04.ai.g.teamsChefBot-streaming/Program.cs @@ -0,0 +1,187 @@ +using Microsoft.Bot.Builder; +using Microsoft.Bot.Builder.Integration.AspNet.Core; +using Microsoft.Bot.Connector.Authentication; +using Microsoft.Teams.AI.AI.Models; +using Microsoft.Teams.AI.AI.Planners; +using Microsoft.Teams.AI.AI.Prompts; +using Microsoft.Teams.AI.State; +using 
Microsoft.Teams.AI; +using TeamsChefBot; +using Microsoft.KernelMemory; +using Microsoft.Teams.AI.AI.DataSources; +using static Microsoft.Teams.AI.AI.Models.IPromptCompletionModelEvents; +using AdaptiveCards; +using System.IO; +using Microsoft.Bot.Schema; +using Microsoft.Teams.AI.Application; + +var builder = WebApplication.CreateBuilder(args); + +builder.Services.AddControllers(); +builder.Services.AddHttpClient("WebClient", client => client.Timeout = TimeSpan.FromSeconds(600)); +builder.Services.AddHttpContextAccessor(); + +// Prepare Configuration for ConfigurationBotFrameworkAuthentication +var config = builder.Configuration.Get()!; +builder.Configuration["MicrosoftAppType"] = "MultiTenant"; +builder.Configuration["MicrosoftAppId"] = config.BOT_ID; +builder.Configuration["MicrosoftAppPassword"] = config.BOT_PASSWORD; + +// Create the Bot Framework Authentication to be used with the Bot Adapter. +builder.Services.AddSingleton(); + +// Create the Cloud Adapter with error handling enabled. +// Note: some classes expect a BotAdapter and some expect a BotFrameworkHttpAdapter, so +// register the same adapter instance for all types. 
+builder.Services.AddSingleton(); +builder.Services.AddSingleton(sp => sp.GetService()!); +builder.Services.AddSingleton(sp => sp.GetService()!); + +builder.Services.AddSingleton(); + +// Create AI Model +if (!string.IsNullOrEmpty(config.OpenAI?.ApiKey)) +{ + // Create OpenAI Model + builder.Services.AddSingleton (sp => new( + new OpenAIModelOptions(config.OpenAI.ApiKey, "gpt-4o") + { + LogRequests = true, + Stream = true, + }, + sp.GetService() + )); + + // Create Kernel Memory Serverless instance using OpenAI embeddings API + builder.Services.AddSingleton((sp) => + { + return new KernelMemoryBuilder() + .WithOpenAIDefaults(config.OpenAI.ApiKey) + .WithSimpleFileStorage() + .Build(); + }); +} +else if (!string.IsNullOrEmpty(config.Azure?.OpenAIApiKey) && !string.IsNullOrEmpty(config.Azure.OpenAIEndpoint)) +{ + // Create Azure OpenAI Model + builder.Services.AddSingleton(sp => new( + new AzureOpenAIModelOptions( + config.Azure.OpenAIApiKey, + "gpt-4o", + config.Azure.OpenAIEndpoint + ) + { + LogRequests = true + }, + sp.GetService() + )); + + // Create Kernel Memory Serverless instance using AzureOpenAI embeddings API + builder.Services.AddSingleton((sp) => + { + AzureOpenAIConfig azureConfig = new() + { + Auth = AzureOpenAIConfig.AuthTypes.APIKey, + APIKey = config.Azure.OpenAIApiKey, + Endpoint = config.Azure.OpenAIEndpoint, + APIType = AzureOpenAIConfig.APITypes.EmbeddingGeneration, + Deployment = "text-embedding-ada-002" // Update this to the deployment you want to use + }; + + return new KernelMemoryBuilder() + .WithAzureOpenAITextEmbeddingGeneration(azureConfig) + .WithAzureOpenAITextGeneration(azureConfig) + .WithSimpleFileStorage() + .Build(); + }); +} +else +{ + throw new Exception("please configure settings for either OpenAI or Azure"); +} + +builder.Services.AddSingleton((sp) => +{ + return new KernelMemoryDataSource("teams-ai", sp.GetService()!); +}); + +// Create the bot as transient. In this case the ASP Controller is expecting an IBot. 
+builder.Services.AddTransient(sp => +{ + // Create loggers + ILoggerFactory loggerFactory = sp.GetService()!; + + // Create Prompt Manager + PromptManager prompts = new(new() + { + PromptFolder = "./Prompts" + }); + + prompts.AddDataSource("teams-ai", sp.GetService()!); + + ResponseReceivedHandler endStreamHandler = new((object sender, ResponseReceivedEventArgs args) => + { + StreamingResponse? streamer = args.Streamer; + + if (streamer == null) + { + return; + } + + AdaptiveCard adaptiveCard = new("1.6") + { + Body = [new AdaptiveTextBlock(streamer.Message) { Wrap = true }] + }; + + var adaptiveCardAttachment = new Attachment() + { + ContentType = "application/vnd.microsoft.card.adaptive", + Content = adaptiveCard, + }; + + + streamer.Attachments = [adaptiveCardAttachment]; + + }); + + // Create ActionPlanner + ActionPlanner planner = new( + options: new( + model: sp.GetService()!, + prompts: prompts, + defaultPrompt: async (context, state, planner) => + { + PromptTemplate template = prompts.GetPrompt("Chat"); + return await Task.FromResult(template); + } + ) + { + LogRepairs = true, + StartStreamingMessage = "Loading stream results...", + EndStreamHandler = endStreamHandler + }, + loggerFactory: loggerFactory + ); + + Application app = new ApplicationBuilder() + .WithAIOptions(new(planner)) + .WithStorage(sp.GetService()!) 
+ .Build(); + + app.AI.ImportActions(new ActionHandlers()); + + return app; +}); + +var app = builder.Build(); + +if (app.Environment.IsDevelopment()) +{ + app.UseDeveloperExceptionPage(); +} + +app.UseStaticFiles(); +app.UseRouting(); +app.MapControllers(); + +app.Run(); diff --git a/dotnet/samples/04.ai.g.teamsChefBot-streaming/Prompts/Chat/config.json b/dotnet/samples/04.ai.g.teamsChefBot-streaming/Prompts/Chat/config.json new file mode 100644 index 000000000..12e5546ae --- /dev/null +++ b/dotnet/samples/04.ai.g.teamsChefBot-streaming/Prompts/Chat/config.json @@ -0,0 +1,24 @@ +{ + "schema": 1.1, + "description": "A bot that help developers build Teams apps", + "type": "completion", + "completion": { + "model": "gpt-4o", + "completion_type": "chat", + "include_history": true, + "include_input": true, + "max_input_tokens": 2000, + "max_tokens": 1000, + "temperature": 0.2, + "top_p": 0.0, + "presence_penalty": 0.6, + "frequency_penalty": 0.0, + "stop_sequences": [] + }, + "augmentation": { + "augmentation_type": "none", + "data_sources": { + "teams-ai": 900 + } + } +} \ No newline at end of file diff --git a/dotnet/samples/04.ai.g.teamsChefBot-streaming/Prompts/Chat/skprompt.txt b/dotnet/samples/04.ai.g.teamsChefBot-streaming/Prompts/Chat/skprompt.txt new file mode 100644 index 000000000..cb129f502 --- /dev/null +++ b/dotnet/samples/04.ai.g.teamsChefBot-streaming/Prompts/Chat/skprompt.txt @@ -0,0 +1,4 @@ +The following is a conversation with an AI assistant, its name is Teams Chef. +Teams Chef is an expert in Microsoft Teams apps development and the Human is junior developer learning Microsoft Teams development for the first time. +Teams Chef should always reply by explaining new concepts in simple terms using cooking as parallel concepts. +Teams Chef should always greet the human, ask them their name, and then guide the junior developer in his journey to build new apps for Microsoft Teams. 
\ No newline at end of file diff --git a/dotnet/samples/04.ai.g.teamsChefBot-streaming/Properties/launchSettings.json b/dotnet/samples/04.ai.g.teamsChefBot-streaming/Properties/launchSettings.json new file mode 100644 index 000000000..67cb0e0a6 --- /dev/null +++ b/dotnet/samples/04.ai.g.teamsChefBot-streaming/Properties/launchSettings.json @@ -0,0 +1,27 @@ +{ + "profiles": { + // Debug project within Teams + "Microsoft Teams (browser)": { + "commandName": "Project", + "dotnetRunMessages": true, + "launchBrowser": true, + "launchUrl": "https://teams.microsoft.com/l/app/${{TEAMS_APP_ID}}?installAppPackage=true&webjoin=true&appTenantId=${{TEAMS_APP_TENANT_ID}}&login_hint=${{TEAMSFX_M365_USER_NAME}}", + "applicationUrl": "http://localhost:5130", + "environmentVariables": { + "ASPNETCORE_ENVIRONMENT": "Development" + }, + "hotReloadProfile": "aspnetcore" + } + //// Uncomment following profile to debug project only (without launching Teams) + //, + //"Start Project (not in Teams)": { + // "commandName": "Project", + // "dotnetRunMessages": true, + // "applicationUrl": "https://localhost:7130;http://localhost:5130", + // "environmentVariables": { + // "ASPNETCORE_ENVIRONMENT": "Development" + // }, + // "hotReloadProfile": "aspnetcore" + //} + } +} \ No newline at end of file diff --git a/dotnet/samples/04.ai.g.teamsChefBot-streaming/README.md b/dotnet/samples/04.ai.g.teamsChefBot-streaming/README.md new file mode 100644 index 000000000..67312964e --- /dev/null +++ b/dotnet/samples/04.ai.g.teamsChefBot-streaming/README.md @@ -0,0 +1,209 @@ +# Microsoft Teams Conversational Bot with AI: Teams Chef + +## Summary + +This is a conversational streaming bot for Microsoft Teams that thinks it's a Chef to help you cook Teams apps. The bot uses the `gpt-4o` model to chat with Teams users and respond in a polite and respectful manner, staying within the scope of the conversation.
+ +This sample illustrates basic conversational bot behavior in Microsoft Teams. The bot is built to allow GPT to facilitate the conversation on its behalf, using only a natural language prompt file to guide it. + +## Streaming +In addition, the sample illustrates our streaming feature. + +The following configurations are needed: +- Use the `DefaultAugmentation` class +- Set `Stream = true` in the `OpenAIModel` declaration + +Optional additions: +- Set the informative message in the `ActionPlanner` declaration via the `StartStreamingMessage` config. + +- Set attachments in the final chunk via the `EndStreamHandler` in the `ActionPlanner` declaration. + - Useful methods include + - `streamer.Attachments = [...attachments]` + - `streamer.Message` + + +```cs + // Create OpenAI Model + builder.Services.AddSingleton (sp => new( + new OpenAIModelOptions(config.OpenAI.ApiKey, "gpt-4o") + { + LogRequests = true, + Stream = true, + }, + sp.GetService() + )); + +ResponseReceivedHandler endStreamHandler = new((object sender, ResponseReceivedEventArgs args) => + { + StreamingResponse? 
streamer = args.Streamer; + + if (streamer == null) + { + return; + } + + AdaptiveCard adaptiveCard = new("1.6") + { + Body = [new AdaptiveTextBlock(streamer.Message) { Wrap = true }] + }; + + var adaptiveCardAttachment = new Attachment() + { + ContentType = "application/vnd.microsoft.card.adaptive", + Content = adaptiveCard, + }; + + + streamer.Attachments = [adaptiveCardAttachment]; + + }); + + + // Create ActionPlanner + ActionPlanner planner = new( + options: new( + model: sp.GetService()!, + prompts: prompts, + defaultPrompt: async (context, state, planner) => + { + PromptTemplate template = prompts.GetPrompt("Chat"); + return await Task.FromResult(template); + } + ) + { + LogRepairs = true, + StartStreamingMessage = "Loading stream results...", + EndStreamHandler = endStreamHandler + }, + loggerFactory: loggerFactory + ); +``` + +## Set up instructions + +All the samples in the C# .NET SDK can be set up in the same way. You can find the step by step instructions here: [Setup Instructions](../README.md). + +Note that this sample requires an AI service, so you need one more pre-step before Local Debug (F5). + +1. Set your Azure OpenAI related settings to *appsettings.Development.json*. + + ```json + "Azure": { + "OpenAIApiKey": "", + "OpenAIEndpoint": "", + "ContentSafetyApiKey": "", + "ContentSafetyEndpoint": "" + } + ``` + +## Interacting with the bot + +![Teams Chef Bot](./assets/TeamsChefBot.png) + +Interacting with the bot is simple - talk to it! You can invoke it by using @ mention and talk to it in plain language. + +The bot uses the `gpt-4o` model to chat with Teams users and respond in a polite and respectful manner, staying within the scope of the conversation. This is possible due to the `skprompt.txt` file's contents. + +## Deploy to Azure + +You can use Teams Toolkit for Visual Studio or CLI to host the bot in Azure. The sample includes Bicep templates in the `/infra` directory which are used by the tools to create resources in Azure.
+ +You can find deployment instructions [here](../README.md#deploy-to-azure). + +Note that, this sample requires AI service so you need one more pre-step before deploy to Azure. To configure the Azure resources to have an environment variable for the Azure OpenAI Key and other settings: + +1. In `./env/.env.dev.user` file, paste your Azure OpenAI related variables. + + ```bash + SECRET_AZURE_OPENAI_API_KEY= + SECRET_AZURE_OPENAI_ENDPOINT= + SECRET_AZURE_CONTENT_SAFETY_API_KEY= + SECRET_AZURE_CONTENT_SAFETY_ENDPOINT= + ``` + +The `SECRET_` prefix is a convention used by Teams Toolkit to mask the value in any logging output and is optional. + +## Use OpenAI + +Above steps use Azure OpenAI as AI service, optionally, you can also use OpenAI as AI service. + +**As prerequisites** + +1. Get an OpenAI api key. + +**For debugging (F5)** + +1. Set your [OpenAI API Key](https://platform.openai.com/settings/profile?tab=api-keys) to *appsettings.Development.json*. + + ```json + "OpenAI": { + "ApiKey": "" + }, + ``` + +**For deployment to Azure** + +To configure the Azure resources to have OpenAI environment variables: + +1. In `./env/.env.dev.user` file, paste your [OpenAI API Key](https://platform.openai.com/settings/profile?tab=api-keys) to the environment variable `SECRET_OPENAI_KEY=`. + +## Appendix + +Here's a list of the different capabilities shown in this sample: + +
+
Bot scaffolding
+ Throughout the 'Program.cs' file you'll see the scaffolding created to run a simple conversational bot, e.g. storage, authentication, and conversation state. +
+ + +
+
Prompt engineering
+The 'Prompts/Chat/skprompt.txt' file has descriptive prompt engineering that, in plain language and with minor training, instructs GPT how the bot should conduct itself and facilitate conversation. + +#### skprompt.txt + +```text +The following is a conversation with an AI assistant, its name is Teams Chef. +Teams Chef is an expert in Microsoft Teams apps development and the Human is junior developer learning Microsoft Teams development for the first time. +Teams Chef should always reply by explaining new concepts in simple terms using cooking as parallel concepts. +Teams Chef should always greet the human, ask them their name, and then guide the junior developer in his journey to build new apps for Microsoft Teams. + +{{$history}} +Human: {{$input}} +AI: +``` + +- The major section ("*The following is ... for Microsoft Teams.*") defines the basic direction, to tell how AI should behave on human's input. +- The final section ("*Human: ... AI: ...*") defines the input of current turn. The variable "*{{$history}}*" lets AI know the context about previous turns. +- The variables "*{{$input}}*" and "*{{$history}}*" are automatically resolved from `TurnState.Temp`. +
+
+
Conversational session history
+ Because this sample leaves the conversation to GPT, the bot simply facilitates user conversation as-is. But because it includes the 'skprompt.txt' file to guide it, GPT will store and leverage session history appropriately. + +For example, let's say the user's name is "Dave". The bot might carry on the following conversation: + +``` +AI: Hi there! My name is Teams Chef. It's nice to meet you. What's your name? +DAVE: My name is Dave. +AI: Hi Dave! It's great to meet you. Let me help you get started with Microsoft Teams app development. Have you ever cooked before? +DAVE: No, not yet, why? +AI: Cooking is a great way to learn ... +DAVE: Which kind of apps can I build for Microsoft Teams? +AI: Great question! You can build a variety ... +``` + +Notice that the bot remembered Dave's first message when responding to the second. + +
+
+
Localization across languages
+ Because this sample leverages GPT for all its natural language modelling, the user can talk to an AI bot in any language of their choosing. The bot will understand and respond appropriately with no additional code required. +
+ +## Further reading + +- [Teams Toolkit overview](https://aka.ms/vs-teams-toolkit-getting-started) +- [How Microsoft Teams bots work](https://docs.microsoft.com/en-us/azure/bot-service/bot-builder-basics-teams?view=azure-bot-service-4.0&tabs=csharp) diff --git a/dotnet/samples/04.ai.g.teamsChefBot-streaming/TeamsChefBot.csproj b/dotnet/samples/04.ai.g.teamsChefBot-streaming/TeamsChefBot.csproj new file mode 100644 index 000000000..447ddbeac --- /dev/null +++ b/dotnet/samples/04.ai.g.teamsChefBot-streaming/TeamsChefBot.csproj @@ -0,0 +1,62 @@ + + + + net8.0 + enable + enable + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + PreserveNewest + None + + + + + + + + + + + ..\..\packages\Microsoft.TeamsAI\Microsoft.TeamsAI\obj\Debug\netstandard2.0\Microsoft.Teams.AI.dll + + + + diff --git a/dotnet/samples/04.ai.g.teamsChefBot-streaming/TeamsChefBot.sln b/dotnet/samples/04.ai.g.teamsChefBot-streaming/TeamsChefBot.sln new file mode 100644 index 000000000..b34f8c3d0 --- /dev/null +++ b/dotnet/samples/04.ai.g.teamsChefBot-streaming/TeamsChefBot.sln @@ -0,0 +1,25 @@ + +Microsoft Visual Studio Solution File, Format Version 12.00 +# Visual Studio Version 17 +VisualStudioVersion = 17.6.33815.320 +MinimumVisualStudioVersion = 10.0.40219.1 +Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "TeamsChefBot", "TeamsChefBot.csproj", "{C2964D35-6742-4DBF-9685-5DD5A01D8D82}" +EndProject +Global + GlobalSection(SolutionConfigurationPlatforms) = preSolution + Debug|Any CPU = Debug|Any CPU + Release|Any CPU = Release|Any CPU + EndGlobalSection + GlobalSection(ProjectConfigurationPlatforms) = postSolution + {C2964D35-6742-4DBF-9685-5DD5A01D8D82}.Debug|Any CPU.ActiveCfg = Debug|Any CPU + {C2964D35-6742-4DBF-9685-5DD5A01D8D82}.Debug|Any CPU.Build.0 = Debug|Any CPU + {C2964D35-6742-4DBF-9685-5DD5A01D8D82}.Release|Any CPU.ActiveCfg = Release|Any CPU + {C2964D35-6742-4DBF-9685-5DD5A01D8D82}.Release|Any CPU.Build.0 = Release|Any CPU + EndGlobalSection + 
GlobalSection(SolutionProperties) = preSolution + HideSolutionNode = FALSE + EndGlobalSection + GlobalSection(ExtensibilityGlobals) = postSolution + SolutionGuid = {30CCD595-AEBE-4CC2-B016-33E2EA023EAE} + EndGlobalSection +EndGlobal diff --git a/dotnet/samples/04.ai.g.teamsChefBot-streaming/appPackage/color.png b/dotnet/samples/04.ai.g.teamsChefBot-streaming/appPackage/color.png new file mode 100644 index 000000000..4a6e40485 --- /dev/null +++ b/dotnet/samples/04.ai.g.teamsChefBot-streaming/appPackage/color.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:745e422c0a72f2c2ee9c80b097feb22c27bf5a0d63861d5a9c8242af99d8e1be +size 4990 diff --git a/dotnet/samples/04.ai.g.teamsChefBot-streaming/appPackage/manifest.json b/dotnet/samples/04.ai.g.teamsChefBot-streaming/appPackage/manifest.json new file mode 100644 index 000000000..827414e9e --- /dev/null +++ b/dotnet/samples/04.ai.g.teamsChefBot-streaming/appPackage/manifest.json @@ -0,0 +1,48 @@ +{ + "$schema": "https://developer.microsoft.com/json-schemas/teams/v1.15/MicrosoftTeams.schema.json", + "version": "1.1.0", + "manifestVersion": "1.15", + "id": "${{TEAMS_APP_ID}}", + "packageName": "com.package.name", + "name": { + "short": "TeamsChef${{APP_NAME_SUFFIX}}", + "full": "Teams Developer Chef" + }, + "developer": { + "name": "TeamsChef", + "mpnId": "", + "websiteUrl": "https://microsoft.com", + "privacyUrl": "https://privacy.microsoft.com/privacystatement", + "termsOfUseUrl": "https://www.microsoft.com/legal/terms-of-use" + }, + "description": { + "short": "Sample bot that thinks it's a Chef to help you cook Teams apps", + "full": "Sample bot that thinks it's a Chef to help you cook Teams apps" + }, + "icons": { + "outline": "outline.png", + "color": "color.png" + }, + "accentColor": "#FFFFFF", + "staticTabs": [ + { + "entityId": "conversations", + "scopes": ["personal"] + }, + { + "entityId": "about", + "scopes": ["personal"] + } + ], + "bots": [ + { + "botId": "${{BOT_ID}}", + "scopes": 
["personal", "team", "groupChat"], + "isNotificationOnly": false, + "supportsCalling": false, + "supportsVideo": false, + "supportsFiles": false + } + ], + "validDomains": [] +} diff --git a/dotnet/samples/04.ai.g.teamsChefBot-streaming/appPackage/outline.png b/dotnet/samples/04.ai.g.teamsChefBot-streaming/appPackage/outline.png new file mode 100644 index 000000000..b4123e1f2 --- /dev/null +++ b/dotnet/samples/04.ai.g.teamsChefBot-streaming/appPackage/outline.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:de215c72bbca73d2ebd301f81faaa2768786d62baf48ee45eb8edf6252d323e6 +size 852 diff --git a/dotnet/samples/04.ai.g.teamsChefBot-streaming/appsettings.Development.json b/dotnet/samples/04.ai.g.teamsChefBot-streaming/appsettings.Development.json new file mode 100644 index 000000000..1928e121f --- /dev/null +++ b/dotnet/samples/04.ai.g.teamsChefBot-streaming/appsettings.Development.json @@ -0,0 +1,21 @@ +{ + "Logging": { + "LogLevel": { + "Default": "Information", + "Microsoft.AspNetCore": "Warning", + "Microsoft.Teams.AI": "Trace" + } + }, + "AllowedHosts": "*", + "BOT_ID": "${botId}", + "BOT_PASSWORD": "${botPassword}", + "Azure": { + "OpenAIApiKey": "", + "OpenAIEndpoint": "", + "ContentSafetyApiKey": "", + "ContentSafetyEndpoint": "" + }, + "OpenAI": { + "ApiKey": "" + } +} \ No newline at end of file diff --git a/dotnet/samples/04.ai.g.teamsChefBot-streaming/appsettings.json b/dotnet/samples/04.ai.g.teamsChefBot-streaming/appsettings.json new file mode 100644 index 000000000..9ac767903 --- /dev/null +++ b/dotnet/samples/04.ai.g.teamsChefBot-streaming/appsettings.json @@ -0,0 +1,20 @@ +{ + "Logging": { + "LogLevel": { + "Default": "Information", + "Microsoft.AspNetCore": "Warning" + } + }, + "AllowedHosts": "*", + "BOT_ID": "${botId}", + "BOT_PASSWORD": "${botPassword}", + "Azure": { + "OpenAIApiKey": "", + "OpenAIEndpoint": "", + "ContentSafetyApiKey": "", + "ContentSafetyEndpoint": "" + }, + "OpenAI": { + "ApiKey": "" + } +} diff --git 
a/dotnet/samples/04.ai.g.teamsChefBot-streaming/assets/TeamsChefBot.png b/dotnet/samples/04.ai.g.teamsChefBot-streaming/assets/TeamsChefBot.png new file mode 100644 index 000000000..e5087b7fb --- /dev/null +++ b/dotnet/samples/04.ai.g.teamsChefBot-streaming/assets/TeamsChefBot.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a897b5ac531ab7dcb3b240be4e8cca855698cad7f053cfd390ed720956f83e13 +size 184778 diff --git a/dotnet/samples/04.ai.g.teamsChefBot-streaming/env/.env.dev b/dotnet/samples/04.ai.g.teamsChefBot-streaming/env/.env.dev new file mode 100644 index 000000000..efcbe1f06 --- /dev/null +++ b/dotnet/samples/04.ai.g.teamsChefBot-streaming/env/.env.dev @@ -0,0 +1,18 @@ +# This file includes environment variables that will be committed to git by default. + +# Built-in environment variables +TEAMSFX_ENV=dev + +# Updating AZURE_SUBSCRIPTION_ID or AZURE_RESOURCE_GROUP_NAME after provision may also require an update to RESOURCE_SUFFIX, because some services require a globally unique name across subscriptions/resource groups. +AZURE_SUBSCRIPTION_ID= +AZURE_RESOURCE_GROUP_NAME= +RESOURCE_SUFFIX= + +# Generated during provision, you can also add your own variables. +BOT_ID= +TEAMS_APP_ID= +BOT_AZURE_APP_SERVICE_RESOURCE_ID= +BOT_DOMAIN= + + +APP_NAME_SUFFIX=dev diff --git a/dotnet/samples/04.ai.g.teamsChefBot-streaming/env/.env.local b/dotnet/samples/04.ai.g.teamsChefBot-streaming/env/.env.local new file mode 100644 index 000000000..07b69ee56 --- /dev/null +++ b/dotnet/samples/04.ai.g.teamsChefBot-streaming/env/.env.local @@ -0,0 +1,12 @@ +# This file includes environment variables that can be committed to git. It's gitignored by default because it represents your local development environment. + +# Built-in environment variables +TEAMSFX_ENV=local + +# Generated during provision, you can also add your own variables. 
+BOT_ID= +TEAMS_APP_ID= +BOT_DOMAIN= + + +APP_NAME_SUFFIX=local \ No newline at end of file diff --git a/dotnet/samples/04.ai.g.teamsChefBot-streaming/files/README.md b/dotnet/samples/04.ai.g.teamsChefBot-streaming/files/README.md new file mode 100644 index 000000000..14e02e05e --- /dev/null +++ b/dotnet/samples/04.ai.g.teamsChefBot-streaming/files/README.md @@ -0,0 +1 @@ +Each document in this folder is a markdown file that was scraped from the Teams AI Github repository. This knowledge base will be used by the Teams Chef bot to answer questions about the Teams AI library. \ No newline at end of file diff --git a/dotnet/samples/04.ai.g.teamsChefBot-streaming/files/action-planner.txt b/dotnet/samples/04.ai.g.teamsChefBot-streaming/files/action-planner.txt new file mode 100644 index 000000000..85583c58b --- /dev/null +++ b/dotnet/samples/04.ai.g.teamsChefBot-streaming/files/action-planner.txt @@ -0,0 +1,28 @@ +# Action Planner + +The Action Planner is a powerful planner that uses an LLM to generate plans. It can trigger parameterized actions and send text-based responses to the user. It supports the following advanced features: + +### [Prompt Management](./PROMPTS.md) + +The Action Planner has a built-in prompt management system that supports creating prompt templates as folders in the file system. A prompt template is the prompt text along with all the configurations for completion with the LLM model. Dynamic prompts also support template variables and functions. + +#### [Data Sources](./DATA-SOURCES.md) +Use data sources to augment prompts even further and facilitate better responses. + +### [Augmentations](./AUGMENTATIONS.md) +Augmentations virtually eliminate the need for prompt engineering. Prompts +can be configured to use a named augmentation which will be automatically appended to the outgoing +prompt. Augmentations let the developer specify whether they want to support multi-step plans (sequence), +or create an AutoGPT style agent (monologue). 
+ +### Validations +Validators are used to validate the response returned by the LLM and can guarantee +that the parameters passed to an action match a supplied schema. The validator used is automatically +selected based on the augmentation being used. Validators also prevent hallucinated action names, +making it impossible for the LLM to trigger an action that doesn't exist. + +### Repair +The Action Planner will automatically attempt to repair invalid responses returned by the +LLM using a feedback loop. When a validation fails, the ActionPlanner sends the error back to the +model, along with an instruction asking it to fix its mistake. This feedback technique leads to a +dramatic reduction in the number of invalid responses returned by the model. \ No newline at end of file diff --git a/dotnet/samples/04.ai.g.teamsChefBot-streaming/files/actions.txt b/dotnet/samples/04.ai.g.teamsChefBot-streaming/files/actions.txt new file mode 100644 index 000000000..7551dcaca --- /dev/null +++ b/dotnet/samples/04.ai.g.teamsChefBot-streaming/files/actions.txt @@ -0,0 +1,62 @@ +# Action + +An action is an atomic function that is registered to the AI System. It is a fundamental building block of a plan. + +Here's an example of what an action creating a new list would look like in code. Call this action `createList`: + +### C# + +[List Bot sample](https://github.com/microsoft/teams-ai/blob/a20f8715d3fe81e11c330853e3930e22abe298af/dotnet/samples/04.ai.d.chainedActions.listBot/ListBotActions.cs#L15) +```C# +[Action("createList")] +public bool CreateList([ActionTurnState] ListState turnState, [ActionParameters] Dictionary parameters) +{ + ArgumentNullException.ThrowIfNull(turnState); + ArgumentNullException.ThrowIfNull(parameters); + + string listName = GetParameterString(parameters, "list"); + + EnsureListExists(turnState, listName); + + // Continues execution of next command in the plan. + return ""; +} +``` + +> Adding the `Action` attribute marks the method as an action. 
To register it to the AI System you have pass the instance object containing this method to the `AI.ImportActions(instance)` method. Alternatively, you can use the `AI.RegisterAction(name, handler)` to register a single action. + +### JS + +[List Bot sample](https://github.com/microsoft/teams-ai/blob/0fca2ed09d327ecdc682f2b15eb342a552733f5e/js/samples/04.ai.d.chainedActions.listBot/src/index.ts#L153) + +```typescript +app.ai.action("createList", async (context: TurnContext, state: ApplicationTurnState, parameters: ListAndItems) => { + // Ex. create a list with name "Grocery Shopping". + ensureListExists(state, parameters.list); + + // Continues exectuion of next command in the plan. + return true; +}); +``` + +> The `action` method registers the action named `createList` with corresponding callback function. + + +## Default Actions + +The user can register custom actions or override default actions in the system. Below is a list of default actions present: + +| Action | Called when | +| --------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `___UnknownAction___` | An unknown action is predicted by the planner. | +| `___FlaggedInput___` | The input is flagged by the moderator. | +| `___FlaggedOutput___` | The output is flagged by the moderator. | +| `___HttpError___` | The planner encounters an HTTP response with status code >= `400` | +| `__TooManySteps__` | The planner task either executed too many steps or timed out. | +| `___PlanReady___` | The plan has been predicted by the planner and it has passed moderation. This can be overriden to mutate the plan before execution. | +| `___DO___` | The AI system is executing a plan with the DO command. Overriding this action will change how _all_ DO commands are handled. | +| `___SAY___` | The AI system is executing a plan with the SAY command. 
Overriding this action will change how _all_ SAY commands are handled. By default this will send the `response` message back to the user. | + +> Detailed description of each action can be found in the codebase. + +Note that `___DO___` and `___SAY___`, despite being called commands, are actually specialized actions. This means that a plan is really a sequence of actions. diff --git a/dotnet/samples/04.ai.g.teamsChefBot-streaming/files/ai-system.txt b/dotnet/samples/04.ai.g.teamsChefBot-streaming/files/ai-system.txt new file mode 100644 index 000000000..ab29432a5 --- /dev/null +++ b/dotnet/samples/04.ai.g.teamsChefBot-streaming/files/ai-system.txt @@ -0,0 +1,7 @@ +# The AI System + +The AI system is responsible for moderating input and output, generating plans, and executing them. It can be used free standing or routed to by the Application object. This system is encapsulated in the `AI` class. It is made of three components, the moderator, the planner, and actions. + +1. [Moderator](./MODERATOR.md) +2. [Planner](./PLANNER.md) +3. [Actions](./ACTIONS.md) \ No newline at end of file diff --git a/dotnet/samples/04.ai.g.teamsChefBot-streaming/files/application.txt b/dotnet/samples/04.ai.g.teamsChefBot-streaming/files/application.txt new file mode 100644 index 000000000..90fc4ef82 --- /dev/null +++ b/dotnet/samples/04.ai.g.teamsChefBot-streaming/files/application.txt @@ -0,0 +1,69 @@ +# The `Application` class + +The `Application` class encapsulates all the business logic for the application and comprises of two major components, the _Activity Handler System_ and the _AI System_. + + +## The Activity Handler System + +The activity handler system is the primary way to implement bot or message extension application logic. It is a set of methods and configurations that allows you to register callbacks (known as route handlers), which will trigger based on the incoming activity. 
These can be in the form of a message, message reaction, or virtually any interaction within the Teams app. + +Here's an example of registering a route handler that will run when the the user sends *"/login"* to the bot: + +**JS** +```js +// Listen for user to say '/login'. +app.message('/login', async (context: TurnContext, state: TurnState) => { + await context.sendActivity(`Starting sign in flow.`); + // start signin flow +}); +``` + +**C#** +```cs +// Listen for user to say '/login'. +app.OnMessage("/login", async (ITurnContext turnContext, TurnState turnState, CancellationToken cancellationToken) => +{ + await turnContext.SendActivityAsync("Starting sign in flow.", cancellationToken: cancellationToken); + // start signin flow +}); +``` +> The `message` and `OnMessage` methods are referred to as activity or *route registration* method. +> The `turnContext` and `turnState` parameters are present in every route handler. To learn more about them see [TURNS](TURNS.md). + +The `Application` groups the route registration methods based on the specific feature groups: + + +| **Feature** | **Description** | +| ----------------- | ---------------------------------------------------------------- | +| Task Modules | Task module related activities like `task/fetch`. | +| Message Extension | Message extension activities like `composeExtension/query`. | +| Meetings | Meeting activites like `application/vnd.microsoft.meetingStart`. | +| AdaptiveCards | Adaptive card activities like `adaptiveCard/action`. | +| General | Generic activites like `message`. | + +> To see all the route registration methods supported, see the migration docs ([JS](https://github.com/microsoft/teams-ai/blob/main/getting-started/MIGRATION/JS.md#activity-handler-methods) | [C#](https://github.com/microsoft/teams-ai/blob/main/getting-started/MIGRATION/DOTNET.md#activity-handler-methods)). + +In general, the activity handler system is all that is needed to have a functional bot or message extension. 
+ +## The AI System +The AI System is an optional component used to plug in LLM powered experiences like user intent mapping, chaining, etc. It is configured once when orchestrating the application class. To learn more about it see [The AI System](./AI-SYSTEM.md). + +## The Routing Logic + +When an incoming activity reaches the server, the bot adapter handles the necessary authentication and creates a turn context object that encapsulates the activity details. Then the `Application`'s main method (`run()` in JavaScript, `OnTurnAsync()` in C#) is called. Its logic can be broken down into these eight steps. + +1. If configured in the application options, pulses of the `Typing` activity are sent to the user. +2. If configured in the application options, the @mention is removed from the incoming message activity. +3. The turn state is loaded using the configured turn state factory. +4. If user authentication is configured, then attempt to sign the user in. If the user is already signed in, retrieve the access token and continue to step 5. Otherwise, start the sign in flow and end the current turn. +5. The `beforeTurn` activity handler is executed. If it returns false, save turn state to storage and end the turn. +6. All routes are iterated over and if a selector function is triggered, then the corresponding route handler is executed. +7. If no route is triggered, the incoming activity is a message, and an AI System is configured, then it is invoked by calling the `AI.run()` method. +8. The `AfterTurnAsync` activity handler is executed. If it returns true, save turn state to storage. + + +> Note: _End the turn_ means that the main method has terminated execution and so the application has completed processing the incoming activity. + +> Note: To learn about what a *turn* is, see [TURNS](TURNS.md). 
+ +![the routing logic](../assets/routing-logic.png) diff --git a/dotnet/samples/04.ai.g.teamsChefBot-streaming/files/augmentations.txt b/dotnet/samples/04.ai.g.teamsChefBot-streaming/files/augmentations.txt new file mode 100644 index 000000000..c37675980 --- /dev/null +++ b/dotnet/samples/04.ai.g.teamsChefBot-streaming/files/augmentations.txt @@ -0,0 +1,193 @@ +# Augmentations + +Augmentations virtually eliminate the need for prompt engineering. Prompts +can be configured to use a named augmentation which will be automatically appended to the outgoing +prompt. Augmentations let the developer specify whether they want to support multi-step plans (sequence), +or create an AutoGPT style agent (monologue). + +It is recommended to read the [AI System](./AI-SYSTEM.md) and [Action Planner](./ACTION-PLANNER.md) guides if you are not familiar with plans and actions. + +## Sequence Augmentation + +This augmentation allows the model to return a sequence of actions to perform. It does this by appending instructions to the prompt text during runtime. These instructions guide the model to generate a plan object that uses actions defined in the `actions.json` file from the prompt template folder. + +Here's an example of the `actions.json` file from the [Light Bot](https://github.com/microsoft/teams-ai/blob/77339da9e3e03bfd7f629fc796cfebdcd2891afb/js/samples/04.ai.c.actionMapping.lightBot/src/prompts/sequence/actions.json) sample: + +```json +[ + { + "name": "LightsOn", + "description": "Turns on the lights" + }, + { + "name": "LightsOff", + "description": "Turns off the lights" + }, + { + "name": "Pause", + "description": "Delays for a period of time", + "parameters": { + "type": "object", + "properties": { + "time": { + "type": "number", + "description": "The amount of time to delay in milliseconds" + } + }, + "required": [ + "time" + ] + } + } +] +``` + +It defines three actions, `LightsOn`, `LightsOff` and `Pause`. 
The `Pause` action requires the `time` parameter, while the other two don't. + +These actions are then appended to the prompt text during runtime. This is text added to end of the prompt text: + +```txt +actions: + LightsOn: + description: Turns on the lights + LightsOff: + description: Turns off the lights + Pause: + description: Delays for a period of time + parameters: + time: + type: number + description: The amount of time to delay in milliseconds + +Use the actions above to create a plan in the following JSON format: + +{ + "type": "plan", + "commands": [ + { + "type": "DO", + "action": "", + "parameters": { + "": "" + } + }, + { + "type": "SAY", + "response": "" + } + ] +} +``` +> Note: When the prompt is rendered, the above text is compressed to reduce token usage. + +The first section lists the actions in yaml structure. The second section tells the model to return a plan object of the following schema. + +### Configuring your prompt + +There are two steps to use sequence augmentation in your prompt: + +1. Update the prompt's `config.json` by adding the `augmentation` property. +```diff +{ + "schema": 1.1, + "description": "", + "type": "", + "completion": {}, ++ "augmentation": { ++ "augmentation_type": "sequence" ++ } +} +``` +2. Create an `actions.json` file in the prompt folder with a list of actions. For example: +```json +[ + { + "name": "LightsOn", + "description": "Turns on the lights" + }, + { + "name": "LightsOff", + "description": "Turns off the lights" + }, +] +``` + +To learn more about the action object schema see the corresponding typescript interface [ChatCompletionAction](https://github.com/microsoft/teams-ai/blob/0fca2ed09d327ecdc682f2b15eb342a552733f5e/js/packages/teams-ai/src/models/ChatCompletionAction.ts#L14). + + +## Monologue Augmentation + +This augmentation adds support for an inner monologue to the prompt. The monologue helps the LLM perform chain-of-thought reasoning across multiple turns of conversation. 
It does this by appending instructions to the prompt text during runtime. It tells the model to explicitly show its thought, reasoning and plan in response to the user's message, then predict the next action to execute. If looping is configured, then the predicted action can guide the model to predict the next action by returning the instruction as a string in the action handler callback. The loop will terminate as soon as the model predicts a *SAY* action, which sends the response back to the user. + +Using the `actions.json` example from above, the instructions appended to the prompt look like the text below. + +These actions are then used and appended to the prompt text during runtime. This is text added to the end of the prompt text: + +```txt +actions: + LightsOn: + description: Turns on the lights + LightsOff: + description: Turns off the lights + Pause: + description: Delays for a period of time + parameters: + time: + type: number + description: The amount of time to delay in milliseconds + +Return a JSON object with your thoughts and the next action to perform. +Only respond with the JSON format below and base your plan on the actions above. +If you're not sure what to do, you can always say something by returning a SAY action. +If you're told your JSON response has errors, do your best to fix them. + +Response Format: + +{ + "thoughts": { + "thought": "", + "reasoning": "", + "plan": "- short bulleted\\n- list that conveys\\n- long-term plan" + }, + "action": { + "name": "", + "parameters": { + "": "" + } + } +} +``` + +> Note: When the prompt is rendered, the above text is compressed to reduce token usage. + +The first section lists the actions in yaml structure. The second section tells the model to return a plan object of the following schema. + +### Configuring your prompt + +To use monologue augmentation in your prompt there are two steps. + +1. Update the prompt's `config.json` by adding the `augmentation` property. 
+```diff +{ + "schema": 1.1, + "description": "", + "type": "", + "completion": {}, ++ "augmentation": { ++ "augmentation_type": "monologue" ++ } +} +``` +2. Create an `actions.json` file in the prompt folder with a list of actions. For example: +```json +[ + { + "name": "LightsOn", + "description": "Turns on the lights" + }, + { + "name": "LightsOff", + "description": "Turns off the lights" + }, +] +``` diff --git a/dotnet/samples/04.ai.g.teamsChefBot-streaming/files/c#-migration.txt b/dotnet/samples/04.ai.g.teamsChefBot-streaming/files/c#-migration.txt new file mode 100644 index 000000000..26d9e3ad9 --- /dev/null +++ b/dotnet/samples/04.ai.g.teamsChefBot-streaming/files/c#-migration.txt @@ -0,0 +1,244 @@ +# Migrating from the BotFramework SDK (C#) + +_**Navigation**_ +- [00.OVERVIEW](./README.md) +- [01.JS](./JS.md) +- [**02.DOTNET**](./DOTNET.md) +___ + +If you have a bot built using the C# BF SDK, the following will help you update your bot to the Teams AI library. + +## New Project or Migrate existing app + +Since the library builds on top of the BF SDK, much of the bot logic can be directly carried over to the Teams AI app. If you want to start with a new project, set up the Echo bot sample in the [quick start](../.QUICKSTART.md) guide and jump directly to [step 2](#2-replace-the-activity-handler-implementations-with-specific-route-handlers). + +If you want to migrate your existing app start with [step 1](#1-replace-the-activityhandler-with-the-application-object). + +## 1. Replace the ActivityHandler with the Application object + +To understand how to replace the `ActivityHandler` with the `Application` object, let's look at the Echo bot sample from the BF SDK and the Teams AI library. 
+ +**BF SDK [Echo bot](https://github.com/microsoft/BotBuilder-Samples/tree/main/samples/csharp_dotnetcore/02.echo-bot)** + +[`Startup.cs`](https://github.com/microsoft/BotBuilder-Samples/blob/main/samples/csharp_dotnetcore/02.echo-bot/Startup.cs) + +```cs +// Create the bot as a transient. In this case the ASP Controller is expecting an IBot. +services.AddTransient(); +``` + +[`EchoBot.cs`](https://github.com/microsoft/BotBuilder-Samples/blob/main/samples/csharp_dotnetcore/02.echo-bot/Bots/EchoBot.cs) + +```cs +public class EchoBot : ActivityHandler +{ + protected override async Task OnMessageActivityAsync(ITurnContext turnContext, CancellationToken cancellationToken) + { + var replyText = $"Echo: {turnContext.Activity.Text}"; + await turnContext.SendActivityAsync(MessageFactory.Text(replyText, replyText), cancellationToken); + } +} +``` + +> Note that `Echobot` derives from the `ActivityHandler` class. + +**Teams AI library [Echo bot](https://github.com/microsoft/teams-ai/tree/main/dotnet/samples/01.messaging.echoBot)** + +[`Program.cs`](https://github.com/microsoft/teams-ai/blob/main/dotnet/samples/01.messaging.echoBot/Program.cs) + +```cs +// Create the storage to persist turn state +builder.Services.AddSingleton(); + +// Create the bot as a transient. In this case the ASP Controller is expecting an IBot. +builder.Services.AddTransient(sp => +{ + IStorage storage = sp.GetService(); + ApplicationOptions applicationOptions = new() + { + Storage = storage, + TurnStateFactory = () => + { + return new AppState(); + } + }; + + Application app = new(applicationOptions); + + // Listen for user to say "/reset" and then delete conversation state + app.OnMessage("/reset", ActivityHandlers.ResetMessageHandler); + + // Listen for ANY message to be received. 
MUST BE AFTER ANY OTHER MESSAGE HANDLERS + app.OnActivity(ActivityTypes.Message, ActivityHandlers.MessageHandler); + + return app; +}); +``` + +[`ActivityHandlers.cs`](https://github.com/microsoft/teams-ai/blob/main/dotnet/samples/01.messaging.echoBot/ActivityHandlers.cs) + +```cs + /// + /// Defines the activity handlers. + /// + public static class ActivityHandlers + { + /// + /// Handles "/reset" message. + /// + public static RouteHandler ResetMessageHandler = async (ITurnContext turnContext, AppState turnState, CancellationToken cancellationToken) => + { + turnState.DeleteConversationState(); + await turnContext.SendActivityAsync("Ok I've deleted the current conversation state", cancellationToken: cancellationToken); + }; + + /// + /// Handles messages except "/reset". + /// + public static RouteHandler MessageHandler = async (ITurnContext turnContext, AppState turnState, CancellationToken cancellationToken) => + { + int count = turnState.Conversation.MessageCount; + + // Increment count state. + turnState.Conversation.MessageCount = ++count; + + await turnContext.SendActivityAsync($"[{count}] you said: {turnContext.Activity.Text}", cancellationToken: cancellationToken); + }; + } +``` + +#### Optional ApplicationBuilder Class + +You may also use the `ApplicationBuilder` class to build your `Application`. This option provides greater readability and separates the management of the various configuration options (e.g., storage, turn state, AI options, etc). + +```cs +//// Constructor initialization method +// Application app = new() +// { +// storage +// }; + +// Build pattern method +var applicationBuilder = new ApplicationBuilder() + .WithStorage(storage); + +// Create Application +Application app = applicationBuilder.Build(); +``` + +## 2. Replace the activity handler implementations with specific route registration method. + +The `EchoBot` class derives from the `ActivityHandler` class. 
Each method in the class corresponds to a specific route registration method in the `Application` object. Here's a simple example: + +Given the `EchoBot` implementation: + +```cs +public class EchoBot : ActivityHandler +{ + protected override async Task OnMessageActivityAsync(ITurnContext turnContext, CancellationToken cancellationToken) + { + var replyText = $"Echo: {turnContext.Activity.Text}"; + await turnContext.SendActivityAsync(MessageFactory.Text(replyText, replyText), cancellationToken); + } +} +``` + +This is how a route should be added to the `Application` object: + +```cs +app.OnActivity(ActivityTypes.Message, async (ITurnContext turnContext, TurnState turnState, CancellationToken cancellationToken) => +{ + var replyText = $"Echo: {turnContext.Activity.Text}"; + await turnContext.SendActivityAsync(MessageFactory.Text(replyText, replyText), cancellationToken); +}); +``` + +> The `OnActivity` method allows you to register a possible route for the incoming activity. For each method in the `ActivityHandler` or `TeamsActivityHandler` class, there is an equivalent route registration method. + +If your bot derives from `ActivityHandler` or the `TeamsActivityHandler` refer to the following table to see which method maps to which `Application` route registration method. + +## Activity Handler Methods + +If your bot derives from the `TeamsActivityHandler` refer to the following table to see which method maps to which `Application` route registration method. 
+ +#### Invoke Activities + +| `TeamsActivityHandler` method | `Application` route registration method | +| ------------------------------------------------------------ | ----------------------------------------------------------------------------------------------- | +| `OnTeamsO365ConnectorCardActionAsync` | `OnO365ConnectorCardAction` (usage: `app.OnO365ConnectorCardAction(...)`) | +| `OnTeamsFileConsentAsync` | Either `OnFileConsentAccept` or `OnFileConsentDecline` | +| `OnTeamsConfigFetchAsync` | `OnConfigFetch` | +| `OnTeamsConfigSubmitAsync` | `OnConfigSubmit` | +| `OnTeamsTaskModuleFetchAsync` | `TaskModules.OnFetch` (usage: `app.TaskModules.Fetch(...)`) | +| `OnTeamsTaskModuleSubmitAsync` | `TaskModules.OnSubmit` | +| `OnTeamsConfigSubmitAsync` | `MessageExtensions.OnQueryLink` (usage: `app.MessageExtensions.OnQueryLink(...)`) | +| `OnTeamsAnonymousAppBasedLinkQueryAsync` | `MessageExtensions.OnAnonymousQueryLink` | +| `OnTeamsMessagingExtensionQueryAsync` | `MessageExtensions.OnQuery` | +| `OnTeamsMessagingExtensionSelectItemAsync` | `MessageExtensions.OnSelectItem` | +| `OnTeamsMessagingExtensionSubmitActionDispatchAsync` | `MessageExtensions.OnSubmitAction` | +| `OnTeamsMessagingExtensionFetchTaskAsync` | `MessageExtensions.OnFetchTask` | +| `OnTeamsMessagingExtensionConfigurationQuerySettingUrlAsync` | `MessageExtensions.OnQueryUrlSetting` | +| `OnTeamsMessagingExtensionConfigurationSettingAsync` | `MessageExtensions.OnConfigureSettings` | +| `OnTeamsMessagingExtensionCardButtonClickedAsync` | `MessageExtensions.OnCardButtonClicked` | +| `OnTeamsSigninVerifyStateAsync` | N/A (you should use the built-in user authentication feature instead of handling this manually) | + +#### Conversation Update Activities + +These are the following methods from the `TeamsActivityHandler`: + +- `onTeamsChannelCreatedAsync` +- `onTeamsChannelDeletedAsync` +- `onTeamsChannelRenamedAsync` +- `onTeamsTeamArchivedAsync` +- `onTeamsTeamDeletedAsync` +- 
`onTeamsTeamHardDeletedAsync` +- `onTeamsChannelRestoredAsync` +- `onTeamsTeamRenamedAsync` +- `onTeamsTeamRestoredAsync` +- `onTeamsTeamUnarchivedAsync` + +These activities can be handled using the `Application.OnConversationUpdate` method. + +For example in the `TeamsActivityHandler`: + +```cs +protected virtual Task OnTeamsChannelCreatedAsync(ChannelInfo channelInfo, TeamInfo teamInfo, ITurnContext turnContext, CancellationToken cancellationToken) +{ + // Handle channel created activity +} +``` + +The `Application` equivalent: + +```cs +app.OnConversationUpdate(ConversationUpdateEvents.ChannelCreated, async (ITurnContext turnContext, TurnState turnState, CancellationToken cancellationToken) => +{ + // Handle channel created activity +}); +``` + +> Note that the first parameter `event` specifies which conversation update event to handle. + +#### Message Activities + +| `TeamsActivityHandler` method | `Application` route registration method | +| -------------------------------- | -------------------------------------------------------- | +| `OnMessage` | `OnMessage` (usage: `app.OnMessage(...)`) | +| `OnTeamsMessageEditAsync` | `OnMessageEdit` | +| `OnTeamsMessageUndeletedAsync` | `OnMessageUndelete` | +| `OnTeamsMessageSoftDeleteAsync` | `OnMessageDelete` | +| `OnMessageReactionActivityAsync` | `OnMessageReactionsAdded` or `OnMessageReactionsRemoved` | +| `OnTeamsReadRecieptAsync` | `OnTeamsReadReceipt` | + +#### Meeting Activities + +| `TeamsActivityHandler` method | `Application` route registration method | +| -------------------------------------- | ------------------------------------------------------- | +| `OnTeamsMeetingStartAsync` | `Meetings.OnStart` (usage: `app.Meetings.OnStart(...)`) | +| `OnTeamsMeetingEndAsync` | `Meetings.OnEnd` | +| `OnTeamsMeetingParticipantsJoinAsync` | `Meetings.OnParticipantsJoin` | +| `OnTeamsMeetingParticipantsLeaveAsync` | `Meetings.OnParticipantsLeave` | + +#### Other Activities + +If there are activities for which there 
isn't a corresponding route registration method, you can use the generic route registration method `Application.OnActivity` and specify a custom selector function given the activity object as input. diff --git a/dotnet/samples/04.ai.g.teamsChefBot-streaming/files/concepts.txt b/dotnet/samples/04.ai.g.teamsChefBot-streaming/files/concepts.txt new file mode 100644 index 000000000..3aaaf6f09 --- /dev/null +++ b/dotnet/samples/04.ai.g.teamsChefBot-streaming/files/concepts.txt @@ -0,0 +1,33 @@ +# Concepts + +Here you will find short guides on features available in the library. + + + +**General Concepts** + +| Name | Description | +| --------------------------------------- | ------------------------------------------------- | +| [Turn Context and Turn State](TURNS.md) | Explains what the turn context and turn state is. | +| [The Application class](APPLICATION.md) | What the `Application` class is and how it works. | +| [User Authentication](USER-AUTH.md) | Describes user authentication features out of the box | + + +
+ +**The AI System Concepts** +| Name | Description | +| --------------------------------------- | ------------------------------------------------- | +| [The AI System](AI-SYSTEM.md) | Describes the AI System. | +| [Planner](PLANNER.md) | Planner and plans. | +| [Moderator](MODERATOR.md) | The moderator. | +| [Actions](ACTIONS.md) | The action in the AI system. | + +
+ +**Action Planner Concepts** +| Name | Description | +| --------------------------------------- | ------------------------------------------------- | +| [The Action Planner](ACTION-PLANNER.md) | Describes the Action Planner. | +| [Prompt Management](PROMPTS.md) | Prompts, prompt templates, and creating prompt templates. | +| [Augmentations](AUGMENTATIONS.md) | Monologue and sequence augmentations. | diff --git a/dotnet/samples/04.ai.g.teamsChefBot-streaming/files/data-sources.txt b/dotnet/samples/04.ai.g.teamsChefBot-streaming/files/data-sources.txt new file mode 100644 index 000000000..b63f8f15d --- /dev/null +++ b/dotnet/samples/04.ai.g.teamsChefBot-streaming/files/data-sources.txt @@ -0,0 +1,47 @@ +# Data Sources +Data sources allow the injection of relevant information from external sources into prompts, such as vector databases or cognitive search. A vector data source makes it easy to add [RAG](https://en.wikipedia.org/wiki/Prompt_engineering#Retrieval-augmented_generation) to any prompt, allowing for better and more accurate replies from the bot. + +Within each Action Planner’s prompt management system, a list of data sources can be registered. For each data source, a max number of tokens to use is specified, via `maxTokens`. + +## Customize a Data Source +1. Construct a class that implements our `DataSource` base class. + +2. Register the data source with your prompt management system through the `addDataSource` function. + +3. To augment a specific prompt, you can specify the name(s) of the data sources within the prompt's `config.json` file. + +Our simplest example (primarily for testing) is `TextDataSource`, which adds a static block of text to a prompt. + +Our most complex example is the `VectraDataSource` in the Chef Bot sample, which uses an external library called Vectra. 
+ +### Customized Example of VectraDataSource +Here is an example of the configuration for the +[Chef Bot sample](https://github.com/microsoft/teams-ai/tree/main/js/samples/04.ai.a.teamsChefBot): + +**JS** +```js +// Inside VectraDataSource.ts +export class VectraDataSource implements DataSource +``` + +```js +// Inside of index.ts +planner.prompts.addDataSource( + new VectraDataSource({ + name: 'teams-ai', + apiKey: process.env.OPENAI_KEY!, + azureApiKey: process.env.AZURE_OPENAI_KEY!, + azureEndpoint: process.env.AZURE_OPENAI_ENDPOINT!, + indexFolder: path.join(__dirname, '../index') + }) +); +``` +Inside the prompt's config.json. Here, `teams-ai` denotes the name of the VectraDataSource, and 1200 is `maxTokens`. +```json +"augmentation": { + "augmentation_type": "none", + "data_sources": { + "teams-ai": 1200 + } +} +``` \ No newline at end of file diff --git a/dotnet/samples/04.ai.g.teamsChefBot-streaming/files/getting started.txt b/dotnet/samples/04.ai.g.teamsChefBot-streaming/files/getting started.txt new file mode 100644 index 000000000..795e6d138 --- /dev/null +++ b/dotnet/samples/04.ai.g.teamsChefBot-streaming/files/getting started.txt @@ -0,0 +1,31 @@ +# Getting Started + +_**Navigation**_ +- [**00.OVERVIEW**](./README.md) +- [01.QUICKSTART](./QUICKSTART.md) +- [02.SAMPLES](./SAMPLES.md) +___ + +### Get started with the Teams AI Library + +The first step is to get a basic bot running E2E through the [Quickstart](./QUICKSTART.md) guide to build Echo Bot, which echoes back any message sent to it. This simple bot helps to familiarize yourself with the Teams AI Library and ensures your system is set up correctly to move onto the AI powered samples. + +If you would rather dive into an AI sample first, check out the fully conversational Teams Chef Bot sample on the [samples](./SAMPLES.md) page. + +There are numerous samples to try showcasing the different capabilities of the Teams AI Library. 
+ +### Migration + +If you have a bot built using the BotFramework SDK and want to migrate to the Teams AI library, see [Migration](./MIGRATION/README.md). + +### Concepts + +To dive deeper into the library learning more about its AI components and concepts, see [Concepts](./CONCEPTS/README.md). + +### Useful links + +- [Microsoft Learn Docs](https://learn.microsoft.com/en-us/microsoftteams/platform/bots/how-to/teams%20conversational%20ai/teams-conversation-ai-overview) +- [C# samples folder](https://github.com/microsoft/teams-ai/tree/main/dotnet/samples) +- [JS samples folder](https://github.com/microsoft/teams-ai/tree/main/js/samples) +- [@microsoft/teams-ai package on npm](https://www.npmjs.com/package/@microsoft/teams-ai) +- [Microsoft.Teams.AI on nuget.org](https://www.nuget.org/packages/Microsoft.Teams.AI) diff --git a/dotnet/samples/04.ai.g.teamsChefBot-streaming/files/github.txt b/dotnet/samples/04.ai.g.teamsChefBot-streaming/files/github.txt new file mode 100644 index 000000000..a0ab142a0 --- /dev/null +++ b/dotnet/samples/04.ai.g.teamsChefBot-streaming/files/github.txt @@ -0,0 +1,19 @@ +# Teams AI Library + +Welcome to the Teams AI Library .NET package! + +This SDK is specifically designed to assist you in creating bots capable of interacting with Teams and Microsoft 365 applications. It is constructed using the [Bot Framework SDK](https://github.com/microsoft/botbuilder-dotnet) as its foundation, simplifying the process of developing bots that interact with Teams' artificial intelligence capabilities. See the [Teams AI repo README.md](https://github.com/microsoft/teams-ai), for general information, and JavaScript support is available via the [js](https://github.com/microsoft/teams-ai/tree/main/js) folder. 
+ +Requirements: + +* [.NET 6.0 SDK](https://dotnet.microsoft.com/download/dotnet/6.0) +* (Optional) [Azure OpenAI](https://azure.microsoft.com/en-us/products/ai-services/openai-service) resource or an account with [OpenAI](https://platform.openai.com/) + + +## Getting Started + +To get started, take a look at the [getting started docs](https://github.com/microsoft/teams-ai/blob/main/getting-started/README.md). + +## Migration + +If you're migrating an existing project, switching to add on the Teams AI Library layer is quick and simple. See the [migration guide](https://github.com/microsoft/teams-ai/blob/main/getting-started/MIGRATION/DOTNET.md). diff --git a/dotnet/samples/04.ai.g.teamsChefBot-streaming/files/js-migration.txt b/dotnet/samples/04.ai.g.teamsChefBot-streaming/files/js-migration.txt new file mode 100644 index 000000000..445da387c --- /dev/null +++ b/dotnet/samples/04.ai.g.teamsChefBot-streaming/files/js-migration.txt @@ -0,0 +1,162 @@ +# Migrating from the BotFramework SDK (Javascript) + +_**Navigation**_ +- [00.OVERVIEW](./README.md) +- [**01.JS**](./JS.md) +- [02.DOTNET](./DOTNET.md) +___ + +If you have a bot built using the JS BotFramework SDK, the following will help you update your bot to the Teams AI library. + +## New Project or Migrate existing app + +Since the library builds on top of the BF SDK, much of the bot logic can be directly carried over to the Teams AI app. If you want to start with a new project, set up the Echo bot sample in the [quick start](../.QUICKSTART.md) guide and jump directly to [step 2](#2-replace-the-activity-handler-implementations-with-specific-route-handlers). + +If you want to migrate your existing app start with [step 1](#1-replace-the-activityhandler-with-the-application-object). + +## 1. Replace the ActivityHandler with the Application object + +Replace `ActivityHandler` with `Application`. 
+ +```diff ++ import { Application, TurnState } from "@microsoft/teams-ai"; + +- const app = BotActivityHandler(); + +// Define storage and application ++ const storage = new MemoryStorage(); ++ const app = new Application({ + storage +}); +``` + +### Optional ApplicationBuilder Class + +You may also use the `ApplicationBuilder` class to build your `Application`. This option provides greater readability and separates the management of the various configuration options (e.g., storage, turn state, AI options, etc). + +```js +//// Constructor initialization method +// const app = new Application() +// { +// storage +// }; + +// Build pattern method +const app = new ApplicationBuilder() + .withStorage(storage) + .build(); // this internally calls the Application constructor +``` + +## 2. Replace the activity handler implementations with specific route registration methods + +The `BotActivityHandler` class derives from the `ActivityHandler` class. Each method in the class corresponds to a specific route registration method (`handler`) in the `Application` object. Here's a simple example: + +Given the `BotActivityHandler` implementation: + +```js +class BotActivityHandler extends ActivityHandler { + constructor() { + this.onMessage(async (context, next) => { + const replyText = `Echo: ${ context.activity.text }`; + await context.sendActivity(MessageFactory.text(replyText, replyText)); + await next(); + }); + } +} +``` + +This is how a route should be added to the `Application` object: + +```js +app.activity(ActivityTypes.Message, async(context: TurnContext, state: TurnState) => { + const replyText = `Echo: ${ context.activity.text }`; + await context.sendActivity(replyText); +}); +``` + +> The `activity` method is referred to as a *route registration method*. For each method in the `ActivityHandler` or `TeamsActivityHandler` class, there is an equivalent route registration method. + +Your existing BF app will probably have different activity handlers implemented. 
To migrate that over with Teams AI route registration methods see the following. + +## Activity Handler Methods + +If your bot derives from the `TeamsActivityHandler` refer to the following table to see which method maps to which `Application` route registration method. + +### Invoke Activities + +| `TeamsActivityHandler` method | `Application` route registration method | +| ----------------------------------------------------------- | ----------------------------------------------------------------------------------------------- | +| `handleTeamsO365ConnectorCardAction` | `O365ConnectorCardAction` (usage: `app.O365ConnectorCardAction(...)`) | +| `handleTeamsFileConsent` | Either `fileConsentAccept` or `fileConsentDecline` | +| `handleTeamsTaskModuleFetch` | `taskModules.fetch` (usage: `app.taskModules.Fetch(...)`) | +| `handleTeamsTaskModuleSubmit` | `taskModules.submit` | +| `handleTeamsConfigFetch` | `taskModules.configFetch` | +| `handleTeamsConfigSubmit` | `taskModules.configSubmit` | +| `handleTeamsAppBasedLinkQuery` | `messageExtensions.queryLink` (usage: `app.MessageExtensions.queryLink(...)`) | +| `handleTeamsAnonymousAppBasedLinkQuery` | `messageExtensions.anonymousQueryLink` | +| `handleTeamsMessagingExtensionQuery` | `messageExtensions.query` | +| `handleTeamsMessagingExtensionSelectItem` | `messageExtensions.selectItem` | +| `handleTeamsMessagingExtensionSubmitActionDispatch` | `messageExtensions.submitAction` | +| `handleTeamsMessagingExtensionFetchTask` | `messageExtensions.fetchTask` | +| `handleTeamsMessagingExtensionConfigurationQuerySettingUrl` | `messageExtensions.queryUrlSetting` | +| `handleTeamsMessagingExtensionConfigurationSetting` | `messageExtensions.configureSettings` | +| `handleTeamsMessagingExtensionCardButtonClicked` | `messageExtensions.handleOnButtonClicked` | +| `handleTeamsSigninVerifyState` | N/A (you should use the built-in user authentication feature instead of handling this manually) | +| `handleTeamsSigninTokenExchange` 
| N/A (you should use the built-in user authentication feature instead of handling this manually) | + +### Conversation Update Activities + +These are the following methods from the `TeamsActivityHandler`: + +- `onTeamsChannelCreated` +- `onTeamsChannelDeleted` +- `onTeamsChannelRenamed` +- `onTeamsTeamArchived` +- `onTeamsTeamDeleted` +- `onTeamsTeamHardDeleted` +- `onTeamsChannelRestored` +- `onTeamsTeamRenamed` +- `onTeamsTeamRestored` +- `onTeamsTeamUnarchived` + +These activities can be handled using the `Application.conversationUpdate` method. + +For example in the `TeamsActivityHandler`: + +```js +protected async onTeamsChannelCreated(context: TurnContext): Promise { + // handle teams channel creation. +} +``` + +The `Application` equivalent: + +```js +app.conversationUpdate('channelCreated', (context: TurnContext, state: TurnState) => { + // handle teams channel creation. +}) +``` + +> Note that the first parameter `event` specifies which conversation update event to handle. It only accepts specific values that can be found through your IDE's intellisense. + +### Message Activities + +| `TeamsActivityHandler` method | `Application` route registration method | +| --------------------------------------------------------------------------- | -------------------------------------------------------------------------- | +| `OnMessage` | `message` | +| `OnTeamsMessageUndelete`, `OnTeamsMessageEdit` & `OnTeamsMessageSoftDelete` | `messageEventUpdate`, the first parameter `event` specifies the activity. 
| +| `OnMessageReactionActivity` | `messageReactions` | +| `OnTeamsReadReciept` | `teamsReadReceipt` | + +### Meeting Activities + +| `TeamsActivityHandler` method | `Application` route registration method | +| --------------------------------- | ---------------------------- | +| `OnTeamsMeetingStart` | `meetings.start` | +| `OnTeamsMeetingEnd` | `meetings.end` | +| `onTeamsMeetingParticipantsJoin` | `meetings.participantsJoin` | +| `onTeamsMeetingParticipantsLeave` | `meetings.participantsLeave` | + +### Other Activities + +If there are activities for which there isn't a corresponding route registration method, you can use the generic route registration method `Application.activity` and specify a custom selector function given the activity object as input. diff --git a/dotnet/samples/04.ai.g.teamsChefBot-streaming/files/migration.txt b/dotnet/samples/04.ai.g.teamsChefBot-streaming/files/migration.txt new file mode 100644 index 000000000..b7818b029 --- /dev/null +++ b/dotnet/samples/04.ai.g.teamsChefBot-streaming/files/migration.txt @@ -0,0 +1,29 @@ +# Migration + +_**Navigation**_ +- [**00.OVERVIEW**](./README.md) +- [01.JS](./JS.md) +- [02.DOTNET](./DOTNET.md) +___ + +### Why you should migrate to the Teams AI library + +Is your Teams App currently built using the [BotFramework (BF) SDK](https://github.com/microsoft/botframework-sdk)? If so you should migrate it to the Teams AI library. + +Here are a few reasons why: + +1. Take advantage of the advanced AI system to build complex LLM-powered Teams applications. +2. User authentication is built right into the library, simplifying the set up process. +3. The library builds on top of fundamental BF SDK tools & concepts, so your existing knowledge is transferrable. +4. The library will continue to support latest tools & APIs in the LLM space. + +### Difference between the Teams AI library and BF SDK + +This library provides the `Application` object which replaces the traditional `ActivityHandler` object. 
It supports a simpler fluent style of authoring bots versus the inheritance based approach used by the `ActivityHandler` class. The `Application` object has built-in support for calling into the library's AI system which can be used to create bots that leverage Large Language Models (LLM) and other AI capabilities. It also has built-in support for configuring user authentication to access user data from third-party services. + +### Guides + +Here are the guides on how to migrate from the BotFramework SDK: + +1. [JS Migration](JS.md) +2. [C# Migration](DOTNET.md) diff --git a/dotnet/samples/04.ai.g.teamsChefBot-streaming/files/moderator.txt b/dotnet/samples/04.ai.g.teamsChefBot-streaming/files/moderator.txt new file mode 100644 index 000000000..b2e3940c6 --- /dev/null +++ b/dotnet/samples/04.ai.g.teamsChefBot-streaming/files/moderator.txt @@ -0,0 +1,68 @@ +# Moderator + +The `Moderator` is responsible for reviewing the input prompt and approving the AI generated plans. It is configured when orchestrating the `Application` class. + +The AI system is such that developers can create their own moderator class by simply implementing the moderator interface. The library has a few native moderators that can be used out of the box: + +| Name | Description | +| -------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------- | +| [OpenAIModerator](#openai-moderator) | Wrapper around OpenAI's [Moderation API](https://platform.openai.com/docs/api-reference/moderations). 
| +| [AzureContentSafetyModerator](#azure-content-safety-moderator) | Wrapper around [Azure Content Safety](https://learn.microsoft.com/en-us/azure/ai-services/content-safety/overview) API | + +## OpenAI Moderator + +Here's an example of configuring the `OpenAIModerator`: + +**JS** +```js +const moderator = new OpenAIModerator({ + apiKey: process.env.OPENAI_KEY!, + moderate: 'both' +}); + +const app = new Application({ + storage, + ai: { + planner, + moderator + } +}); +``` + +**C#** +```cs +OpenAIModeratorOptions moderatorOptions = new(config.OpenAI.ApiKey, ModerationType.Both); +IModerator moderator = new OpenAIModerator(moderatorOptions); + +AIOptions aIOptions = new(planner) +{ + Moderator = moderator +}; + +var app = new ApplicationBuilder() +.WithStorage(storage) +.WithAIOptions(aIOptions) +.Build(); +``` +> This snippet is taken from the [Twenty Questions bot] sample. +> Note for C# application, the moderator should be registered to the Web app's service collection as a singleton. 
+ +## Azure Content Safety Moderator + +Here's an example of configuring the `AzureContentSafetyModerator`: + +**JS** +```js +const moderator = new AzureContentSafetyModerator({ + apiKey: process.env.AZURE_CONTENT_SAFETY_KEY, + endpoint: process.env.AZURE_CONTENT_SAFETY_ENDPOINT, + apiVersion: '2023-04-30-preview', + moderate: 'both' +}); +``` + +**C#** +```cs +AzureContentSafetyModeratorOptions moderatorOptions = new(config.Azure.ContentSafetyApiKey, config.Azure.ContentSafetyEndpoint, ModerationType.Both); +IModerator moderator = new AzureContentSafetyModerator(moderatorOptions); +``` \ No newline at end of file diff --git a/dotnet/samples/04.ai.g.teamsChefBot-streaming/files/planner.txt b/dotnet/samples/04.ai.g.teamsChefBot-streaming/files/planner.txt new file mode 100644 index 000000000..00e766a7d --- /dev/null +++ b/dotnet/samples/04.ai.g.teamsChefBot-streaming/files/planner.txt @@ -0,0 +1,85 @@ +# Planner + +The planner receives the user's ask and returns a plan on how to accomplish the request. The user's ask is in the form of a prompt or prompt template. It does this by using AI to mix and match atomic functions (called _actions_) registered to the AI system so that it can recombine them into a series of steps that complete a goal. + +This is a powerful concept because it allows you to create actions that can be used in ways that you as a developer may not have thought of. + +For instance, If you have a task with `Summarize` & `SendEmail` actions, the planner could combine them to create workflows like "Rewrite the following report into a short paragraph and email it to johndoe@email.com" without you explicitly having to write code for those scenarios. + +The planner is an extensible part of the AI system. This means that a custom planner can be created for your specific needs. Out of the box, the Teams AI library supports the following planners. 
+ + +| Planner | Description | C# | JS/TS | Python | +| ------------------------------------ | ------------------------------------------------------------------------------------------------------------------------------- | --- | ----- | ------ | +| [ActionPlanner](./ACTION-PLANNER.md) | Powerful planner that uses LLMs to generate plans. It has a built-in prompt management, LLM modularity, amongst other features. | ✅ | ✅ | ❌ | +| AssistantsPlanner | A planner that uses OpenAI's Assistants APIs to generate plans. | ✅ | ✅ | ❌ | + +### Plan + +A plan is the entity that is generated by the planner. It is a JSON object of the following shape: + +```json +{ + "type": "plan", + "commands": [ + { + "type": "DO", + "action": "", + "parameters": { + "": "" + } + }, + { + "type": "SAY", + "response": "" + } + ] +} +``` + +A plan consists of two types of commands and their entities: + +- **SAY**: Sends a message to the user. + - _response_: The string message to send. +- **DO**: AI system will execute a specific _action_, passing in the generated parameters. + - _action_: A lambda function registered to the AI system + - _parameters_: A dictionary passed to the action. + +The JSON object string is returned by the LLM and deserialized into an object. + +#### Example + +Here's an example of a plan for the following ask: + +User: +`Create a grocery shopping list and add bananas to it.` + +Plan: + +```json +{ + "type": "plan", + "commands": [ + { + "type": "DO", + "action": "createList", + "entities": { + "name": "Grocery Shopping" + } + }, + { + "type": "DO", + "action": "addItem", + "entities": { + "name": "Bananas" + } + }, + { + "type": "SAY", + "response": "Created a grocery shopping list and added a banana to it." + } + ] +} +``` + +This plan is executed in sequential order. So first the list will be created and then an item will be added to it. Finally, the `response` message will be sent to the user. 
diff --git a/dotnet/samples/04.ai.g.teamsChefBot-streaming/files/prompts.txt b/dotnet/samples/04.ai.g.teamsChefBot-streaming/files/prompts.txt new file mode 100644 index 000000000..efdad7742 --- /dev/null +++ b/dotnet/samples/04.ai.g.teamsChefBot-streaming/files/prompts.txt @@ -0,0 +1,192 @@ +# Prompts + +Prompts play a crucial role in communicating and directing the behavior of Large Language Models (LLMs) AI. +They serve as inputs or queries that users can provide to elicit specific responses from a model. + +Here's a prompt that asks the LLM for name suggestions: + +_Input:_ + +``` +Give me 3 name suggestions for my pet golden retriever. +``` + +_Response:_ + +``` +Some possible name suggestions for a pet golden retriever are: + +- Bailey +- Sunny +- Cooper +``` + +# Prompt Template + +Prompt templates are a simple and powerful way to +define and compose AI functions **using plain text**. +You can use it to create natural language prompts, generate responses, extract +information, **invoke other prompts,** or perform any other task that can be +expressed with text. + +The language supports two basic features that allow you to include +variables and call functions. + +**Simple Example:** + +Here's an example of a prompt template: + +``` +Give me 3 name suggestions for my pet {{ $petName }}. +``` + +`$petName` is a variable that is populated on runtime when the template is rendered. + +## Prompt Template Language + +You don't need to write any code or import any external libraries, just use the +double curly braces {{...}} to embed expressions in your prompts. +Teams AI will parse your template and execute the logic behind it. +This way, you can easily integrate AI into your apps with minimal effort and +maximum flexibility. + +### Variables + +To include a variable value in your text, use the `{{$variableName}}` syntax. 
For example, if you have a variable called name that holds the user's name, you can write: + +`Hello {{$name}}, nice to meet you!` + +This will produce a greeting with the user's name. + +Spaces are ignored, so if you find it more readable, you can also write: + +`Hello {{ $name }}, nice to meet you!` + +Here's how to define variables in code: + +**C#** + +In an *action* or *route handler* where the turn state object is available: +```cs +state.Temp.Post = "Lorem Ipsum..." +``` + +The usage in the prompt: +``` +This is the user's post: {{ $post }} +``` + +> Note: The `turnState.Temp.Post = ...` updates a dictionary with the `post` key under the hood from the [AI Message Extension sample](https://github.com/microsoft/teams-ai/blob/a20f8715d3fe81e11c330853e3930e22abe298af/dotnet/samples/04.ai.b.messageExtensions.gptME/ActivityHandlers.cs#L156). + +**Javascript** + +```typescript +app.beforeTurn((context, state) => { + state.temp.post = "Lorem Ipsum..."; +}); +``` + +The usage in the prompt: +``` +This is the user's post: {{ $post }} +``` + +You can simply add to the `state.temp` object, and it will be accessible from the prompt template on runtime. Note that the safest place to do that would be in the `beforeTurn` activity because it will execute before any activity handler or action. + + +**Default Variables** + +The following are variables accessible in the prompt template without having to manually configure them. These are pre-defined in the turn state and populated by the library. Users can override them by changing it in the turn state. + +| Variable name | Description | +| ------------- | ---------------------------------------------- | +| `input` | Input passed from the user to the AI Library. | +| `lastOutput` | Output returned from the last executed action. | + +### Function calls + +To call an external function and embed the result in your text, use the `{{ functionName }}` syntax. 
For example, if you have a function called `diceRoll` that returns a random number between 1 and 6, you can write: + +`The dice roll has landed on: {{ diceRoll }}` + +**C#** + +In the `Application` class, + +```cs +prompts.AddFunction("diceRoll", async (context, memory, functions, tokenizer, args) => +{ + int diceRoll = // random number between 1 and 6 + return diceRoll; +}); +``` + +**Javascript** + +```typescript +prompts.addFunction('diceRoll', async (context, state, functions, tokenizer, args) => { + let diceRoll = // random number between 1 and 6 + return diceRoll; +}); +``` + +# Creating Prompt Templates + +Each prompt template is a folder with two files, `skprompt.txt` and `config.json`. The folder name is the prompt template's name which can be referred to in your code. The `skprompt.txt` file contains the prompt's text, which can contain natural language or prompt template syntax as defined in the previous section. The `config.json` file specifies the prompt completion configuration. + +Here's an example of a prompt template from the [Twenty Questions](https://github.com/microsoft/teams-ai/blob/c5ec11842b808e48cd214b3cb52da84e5811da33/js/samples/04.e.twentyQuestions) sample. + +*skprompt.txt* +``` +You are the AI in a game of 20 questions. +The goal of the game is for the Human to guess a secret within 20 questions. +The AI should answer questions about the secret. +The AI should assume that every message from the Human is a question about the secret. + +GuessCount: {{$conversation.guessCount}} +RemainingGuesses: {{$conversation.remainingGuesses}} +Secret: {{$conversation.secretWord}} + +Answer the human's question but do not mention the secret word. 
+``` + +*config.json* +```json +{ + "schema": 1.1, + "description": "A bot that plays a game of 20 questions", + "type": "completion", + "completion": { + "completion_type": "chat", + "include_history": false, + "include_input": true, + "max_input_tokens": 2000, + "max_tokens": 256, + "temperature": 0.7, + "top_p": 0.0, + "presence_penalty": 0.6, + "frequency_penalty": 0.0 + } +} +``` + +> Note that the configuration properties in the file do not include all the possible configurations. To learn more about the description of each configuration and all the supported configurations see the [`PromptTemplatConfig`](https://github.com/microsoft/teams-ai/blob/2d43f5ca5b3bf27844f760663641741cae4a3243/js/packages/teams-ai/src/prompts/PromptTemplate.ts#L46C18-L46C39) Typescript interface. + +These files can be found under the `src/prompts/chat/` folder. So, this prompt template's name is `chat`. Then, to plug these files in the Action Planner, the prompt manager has to be created with the folder path specified and then passed into the Action Planner constructor: + +**C#** +```cs +PromptManager prompts = new PromptManager(new PromptManagerOptions(){ + PromptFolder = "./prompts" +}); +``` + +The file path is relative to the source of file in which the `PromptManager` is created. In this case the `Program.cs` was in the same folder as the `prompts` folder. 
+ +**Javascript** +```ts +const prompts = new PromptManager({ + promptsFolder: path.join(__dirname, '../src/prompts') +}); +``` + diff --git a/dotnet/samples/04.ai.g.teamsChefBot-streaming/files/quickstart.txt b/dotnet/samples/04.ai.g.teamsChefBot-streaming/files/quickstart.txt new file mode 100644 index 000000000..24dd1d130 --- /dev/null +++ b/dotnet/samples/04.ai.g.teamsChefBot-streaming/files/quickstart.txt @@ -0,0 +1,153 @@ +# Quickstart + +_**Navigation**_ +- [00.OVERVIEW](./README.md) +- [**01.QUICKSTART**](./QUICKSTART.md) +- [02.SAMPLES](./SAMPLES.md) +___ + +In this quickstart we will show you how to get the Echo Bot up and running. The Echo Bot echoes back messages sent to it while keeping track of the number of messages sent by the user. + + +* [C# Quickstart](#c-quickstart) +* [Javascript Quickstart](#javascript) + + +## C# Quickstart + +This guide will show you have the set up the Echo Bot using the C# library. + +### Prerequisites + +To get started, ensure that you have the following tools: + +| Install | For using... | +| ------------------------------------------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| [Visual Studio](https://visualstudio.microsoft.com/downloads/) (17.7.0 or greater) | C# build environments. Use the latest version. | +| [Teams Toolkit](https://learn.microsoft.com/en-us/microsoftteams/platform/toolkit/toolkit-v4/teams-toolkit-fundamentals-vs?pivots=visual-studio-v17-7) | Microsoft Visual Studio extension that creates a project scaffolding for your app. Use the latest version. 
| +| [Git](https://git-scm.com/downloads) | Git is a version control system that helps you manage different versions of code within a repository. | +| [Microsoft Teams](https://www.microsoft.com/microsoft-teams/download-app) | Microsoft Teams to collaborate with everyone you work with through apps for chat, meetings, and call-all in one place. | +| [Microsoft Edge](https://www.microsoft.com/edge) (recommended) or [Google Chrome](https://www.google.com/chrome/) | A browser with developer tools. | +| [Microsoft 365 developer account](/microsoftteams/platform/concepts/build-and-test/prepare-your-o365-tenant) | Access to Teams account with the appropriate permissions to install an app and [enable custom Teams apps and turn on custom app uploading](../../../concepts/build-and-test/prepare-your-o365-tenant.md#enable-custom-teams-apps-and-turn-on-custom-app-uploading). | + +
+ +### Build and run the sample app + +1. Clone the teams-ai repository + + ```cmd + git clone https://github.com/microsoft/teams-ai.git + ``` + +2. Open **Visual Studio** and select `Open a project or a solution`. + +3. Navigate to the `teams-ai/dotnet/samples/01.messaging.echoBot` folder and open the `Echobot.sln` file. + +4. In the debug dropdown menu, select *Dev Tunnels > Create A Tunnel* (Tunnel type: `Persistent` & Access: `Public`) or select an existing public dev tunnel. Ensure that the dev tunnel is selected. + + ![create a tunnel](https://learn.microsoft.com/en-us/microsoftteams/platform/assets/images/bots/dotnet-ai-library-dev-tunnel.png) + + + +5. Right-click your project and select *Teams Toolkit > Prepare Teams App Dependencies* + + ![prepare teams app dependencies](https://learn.microsoft.com/en-us/microsoftteams/platform/assets/images/bots/dotnet-ai-library-prepare-teams-app.png) + + > Note: If you are running into errors in this step, ensure that you have correctly configured the dev tunnels in step 4. + +6. If prompted, sign in with a Microsoft 365 account for the Teams organization you want to install the app to. + + > If you do not have permission to upload custom apps (sideloading), Teams Toolkit will recommend creating and using a [Microsoft 365 Developer Program](https://developer.microsoft.com/microsoft-365/dev-program) account - a free program to get your own dev environment sandbox that includes Teams. + +7. Press F5, or select the `Debug > Start Debugging` menu in Visual Studio. If step 3 was completed correctly then this should launch Teams on the browser. + +8. You should be prompted to sideload a bot into Teams. Click the `Add` button to load the app in Teams. + + ![add echobot](./assets/quickstart-echobot-add.png) + +9. This should redirect you to a chat window with the bot. + + ![demo-image](./assets/quickstart-echobot-demo.png) + + +## Javascript + +This guide will show you how to set up the Echo Bot using the JS library.
+ +### Prerequisite + +| Install | For using... | +| ----------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| [Visual Studio Code](https://code.visualstudio.com/download) | Typescript build environments. Use the latest version. | +| [Teams Toolkit](https://marketplace.visualstudio.com/items?itemName=TeamsDevApp.ms-teams-vscode-extension) (5.3.x or greater) | Microsoft Visual Studio Code extension that creates a project scaffolding for your app. Use the latest version. | +| [Git](https://git-scm.com/downloads) | Git is a version control system that helps you manage different versions of code within a repository. | +| [Node.js](https://nodejs.org/en) (16 or 18) | Microsoft Teams to collaborate with everyone you work with through apps for chat, meetings, and call-all in one place. | +| [Microsoft Teams](https://www.microsoft.com/microsoft-teams/download-app) | Microsoft Teams to collaborate with everyone you work with through apps for chat, meetings, and call-all in one place. | +| [Microsoft Edge](https://www.microsoft.com/edge) (recommended) or [Google Chrome](https://www.google.com/chrome/) | A browser with developer tools. | +| [Microsoft 365 developer account](/microsoftteams/platform/concepts/build-and-test/prepare-your-o365-tenant) | Access to Teams account with the appropriate permissions to install an app and [enable custom Teams apps and turn on custom app uploading](../../../concepts/build-and-test/prepare-your-o365-tenant.md#enable-custom-teams-apps-and-turn-on-custom-app-uploading). || +[Yarn](https://yarnpkg.com/) (1.22.x or greater) | Node.js package manager used to install dependencies and build samples. 
| + +### Build and run the sample app + +1. Clone the repository. + ```cmd + git clone https://github.com/microsoft/teams-ai.git + ``` + +2. Go to **Visual Studio Code**. + +3. Select `File > Open Folder`. + +4. Go to the location where you cloned the teams-ai repo and select the `teams-ai` folder. + +5. Click `Select Folder`. + + ![:::image type="content" source="../../../assets/images/bots/ai-library-dot-net-select-folder.png" alt-text="Screenshot shows the teams-ai folder and the Select Folder option.":::](https://learn.microsoft.com/en-us/microsoftteams/platform/assets/images/bots/ai-library-dot-net-select-folder.png) + +6. Select `View > Terminal`. A terminal window opens. + +7. In the terminal window, run the following command to go to the js folder: + + ``` + cd ./js/ + ``` + +8. Run the following command to install dependencies: + + ```terminal + yarn install + ``` + +9. Run the following command to build the project and samples: + + ```terminal + yarn build + ``` + +10. After the dependencies are installed and project is built, select `File > Open Folder`. + +11. Go to `teams-ai > js > samples > 01.messaging.a.echoBot` and click `Select Folder`. This opens the echo bot sample folder in VS Code. + +12. From the left pane, select `Teams Toolkit`. + +13. Under `ACCOUNTS`, sign in to the following: + + * **Microsoft 365 account** + + +14. To debug your app, press the **F5** key. + + A browser tab opens a Teams web client requesting to add the bot to your tenant. + +15. Select **Add**. + + ![add-image](./assets/quickstart-echobot-add.png) + + A chat window opens. + +16. In the message compose area, send a message to invoke the bot. + + ![demo-image](./assets/quickstart-echobot-demo.png) + +The bot will echo back what the user sends it while keeping track of the number of messages sent by the user.
diff --git a/dotnet/samples/04.ai.g.teamsChefBot-streaming/files/samples.txt b/dotnet/samples/04.ai.g.teamsChefBot-streaming/files/samples.txt new file mode 100644 index 000000000..16a3e2977 --- /dev/null +++ b/dotnet/samples/04.ai.g.teamsChefBot-streaming/files/samples.txt @@ -0,0 +1,77 @@ +# Samples + +_**Navigation**_ +- [00.OVERVIEW](./README.md) +- [01.QUICKSTART](./QUICKSTART.md) +- [**02.SAMPLES**](./SAMPLES.md) +___ + + +After completing the quickstart guide, the next step is to try out the samples. + +Samples are E2E teams apps that are easy to set up locally. There are various samples to showcase the different features supported. + +The following is a list of all the samples we support organized into four categories. If you are new to the library it is recommended to start with the basic samples. + +When you are ready to dive into the AI Samples, try the fully conversational Teams Chef Bot sample that illustrates how to use Retrieval Augmentation Generation to ground the AI model’s answers in custom documents and datasets. + +- [Samples](#samples) + - [Basic Samples](#basic-samples) + - [AI Samples](#ai-samples) + - [User Authentication Samples](#user-authentication-samples) + - [Advanced Samples](#advanced-samples) + +## Basic Samples + +These samples showcase basic functionalities to build message extensions and conversational bots. + +| Name | Description | Languages Supported | +| ---------------------------------- | ------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| *Echo bot* | Bot that echos back the users message. 
| [JS](https://github.com/microsoft/teams-ai/tree/main/js/samples/01.messaging.a.echoBot), [C#](https://github.com/microsoft/teams-ai/tree/main/dotnet/samples/01.messaging.echoBot) | +| *Search Command Message Extension* | Message Extension to search NPM for a specific package and return the result as an Adaptive Card. | [JS](https://github.com/microsoft/teams-ai/tree/main/js/samples/02.messageExtensions.a.searchCommand), [C#](https://github.com/microsoft/teams-ai/tree/main/dotnet/samples/02.messageExtensions.a.searchCommand) | +| *Type-Ahead Bot* | Bot that shows how to incorporate Adaptive Cards into the coversational flow. | [JS](https://github.com/microsoft/teams-ai/tree/main/js/samples/03.adaptiveCards.a.typeAheadBot), [C#](https://github.com/microsoft/teams-ai/tree/main/dotnet/samples/03.adaptiveCards.a.typeAheadBot) | + +## AI Samples + +These samples showcase the AI features supported by the library. It builds on the basics of implementing conversational bots and message extensions. + +| Name | Description | Languages Supported | +| ---------------------- | ------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| *Teams Chef Bot* | Bot that helps the user build Teams apps by answering queries against external data source. | [JS](https://github.com/microsoft/teams-ai/tree/main/js/samples/04.ai.a.teamsChefBot), [C#](https://github.com/microsoft/teams-ai/tree/main/dotnet/samples/04.ai.a.teamsChefBot) | +| *AI Message Extension* | Message Extension that leverages GPT models to help users generate and update posts. 
| [JS](https://github.com/microsoft/teams-ai/tree/main/js/samples/04.ai.b.messageExtensions.AI-ME), [C#](https://github.com/microsoft/teams-ai/tree/main/dotnet/samples/04.ai.b.messageExtensions.gptME) | +| *Light Bot* | Bot that can switch the light on or off. It uses AI to map users message to predefined actions (or skills) | [JS](https://github.com/microsoft/teams-ai/tree/main/js/samples/04.ai.c.actionMapping.lightBot), [C#](https://github.com/microsoft/teams-ai/tree/main/dotnet/samples/04.ai.c.actionMapping.lightBot) | +| *List Bot* | Bot that helps the user maintain task lists. It can add, remove, update, and search lists and tasks. | [JS](https://github.com/microsoft/teams-ai/tree/main/js/samples/04.ai.d.chainedActions.listBot), [C#](https://github.com/microsoft/teams-ai/tree/main/dotnet/samples/04.ai.d.chainedActions.listBot) | +| *DevOps Bot* | Bot that helps the user perform DevOps actions such as create, update, triage and summarize work items. | [JS](https://github.com/microsoft/teams-ai/tree/main/js/samples/04.ai.e.chainedActions.devOpsBot), [C#](https://github.com/microsoft/teams-ai/tree/main/dotnet/samples/04.ai.e.chainedActions.devOpsBot) | +| *Card Master Bot* | Bot with AI vision support that is able to generate Adaptive Cards from uploaded images by using GPT vision models. | [JS](https://github.com/microsoft/teams-ai/tree/main/js/samples/04.ai.f.vision.cardMaster) | +| *Twenty Questions Bot* | Bot that plays a game of twenty questions with the user. | [JS](https://github.com/microsoft/teams-ai/tree/main/js/samples/04.e.twentyQuestions), [C#](https://github.com/microsoft/teams-ai/tree/main/dotnet/samples/04.e.twentyQuestions) | +| *Chat Moderation Bot* | Bot that shows how to incorporate content safety control when using AI features. | [JS](https://github.com/microsoft/teams-ai/tree/main/js/samples/05.chatModeration) | +| *Math Tutor Bot* | Bot that is an expert in math. It uses OpenAI's Assisstants API. 
| [JS](https://github.com/microsoft/teams-ai/tree/main/js/samples/06.assistants.a.mathBot), [C#](https://github.com/microsoft/teams-ai/tree/main/dotnet/samples/06.assistants.a.mathBot) | +| *Food Ordering Bot* | Bot that can take a food order for a fictional restaurant called The Pub. | [JS](https://github.com/microsoft/teams-ai/tree/main/js/samples/06.assistants.b.orderBot), [C#](https://github.com/microsoft/teams-ai/tree/main/dotnet/samples/06.assistants.b.orderBot) | + +## User Authentication Samples + +Being able to access user specific data from other third party services is a cruicial capability of any Teams app. These samples showcase the different ways to authenticate a user to third party services such as Microsoft Graph. + +There are two approaches to user authentication: `OAuth` and `TeamsSSO`. + +The `OAuth` approach requires creating an OAuth connection in the Azure Bot service. It uses the Bot Framework's token service to handle the OAuth2.0 flow on behalf of the bot server. + +The `TeamsSSO` approach implements the OAuth 2.0 protocol within the bot web server itself. It gives you more flexibility on how to configure Azure Active Directory (AAD), like using a client certificate. There is no need to create an OAuth connection in Azure Bot service. + +Both of these approaches can be used to achieve the same functionality, such as using the SSO flow to authenticate the user. + +| Name | Description | Languages Supported | +| ---------------------------- | ----------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| *OAuth Bot* | User authentication in a conversational bot with the `OAuth` apporach. 
| [JS](https://github.com/microsoft/teams-ai/tree/main/js/samples/06.auth.oauth.bot), [C#](https://github.com/microsoft/teams-ai/tree/main/dotnet/samples/06.auth.oauth.bot) | +| *OAuth Message Extension* | User authentication in a message extension with the `OAuth` approach. | [JS](https://github.com/microsoft/teams-ai/tree/main/js/samples/06.auth.oauth.messageExtension), [C#](https://github.com/microsoft/teams-ai/tree/main/dotnet/samples/06.auth.oauth.messageExtension) | +| *OAuth Adaptive Card Bot* | User authentication in a conversational bot using ACv2 cards (Adaptive Cards v2) with the `OAuth` approach. | [JS](https://github.com/microsoft/teams-ai/tree/main/js/samples/06.auth.oauth.adaptiveCard) | +| *TeamsSSO Bot* | User authentication in a conversational bot using the `TeamsSSO` approach. | [JS](https://github.com/microsoft/teams-ai/tree/main/js/samples/06.auth.teamsSSO.bot), [C#](https://github.com/microsoft/teams-ai/tree/main/dotnet/samples/06.auth.teamsSSO.bot) | +| *TeamsSSO Message Extension* | User authentication in a message extension with the `TeamsSSO` approach. | [JS](https://github.com/microsoft/teams-ai/tree/main/js/samples/06.auth.teamsSSO.messageExtension), [C#](https://github.com/microsoft/teams-ai/tree/main/dotnet/samples/06.auth.teamsSSO.messageExtension) | + +## Advanced Samples + +These samples a combination of advanced features such as AI, user authentication, basic conversational bot and message extension capabilities, resulting in their complexity. + +| Name | Description | Languages Supported | +| --------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------- | +| *Who Bot* | Bot that can tell you who your manager is, when's your next meeting...etc. 
It will authenticate that user, use AI to map user intents to actions in which it will call Graph to get specific user data. | [JS](https://github.com/microsoft/teams-ai/tree/main/js/samples/07.whoBot) | diff --git a/dotnet/samples/04.ai.g.teamsChefBot-streaming/files/turns.txt b/dotnet/samples/04.ai.g.teamsChefBot-streaming/files/turns.txt new file mode 100644 index 000000000..842400cd7 --- /dev/null +++ b/dotnet/samples/04.ai.g.teamsChefBot-streaming/files/turns.txt @@ -0,0 +1,105 @@ +# Turn Context and Turn State + +In a conversation, people often speak one-at-a-time, taking turns speaking. With a bot, it generally reacts to user input. Within the Teams AI Library, a turn consists of the user's incoming activity to the bot and any activity the bot sends back to the user as an immediate response. You can think of a _turn_ as the processing associated with the bot receiving a given activity. + +In each turn the _turn context_ and the _turn state_ are configured to manage conversational data. + +### Turn Context + +The turn context object provides information about the activity such as the sender and receiver, the channel, and other data needed to process the activity. + +The turn context is one of the most important abstractions in the SDK. Not only does it carry the inbound activity to all the middleware components and the application logic but it also provides the mechanism whereby the components and the bot logic can send outbound activities. + +#### Example + +The turn context object is accessible from the activity handler or an action. 
Here's how to use it to send a message back to the user in an activity handler: + +##### C# + +```cs +app.OnActivity(ActivityTypes.Message, async (ITurnContext turnContext, TurnState turnState, CancellationToken cancellationToken) => +{ + // Extract user's message + string message = turnContext.Activity.Text; + await turnContext.SendActivityAsync($"You said: {message}"); +}); +``` + +##### JS/TS + +```ts +app.activity(ActivityTypes.Message, async (context: TurnContext, state: ApplicationTurnState) => { + // Extract user's message + let message = context.activity.text; + await context.sendActivity(`You said: ${message}`); +}); +``` + +##### Python + +```python +@app.activity("message") +async def on_message(context: TurnContext, state: TurnState): + # Extract user's message + message = context.activity.text + await context.send_activity(f"You said: {message}") + return True +``` + +### Turn State + +The turn state object stores cookie-like data for the current turn. Just like the turn context, it is carried through the entire application logic, including the activity handlers and the AI System. Unlike the turn context, the turn state is not fixed and is meant to be configured to each application-specific use case. It is common for apps to have conversation state, user state, and temp (temporary) state, but as a developer you can add or remove state objects to fit your needs. + +It is used to store information like the user's message, the conversation history, and any custom data configured by the application code. + +#### Example + +This is how a bot can keep track of the number of messages sent by the user using the turn state: + +##### C# + +```cs +app.OnActivity(ActivityTypes.Message, async (ITurnContext turnContext, AppState turnState, CancellationToken cancellationToken) => +{ + int count = turnState.Conversation.MessageCount; + // Increment count state. + turnState.Conversation.MessageCount = ++count; + + // Send a message back to the user....
+}); +``` + + +##### JS/TS + +```ts +app.activity(ActivityTypes.Message, async (context: TurnContext, state: ApplicationTurnState) => { + let count = state.conversation.value.count ?? 0; + // Increment count state + state.conversation.value.count += 1; + + // Send a message back to the user.... +}); +``` + +##### Python + +```python +@app.activity("message") +async def on_message(context: TurnContext, state: AppTurnState): + count = state.conversation.count + # Increment count state + state.conversation.count += 1 + + # Send a message back to the user.... + return True +``` + +### Appendix + +
+What happens when the user sends a message to the bot? +
+ +When a message is sent by the user it is routed to the bots `HTTP POST` endpoint `/api/messages`, which +starts the routing process. \ No newline at end of file diff --git a/dotnet/samples/04.ai.g.teamsChefBot-streaming/infra/azure.bicep b/dotnet/samples/04.ai.g.teamsChefBot-streaming/infra/azure.bicep new file mode 100644 index 000000000..e3877021b --- /dev/null +++ b/dotnet/samples/04.ai.g.teamsChefBot-streaming/infra/azure.bicep @@ -0,0 +1,113 @@ +@maxLength(20) +@minLength(4) +@description('Used to generate names for all resources in this file') +param resourceBaseName string + +@description('Required when create Azure Bot service') +param botAadAppClientId string + +@secure() +@description('Required by Bot Framework package in your bot project') +param botAadAppClientSecret string + +@secure() +param openAIApiKey string + +@secure() +param azureOpenAIApiKey string + +@secure() +param azureOpenAIEndpoint string + +@secure() +param azureContentSafetyApiKey string + +@secure() +param azureContentSafetyEndpoint string + +param webAppSKU string + +@maxLength(42) +param botDisplayName string + +param serverfarmsName string = resourceBaseName +param webAppName string = resourceBaseName +param location string = resourceGroup().location + +// Compute resources for your Web App +resource serverfarm 'Microsoft.Web/serverfarms@2021-02-01' = { + kind: 'app' + location: location + name: serverfarmsName + sku: { + name: webAppSKU + } +} + +// Web App that hosts your bot +resource webApp 'Microsoft.Web/sites@2021-02-01' = { + kind: 'app' + location: location + name: webAppName + properties: { + serverFarmId: serverfarm.id + httpsOnly: true + siteConfig: { + alwaysOn: true + appSettings: [ + { + name: 'WEBSITE_RUN_FROM_PACKAGE' + value: '1' // Run Azure APP Service from a package file + } + { + name: 'RUNNING_ON_AZURE' + value: '1' + } + { + name: 'BOT_ID' + value: botAadAppClientId + } + { + name: 'BOT_PASSWORD' + value: botAadAppClientSecret + } + { + name: 
'OpenAI__ApiKey' + value: openAIApiKey + } + { + name: 'Azure__OpenAIApiKey' + value: azureOpenAIApiKey + } + { + name: 'Azure__OpenAIEndpoint' + value: azureOpenAIEndpoint + } + { + name: 'Azure__ContentSafetyApiKey' + value: azureContentSafetyApiKey + } + { + name: 'Azure__ContentSafetyEndpoint' + value: azureContentSafetyEndpoint + } + ] + ftpsState: 'FtpsOnly' + } + } +} + +// Register your web service as a bot with the Bot Framework +module azureBotRegistration './botRegistration/azurebot.bicep' = { + name: 'Azure-Bot-registration' + params: { + resourceBaseName: resourceBaseName + botAadAppClientId: botAadAppClientId + botAppDomain: webApp.properties.defaultHostName + botDisplayName: botDisplayName + } +} + +// The output will be persisted in .env.{envName}. Visit https://aka.ms/teamsfx-actions/arm-deploy for more details. +output BOT_AZURE_APP_SERVICE_RESOURCE_ID string = webApp.id +output BOT_DOMAIN string = webApp.properties.defaultHostName diff --git a/dotnet/samples/04.ai.g.teamsChefBot-streaming/infra/azure.parameters.json b/dotnet/samples/04.ai.g.teamsChefBot-streaming/infra/azure.parameters.json new file mode 100644 index 000000000..fd2fe07ad --- /dev/null +++ b/dotnet/samples/04.ai.g.teamsChefBot-streaming/infra/azure.parameters.json @@ -0,0 +1,36 @@ +{ + "$schema": "https://schema.management.azure.com/schemas/2015-01-01/deploymentParameters.json#", + "contentVersion": "1.0.0.0", + "parameters": { + "resourceBaseName": { + "value": "teamschefbot${{RESOURCE_SUFFIX}}" + }, + "botAadAppClientId": { + "value": "${{BOT_ID}}" + }, + "botAadAppClientSecret": { + "value": "${{SECRET_BOT_PASSWORD}}" + }, + "webAppSKU": { + "value": "B1" + }, + "botDisplayName": { + "value": "TeamsChefBot" + }, + "openAIApiKey": { + "value": "${{SECRET_OPENAI_API_KEY}}" + }, + "azureOpenAIApiKey": { + "value": "${{SECRET_AZURE_OPENAI_API_KEY}}" + }, + "azureOpenAIEndpoint": { + "value": "${{SECRET_AZURE_OPENAI_ENDPOINT}}" + }, + "azureContentSafetyApiKey": { + "value": 
"${{SECRET_AZURE_CONTENT_SAFETY_API_KEY}}" + }, + "azureContentSafetyEndpoint": { + "value": "${{SECRET_AZURE_CONTENT_SAFETY_ENDPOINT}}" + } + } +} \ No newline at end of file diff --git a/dotnet/samples/04.ai.g.teamsChefBot-streaming/infra/botRegistration/azurebot.bicep b/dotnet/samples/04.ai.g.teamsChefBot-streaming/infra/botRegistration/azurebot.bicep new file mode 100644 index 000000000..ab67c7a56 --- /dev/null +++ b/dotnet/samples/04.ai.g.teamsChefBot-streaming/infra/botRegistration/azurebot.bicep @@ -0,0 +1,37 @@ +@maxLength(20) +@minLength(4) +@description('Used to generate names for all resources in this file') +param resourceBaseName string + +@maxLength(42) +param botDisplayName string + +param botServiceName string = resourceBaseName +param botServiceSku string = 'F0' +param botAadAppClientId string +param botAppDomain string + +// Register your web service as a bot with the Bot Framework +resource botService 'Microsoft.BotService/botServices@2021-03-01' = { + kind: 'azurebot' + location: 'global' + name: botServiceName + properties: { + displayName: botDisplayName + endpoint: 'https://${botAppDomain}/api/messages' + msaAppId: botAadAppClientId + } + sku: { + name: botServiceSku + } +} + +// Connect the bot service to Microsoft Teams +resource botServiceMsTeamsChannel 'Microsoft.BotService/botServices/channels@2021-03-01' = { + parent: botService + location: 'global' + name: 'MsTeamsChannel' + properties: { + channelName: 'MsTeamsChannel' + } +} diff --git a/dotnet/samples/04.ai.g.teamsChefBot-streaming/infra/botRegistration/readme.md b/dotnet/samples/04.ai.g.teamsChefBot-streaming/infra/botRegistration/readme.md new file mode 100644 index 000000000..d5416243c --- /dev/null +++ b/dotnet/samples/04.ai.g.teamsChefBot-streaming/infra/botRegistration/readme.md @@ -0,0 +1 @@ +The `azurebot.bicep` module is provided to help you create Azure Bot service when you don't use Azure to host your app. 
If you use Azure as infrastrcture for your app, `azure.bicep` under infra folder already leverages this module to create Azure Bot service for you. You don't need to deploy `azurebot.bicep` again. \ No newline at end of file diff --git a/dotnet/samples/04.ai.g.teamsChefBot-streaming/teamsapp.local.yml b/dotnet/samples/04.ai.g.teamsChefBot-streaming/teamsapp.local.yml new file mode 100644 index 000000000..df4523f58 --- /dev/null +++ b/dotnet/samples/04.ai.g.teamsChefBot-streaming/teamsapp.local.yml @@ -0,0 +1,87 @@ +# yaml-language-server: $schema=https://aka.ms/teams-toolkit/1.1.0/yaml.schema.json +# +# The teamsapp.local.yml composes automation tasks for Teams Toolkit when running locally. +# This file is used when selecting 'Prepare Teams App Dependencies' menu items in the Teams Toolkit for Visual Studio window +# +# You can customize this file. Visit https://aka.ms/teamsfx-v5.0-guide for more info about Teams Toolkit project files. +version: 1.1.0 + +# Defines what the `provision` lifecycle step does with Teams Toolkit. +provision: + # Automates the creation of a Teams app registration and saves the App ID to an environment file. + - uses: teamsApp/create + with: + # Teams app name + name: TeamsChef${{APP_NAME_SUFFIX}} + # Write the information of created resources into environment file for + # the specified environment variable(s). + writeToEnvironmentFile: + teamsAppId: TEAMS_APP_ID + + # Automates the creation an Azure AD app registration which is required for a bot. + # The Bot ID (AAD app client ID) and Bot Password (AAD app client secret) are saved to an environment file. + - uses: botAadApp/create + with: + # The Azure Active Directory application's display name + name: TeamsChef + writeToEnvironmentFile: + # The Azure Active Directory application's client id created for bot. + botId: BOT_ID + # The Azure Active Directory application's client secret created for bot. 
+ botPassword: SECRET_BOT_PASSWORD + + # Automates the creation and configuration of a Bot Framework registration which is required for a bot. + # This configures the bot to use the Azure AD app registration created in the previous step. + - uses: botFramework/create + with: + botId: ${{BOT_ID}} + name: TeamsChef + messagingEndpoint: ${{BOT_ENDPOINT}}/api/messages + description: "" + channels: + - name: msteams + + # Generate runtime appsettings to JSON file + - uses: file/createOrUpdateJsonFile + with: + target: ./appsettings.Development.json + content: + BOT_ID: ${{BOT_ID}} + BOT_PASSWORD: ${{SECRET_BOT_PASSWORD}} + + # Optional: Automates schema and error checking of the Teams app manifest and outputs the results in the console. + - uses: teamsApp/validateManifest + with: + # Path to manifest template + manifestPath: ./appPackage/manifest.json + + # Automates the creation of a Teams app package (.zip). + - uses: teamsApp/zipAppPackage + with: + # Path to manifest template + manifestPath: ./appPackage/manifest.json + outputZipPath: ./appPackage/build/appPackage.${{TEAMSFX_ENV}}.zip + outputJsonPath: ./appPackage/build/manifest.${{TEAMSFX_ENV}}.json + + # Automates updating the Teams app manifest in Teams Developer Portal using the App ID from the manifest file. + # This action ensures that any manifest changes are reflected when launching the app again in Teams. + - uses: teamsApp/update + with: + # Relative path to this file. This is the path for the built zip file.
+ appPackagePath: ./appPackage/build/appPackage.${{TEAMSFX_ENV}}.zip + + # Create or update debug profile in launchSettings file + - uses: file/createOrUpdateJsonFile + with: + target: ./Properties/launchSettings.json + content: + profiles: + Microsoft Teams (browser): + commandName: "Project" + dotnetRunMessages: true + launchBrowser: true + launchUrl: "https://teams.microsoft.com/l/app/${{TEAMS_APP_ID}}?installAppPackage=true&webjoin=true&appTenantId=${{TEAMS_APP_TENANT_ID}}&login_hint=${{TEAMSFX_M365_USER_NAME}}" + applicationUrl: "http://localhost:5130" + environmentVariables: + ASPNETCORE_ENVIRONMENT: "Development" + hotReloadProfile: "aspnetcore" diff --git a/dotnet/samples/04.ai.g.teamsChefBot-streaming/teamsapp.yml b/dotnet/samples/04.ai.g.teamsChefBot-streaming/teamsapp.yml new file mode 100644 index 000000000..c2f62631b --- /dev/null +++ b/dotnet/samples/04.ai.g.teamsChefBot-streaming/teamsapp.yml @@ -0,0 +1,98 @@ +# yaml-language-server: $schema=https://aka.ms/teams-toolkit/1.1.0/yaml.schema.json +# +# The teamsapp.yml composes automation tasks for Teams Toolkit. +# This file is used when selecting 'Provision' or 'Deploy' menu items in the Teams Toolkit for Visual Studio window +# +# You can customize this file. Visit https://aka.ms/teamsfx-v5.0-guide for more info about Teams Toolkit project files. +version: 1.1.0 + +environmentFolderPath: ./env + +# Defines what the `provision` lifecycle step does with Teams Toolkit. +provision: + # Automates the creation of a Teams app registration and saves the App ID to an environment file. + - uses: teamsApp/create + with: + # Teams app name + name: TeamsChef${{APP_NAME_SUFFIX}} + # Write the information of created resources into environment file for + # the specified environment variable(s). + writeToEnvironmentFile: + teamsAppId: TEAMS_APP_ID + + # Automates the creation of an Azure AD app registration which is required for a bot.
+ # The Bot ID (AAD app client ID) and Bot Password (AAD app client secret) are saved to an environment file. + - uses: botAadApp/create + with: + # The Azure Active Directory application's display name + name: TeamsChef + writeToEnvironmentFile: + # The Azure Active Directory application's client id created for bot. + botId: BOT_ID + # The Azure Active Directory application's client secret created for bot. + botPassword: SECRET_BOT_PASSWORD + + # Automates the creation of infrastructure defined in ARM templates to host the bot. + # The created resource IDs are saved to an environment file. + - uses: arm/deploy # Deploy given ARM templates in parallel. + with: + # AZURE_SUBSCRIPTION_ID is a built-in environment variable, + # if its value is empty, TeamsFx will prompt you to select a subscription. + # Referencing other environment variables with empty values + # will skip the subscription selection prompt. + subscriptionId: ${{AZURE_SUBSCRIPTION_ID}} + # AZURE_RESOURCE_GROUP_NAME is a built-in environment variable, + # if its value is empty, TeamsFx will prompt you to select or create one + # resource group. + # Referencing other environment variables with empty values + # will skip the resource group selection prompt. + resourceGroupName: ${{AZURE_RESOURCE_GROUP_NAME}} + templates: + - path: ./infra/azure.bicep # Relative path to this file + # Relative path to this yaml file. + # Placeholders will be replaced with corresponding environment + # variable before ARM deployment. + parameters: ./infra/azure.parameters.json + # Required when deploying ARM template + deploymentName: Create-resources-for-tab + # Teams Toolkit will download this bicep CLI version from github for you, + # will use bicep CLI in PATH if you remove this config. + bicepCliVersion: v0.9.1 + + # Optional: Automates schema and error checking of the Teams app manifest and outputs the results in the console.
+ - uses: teamsApp/validateManifest + with: + # Path to manifest template + manifestPath: ./appPackage/manifest.json + + # Automates creating a final app package (.zip) by replacing any variables in the manifest.json file for the current environment. + - uses: teamsApp/zipAppPackage + with: + # Path to manifest template + manifestPath: ./appPackage/manifest.json + outputZipPath: ./appPackage/build/appPackage.${{TEAMSFX_ENV}}.zip + outputJsonPath: ./appPackage/build/manifest.${{TEAMSFX_ENV}}.json + + # Automates updating the Teams app manifest in Teams Developer Portal using the App ID from the manifest file. + # This action ensures that any manifest changes are reflected when launching the app again in Teams. + - uses: teamsApp/update + with: + # Relative path to this file. This is the path for built zip file. + appPackagePath: ./appPackage/build/appPackage.${{TEAMSFX_ENV}}.zip + +# Triggered when 'teamsfx deploy' is executed +deploy: + - uses: cli/runDotnetCommand + with: + args: publish --configuration Release --runtime win-x86 --self-contained + + # Deploy to an Azure App Service using the zip file created in the provision step. + - uses: azureAppService/zipDeploy + with: + # deploy base folder + artifactFolder: bin/Release/net6.0/win-x86/publish + # This example uses the env var that's generated by the arm/deploy action. + # You can replace it with an existing Azure Resource ID or other + # custom environment variable. + resourceId: ${{BOT_AZURE_APP_SERVICE_RESOURCE_ID}} +projectId: 1231a343-0fe4-47d7-8f62-e77b092574aa