diff --git a/spring-boot-example/src/main/java/dev/langchain4j/example/aiservice/AssistantConfiguration.java b/spring-boot-example/src/main/java/dev/langchain4j/example/aiservice/AssistantConfiguration.java index 7a45e261..afe6d6e1 100644 --- a/spring-boot-example/src/main/java/dev/langchain4j/example/aiservice/AssistantConfiguration.java +++ b/spring-boot-example/src/main/java/dev/langchain4j/example/aiservice/AssistantConfiguration.java @@ -1,7 +1,16 @@ package dev.langchain4j.example.aiservice; +import dev.langchain4j.example.lowlevel.ChatLanguageModelController; import dev.langchain4j.memory.ChatMemory; import dev.langchain4j.memory.chat.MessageWindowChatMemory; +import dev.langchain4j.model.chat.ChatLanguageModel; +import dev.langchain4j.model.chat.StreamingChatLanguageModel; +import dev.langchain4j.model.chat.listener.ChatModelErrorContext; +import dev.langchain4j.model.chat.listener.ChatModelListener; +import dev.langchain4j.model.chat.listener.ChatModelRequestContext; +import dev.langchain4j.model.chat.listener.ChatModelResponseContext; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import org.springframework.context.annotation.Bean; import org.springframework.context.annotation.Configuration; @@ -15,4 +24,33 @@ class AssistantConfiguration { ChatMemory chatMemory() { return MessageWindowChatMemory.withMaxMessages(10); } + + /** + * This listener will be injected into every {@link ChatLanguageModel} and {@link StreamingChatLanguageModel} + * bean found in the application context. + * It will listen to the {@link ChatLanguageModel} in the {@link ChatLanguageModelController} as well as + * in {@link Assistant} and {@link StreamingAssistant}.
+ */ + @Bean + ChatModelListener chatModelListener() { + return new ChatModelListener() { + + private static final Logger log = LoggerFactory.getLogger(ChatModelListener.class); + + @Override + public void onRequest(ChatModelRequestContext requestContext) { + log.info("onRequest(): {}", requestContext.chatRequest()); + } + + @Override + public void onResponse(ChatModelResponseContext responseContext) { + log.info("onResponse(): {}", responseContext.chatResponse()); + } + + @Override + public void onError(ChatModelErrorContext errorContext) { + log.info("onError(): {}", errorContext.error().getMessage()); + } + }; + } } diff --git a/spring-boot-example/src/main/java/dev/langchain4j/example/lowlevel/ChatLanguageModelController.java b/spring-boot-example/src/main/java/dev/langchain4j/example/lowlevel/ChatLanguageModelController.java index 4ea93f7e..67ef1073 100644 --- a/spring-boot-example/src/main/java/dev/langchain4j/example/lowlevel/ChatLanguageModelController.java +++ b/spring-boot-example/src/main/java/dev/langchain4j/example/lowlevel/ChatLanguageModelController.java @@ -9,7 +9,7 @@ * This is an example of using a {@link ChatLanguageModel}, a low-level LangChain4j API. 
*/ @RestController -class ChatLanguageModelController { +public class ChatLanguageModelController { ChatLanguageModel chatLanguageModel; diff --git a/spring-boot-example/src/main/resources/application.properties b/spring-boot-example/src/main/resources/application.properties index 705b6e7e..09a3d621 100644 --- a/spring-boot-example/src/main/resources/application.properties +++ b/spring-boot-example/src/main/resources/application.properties @@ -1,10 +1,12 @@ langchain4j.open-ai.chat-model.api-key=${OPENAI_API_KEY} langchain4j.open-ai.chat-model.model-name=gpt-4o-mini +#langchain4j.open-ai.chat-model.log-requests=true +#langchain4j.open-ai.chat-model.log-responses=true langchain4j.open-ai.streaming-chat-model.api-key=${OPENAI_API_KEY} langchain4j.open-ai.streaming-chat-model.model-name=gpt-4o-mini +#langchain4j.open-ai.streaming-chat-model.log-requests=true +#langchain4j.open-ai.streaming-chat-model.log-responses=true -langchain4j.open-ai.chat-model.log-requests=true -langchain4j.open-ai.chat-model.log-responses=true logging.level.dev.langchain4j=DEBUG -logging.level.dev.ai4j.openai4j=DEBUG \ No newline at end of file +logging.level.dev.ai4j.openai4j=DEBUG