diff --git a/anthropic-examples/pom.xml b/anthropic-examples/pom.xml
index 027c0d21..34dbeab9 100644
--- a/anthropic-examples/pom.xml
+++ b/anthropic-examples/pom.xml
@@ -6,7 +6,7 @@
dev.langchain4j
anthropic-examples
- 1.0.0-alpha1
+ 1.0.0-beta1
17
@@ -19,13 +19,13 @@
dev.langchain4j
langchain4j-anthropic
- 1.0.0-alpha1
+ 1.0.0-beta1
dev.langchain4j
langchain4j
- 1.0.0-alpha1
+ 1.0.0-beta1
diff --git a/anthropic-examples/src/main/java/AnthropicChatModelTest.java b/anthropic-examples/src/main/java/AnthropicChatModelTest.java
index 277a53ca..fb97572a 100644
--- a/anthropic-examples/src/main/java/AnthropicChatModelTest.java
+++ b/anthropic-examples/src/main/java/AnthropicChatModelTest.java
@@ -1,9 +1,12 @@
-import dev.langchain4j.data.message.*;
+import dev.langchain4j.data.message.ImageContent;
+import dev.langchain4j.data.message.SystemMessage;
+import dev.langchain4j.data.message.TextContent;
+import dev.langchain4j.data.message.UserMessage;
import dev.langchain4j.model.anthropic.AnthropicChatModel;
import dev.langchain4j.model.anthropic.AnthropicChatModelName;
import dev.langchain4j.model.anthropic.AnthropicTokenUsage;
import dev.langchain4j.model.chat.ChatLanguageModel;
-import dev.langchain4j.model.output.Response;
+import dev.langchain4j.model.chat.response.ChatResponse;
import org.junit.jupiter.api.Test;
import java.util.Base64;
@@ -25,7 +28,7 @@ class AnthropicChatModelTest {
@Test
void AnthropicChatModel_Example() {
- String answer = model.generate("What is the capital of Germany?");
+ String answer = model.chat("What is the capital of Germany?");
assertThat(answer).containsIgnoringCase("Berlin");
}
@@ -34,15 +37,16 @@ void AnthropicChatModel_Example() {
void AnthropicChatModel_with_vision_Example() {
byte[] image = readBytes("https://docs.langchain4j.dev/img/langchain4j-components.png");
+ String base64EncodedImage = Base64.getEncoder().encodeToString(image);
UserMessage userMessage = UserMessage.from(
TextContent.from("What do you see?"),
- ImageContent.from(Base64.getEncoder().encodeToString(image), "image/png")
+ ImageContent.from(base64EncodedImage, "image/png")
);
- Response response = model.generate(userMessage);
+ ChatResponse chatResponse = model.chat(userMessage);
- assertThat(response.content().text()).containsIgnoringCase("RAG");
+ assertThat(chatResponse.aiMessage().text()).containsIgnoringCase("RAG");
}
@Test
@@ -62,14 +66,14 @@ void AnthropicChatModel_with_cache_system_message_Example() {
// create cache
SystemMessage systemMessage = SystemMessage.from("What types of messages are supported in LangChain?".repeat(187));
UserMessage userMessage = UserMessage.userMessage("what result it calcule 5x2 + 2x + 2 = 0?");
- Response response = modelWithCache.generate(systemMessage, userMessage);
+ ChatResponse response = modelWithCache.chat(systemMessage, userMessage);
- AnthropicTokenUsage createCacheTokenUsage = (AnthropicTokenUsage) response.tokenUsage();
+ AnthropicTokenUsage createCacheTokenUsage = (AnthropicTokenUsage) response.metadata().tokenUsage();
assertThat(createCacheTokenUsage.cacheCreationInputTokens()).isGreaterThan(0);
// read cache created
- Response responseToReadCache = modelWithCache.generate(systemMessage, userMessage);
- AnthropicTokenUsage readCacheTokenUsage = (AnthropicTokenUsage) responseToReadCache.tokenUsage();
+ ChatResponse responseToReadCache = modelWithCache.chat(systemMessage, userMessage);
+ AnthropicTokenUsage readCacheTokenUsage = (AnthropicTokenUsage) responseToReadCache.metadata().tokenUsage();
assertThat(readCacheTokenUsage.cacheReadInputTokens()).isGreaterThan(0);
}
}
diff --git a/anthropic-examples/src/main/java/AnthropicStreamingChatModelTest.java b/anthropic-examples/src/main/java/AnthropicStreamingChatModelTest.java
index c527a135..ef1333a4 100644
--- a/anthropic-examples/src/main/java/AnthropicStreamingChatModelTest.java
+++ b/anthropic-examples/src/main/java/AnthropicStreamingChatModelTest.java
@@ -1,8 +1,7 @@
-import dev.langchain4j.data.message.AiMessage;
-import dev.langchain4j.model.StreamingResponseHandler;
import dev.langchain4j.model.anthropic.AnthropicStreamingChatModel;
import dev.langchain4j.model.chat.StreamingChatLanguageModel;
-import dev.langchain4j.model.output.Response;
+import dev.langchain4j.model.chat.response.ChatResponse;
+import dev.langchain4j.model.chat.response.StreamingChatResponseHandler;
import org.junit.jupiter.api.Test;
import java.util.concurrent.CompletableFuture;
@@ -23,19 +22,19 @@ class AnthropicStreamingChatModelTest {
@Test
void AnthropicChatModel_Example() throws ExecutionException, InterruptedException {
- CompletableFuture future = new CompletableFuture<>();
+ CompletableFuture future = new CompletableFuture<>();
- model.generate("What is the capital of Germany?", new StreamingResponseHandler() {
+ model.chat("What is the capital of Germany?", new StreamingChatResponseHandler() {
@Override
- public void onNext(String token) {
- System.out.println("New token: '" + token + "'");
+ public void onPartialResponse(String partialResponse) {
+ System.out.println("New token: '" + partialResponse + "'");
}
@Override
- public void onComplete(Response response) {
- System.out.println("Streaming completed: " + response);
- future.complete(response.content());
+ public void onCompleteResponse(ChatResponse completeResponse) {
+ System.out.println("Streaming completed: " + completeResponse);
+ future.complete(completeResponse);
}
@Override
@@ -44,6 +43,6 @@ public void onError(Throwable error) {
}
});
- assertThat(future.get().text()).containsIgnoringCase("Berlin");
+ assertThat(future.get().aiMessage().text()).containsIgnoringCase("Berlin");
}
}
diff --git a/azure-open-ai-customer-support-agent-example/pom.xml b/azure-open-ai-customer-support-agent-example/pom.xml
index c74d0f66..485f4e25 100644
--- a/azure-open-ai-customer-support-agent-example/pom.xml
+++ b/azure-open-ai-customer-support-agent-example/pom.xml
@@ -12,7 +12,7 @@
dev.langchain4j
azure-open-ai-customer-support-agent-example
- 1.0.0-alpha1
+ 1.0.0-beta1
17
@@ -23,19 +23,19 @@
dev.langchain4j
langchain4j-spring-boot-starter
- 1.0.0-alpha1
+ 1.0.0-beta1
dev.langchain4j
langchain4j-azure-open-ai-spring-boot-starter
- 1.0.0-alpha1
+ 1.0.0-beta1
dev.langchain4j
langchain4j-embeddings-all-minilm-l6-v2
- 1.0.0-alpha1
+ 1.0.0-beta1
diff --git a/azure-open-ai-examples/pom.xml b/azure-open-ai-examples/pom.xml
index 4817cbf6..6d109215 100644
--- a/azure-open-ai-examples/pom.xml
+++ b/azure-open-ai-examples/pom.xml
@@ -6,7 +6,7 @@
dev.langchain4j
azure-open-ai-examples
- 1.0.0-alpha1
+ 1.0.0-beta1
17
@@ -19,13 +19,13 @@
dev.langchain4j
langchain4j-azure-open-ai
- 1.0.0-alpha1
+ 1.0.0-beta1
dev.langchain4j
langchain4j
- 1.0.0-alpha1
+ 1.0.0-beta1
diff --git a/azure-open-ai-examples/src/main/java/AzureOpenAiChatModelExamples.java b/azure-open-ai-examples/src/main/java/AzureOpenAiChatModelExamples.java
index 210a6c45..52cc81fd 100644
--- a/azure-open-ai-examples/src/main/java/AzureOpenAiChatModelExamples.java
+++ b/azure-open-ai-examples/src/main/java/AzureOpenAiChatModelExamples.java
@@ -15,7 +15,7 @@ public static void main(String[] args) {
.logRequestsAndResponses(true)
.build();
- String response = model.generate("Provide 3 short bullet points explaining why Java is awesome");
+ String response = model.chat("Provide 3 short bullet points explaining why Java is awesome");
System.out.println(response);
}
diff --git a/azure-open-ai-examples/src/main/java/AzureOpenAiFunctionCallingExamples.java b/azure-open-ai-examples/src/main/java/AzureOpenAiFunctionCallingExamples.java
index 9f38d116..97b612fa 100644
--- a/azure-open-ai-examples/src/main/java/AzureOpenAiFunctionCallingExamples.java
+++ b/azure-open-ai-examples/src/main/java/AzureOpenAiFunctionCallingExamples.java
@@ -1,23 +1,22 @@
-import dev.langchain4j.agent.tool.P;
-import dev.langchain4j.agent.tool.Tool;
-import dev.langchain4j.agent.tool.ToolExecutionRequest;
-import dev.langchain4j.agent.tool.ToolSpecification;
-import dev.langchain4j.agent.tool.ToolSpecifications;
+import dev.langchain4j.agent.tool.*;
import dev.langchain4j.data.message.AiMessage;
import dev.langchain4j.data.message.ChatMessage;
import dev.langchain4j.data.message.ToolExecutionResultMessage;
import dev.langchain4j.data.message.UserMessage;
-import dev.langchain4j.service.tool.DefaultToolExecutor;
-import dev.langchain4j.service.tool.ToolExecutor;
-import static dev.langchain4j.data.message.UserMessage.userMessage;
import dev.langchain4j.model.azure.AzureOpenAiChatModel;
import dev.langchain4j.model.chat.ChatLanguageModel;
+import dev.langchain4j.model.chat.request.ChatRequest;
+import dev.langchain4j.model.chat.request.ChatRequestParameters;
+import dev.langchain4j.service.tool.DefaultToolExecutor;
+import dev.langchain4j.service.tool.ToolExecutor;
import java.time.LocalDate;
import java.util.ArrayList;
import java.util.List;
import java.util.UUID;
+import static dev.langchain4j.data.message.UserMessage.userMessage;
+
public class AzureOpenAiFunctionCallingExamples {
@@ -42,7 +41,7 @@ static class Weather_From_Manual_Configuration {
.logRequestsAndResponses(true)
.build();
- public static void main(String[] args) throws Exception {
+ public static void main(String[] args) {
// STEP 1: User specify tools and query
// Tools
@@ -54,8 +53,14 @@ public static void main(String[] args) throws Exception {
chatMessages.add(userMessage);
- // STEP 2: Model generate function arguments
- AiMessage aiMessage = azureOpenAiModel.generate(chatMessages, toolSpecifications).content();
+ // STEP 2: Model generates function arguments
+ ChatRequest chatRequest = ChatRequest.builder()
+ .messages(chatMessages)
+ .parameters(ChatRequestParameters.builder()
+ .toolSpecifications(toolSpecifications)
+ .build())
+ .build();
+ AiMessage aiMessage = azureOpenAiModel.chat(chatRequest).aiMessage();
List toolExecutionRequests = aiMessage.toolExecutionRequests();
System.out.println("Out of the " + toolSpecifications.size() + " functions declared in WeatherTools, " + toolExecutionRequests.size() + " will be invoked:");
toolExecutionRequests.forEach(toolExecutionRequest -> {
@@ -76,7 +81,7 @@ public static void main(String[] args) throws Exception {
// STEP 4: Model generate final response
- AiMessage finalResponse = azureOpenAiModel.generate(chatMessages).content();
+ AiMessage finalResponse = azureOpenAiModel.chat(chatMessages).aiMessage();
System.out.println(finalResponse.text()); //According to the payment data, the payment status of transaction T1005 is Pending.
}
}
diff --git a/azure-open-ai-examples/src/main/java/AzureOpenAiSecurityExamples.java b/azure-open-ai-examples/src/main/java/AzureOpenAiSecurityExamples.java
index d96bf528..12041d2a 100644
--- a/azure-open-ai-examples/src/main/java/AzureOpenAiSecurityExamples.java
+++ b/azure-open-ai-examples/src/main/java/AzureOpenAiSecurityExamples.java
@@ -24,7 +24,7 @@ public static void main(String[] args) {
.logRequestsAndResponses(true)
.build();
- String response = model.generate("Provide 3 short bullet points explaining why Java is awesome");
+ String response = model.chat("Provide 3 short bullet points explaining why Java is awesome");
System.out.println(response);
}
@@ -46,7 +46,7 @@ public static void main(String[] args) {
.logRequestsAndResponses(true)
.build();
- String response = model.generate("Provide 3 short bullet points explaining why Java is awesome");
+ String response = model.chat("Provide 3 short bullet points explaining why Java is awesome");
System.out.println(response);
}
diff --git a/azure-open-ai-examples/src/main/java/AzureOpenAiStreamingChatModelExamples.java b/azure-open-ai-examples/src/main/java/AzureOpenAiStreamingChatModelExamples.java
index d0e6db23..8755b2bb 100644
--- a/azure-open-ai-examples/src/main/java/AzureOpenAiStreamingChatModelExamples.java
+++ b/azure-open-ai-examples/src/main/java/AzureOpenAiStreamingChatModelExamples.java
@@ -1,7 +1,6 @@
-import dev.langchain4j.data.message.AiMessage;
-import dev.langchain4j.model.StreamingResponseHandler;
import dev.langchain4j.model.azure.AzureOpenAiStreamingChatModel;
-import dev.langchain4j.model.output.Response;
+import dev.langchain4j.model.chat.response.ChatResponse;
+import dev.langchain4j.model.chat.response.StreamingChatResponseHandler;
import java.util.concurrent.CompletableFuture;
@@ -21,17 +20,18 @@ public static void main(String[] args) {
String userMessage = "Write a 100-word poem about Java and AI";
- CompletableFuture> futureResponse = new CompletableFuture<>();
- model.generate(userMessage, new StreamingResponseHandler() {
+ CompletableFuture futureResponse = new CompletableFuture<>();
+
+ model.chat(userMessage, new StreamingChatResponseHandler() {
@Override
- public void onNext(String token) {
- System.out.print(token);
+ public void onPartialResponse(String partialResponse) {
+ System.out.print(partialResponse);
}
@Override
- public void onComplete(Response response) {
- futureResponse.complete(response);
+ public void onCompleteResponse(ChatResponse completeResponse) {
+ futureResponse.complete(completeResponse);
}
@Override
diff --git a/bedrock-examples/pom.xml b/bedrock-examples/pom.xml
index fbac807e..592722d7 100644
--- a/bedrock-examples/pom.xml
+++ b/bedrock-examples/pom.xml
@@ -6,7 +6,7 @@
dev.langchain4j
bedrock-examples
- 1.0.0-alpha1
+ 1.0.0-beta1
17
@@ -19,13 +19,13 @@
dev.langchain4j
langchain4j-bedrock
- 1.0.0-alpha1
+ 1.0.0-beta1
dev.langchain4j
langchain4j
- 1.0.0-alpha1
+ 1.0.0-beta1
diff --git a/bedrock-examples/src/main/java/BedrockChatModelExample.java b/bedrock-examples/src/main/java/BedrockChatModelExample.java
index d6f217af..9643e865 100644
--- a/bedrock-examples/src/main/java/BedrockChatModelExample.java
+++ b/bedrock-examples/src/main/java/BedrockChatModelExample.java
@@ -20,7 +20,7 @@ public static void main(String[] args) {
// Other parameters can be set as well
.build();
- String joke = model.generate("Tell me a joke about Java");
+ String joke = model.chat("Tell me a joke about Java");
System.out.println(joke);
}
diff --git a/bedrock-examples/src/main/java/BedrockStreamingChatModelExample.java b/bedrock-examples/src/main/java/BedrockStreamingChatModelExample.java
index 77b9adba..a2d300a2 100644
--- a/bedrock-examples/src/main/java/BedrockStreamingChatModelExample.java
+++ b/bedrock-examples/src/main/java/BedrockStreamingChatModelExample.java
@@ -1,9 +1,8 @@
-import dev.langchain4j.data.message.AiMessage;
-import dev.langchain4j.model.StreamingResponseHandler;
import dev.langchain4j.model.bedrock.BedrockAnthropicMessageChatModel;
import dev.langchain4j.model.bedrock.BedrockAnthropicStreamingChatModel;
import dev.langchain4j.model.chat.StreamingChatLanguageModel;
-import dev.langchain4j.model.output.Response;
+import dev.langchain4j.model.chat.response.ChatResponse;
+import dev.langchain4j.model.chat.response.StreamingChatResponseHandler;
import software.amazon.awssdk.regions.Region;
public class BedrockStreamingChatModelExample {
@@ -14,8 +13,7 @@ public static void main(String[] args) {
// AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY
// More info on creating the API keys:
// https://docs.aws.amazon.com/bedrock/latest/userguide/api-setup.html
- StreamingChatLanguageModel model = BedrockAnthropicStreamingChatModel
- .builder()
+ StreamingChatLanguageModel model = BedrockAnthropicStreamingChatModel.builder()
.temperature(0.50f)
.maxTokens(300)
.region(Region.US_EAST_1)
@@ -24,16 +22,16 @@ public static void main(String[] args) {
// Other parameters can be set as well
.build();
- model.generate("Write a poem about Java", new StreamingResponseHandler() {
+ model.chat("Write a poem about Java", new StreamingChatResponseHandler() {
@Override
- public void onNext(String token) {
- System.out.println("onNext(): " + token);
+ public void onPartialResponse(String partialResponse) {
+ System.out.println("onPartialResponse(): " + partialResponse);
}
@Override
- public void onComplete(Response response) {
- System.out.println("onComplete(): " + response);
+ public void onCompleteResponse(ChatResponse completeResponse) {
+ System.out.println("onCompleteResponse(): " + completeResponse);
}
@Override
diff --git a/chroma-example/pom.xml b/chroma-example/pom.xml
index 54ecc638..6d1090b2 100644
--- a/chroma-example/pom.xml
+++ b/chroma-example/pom.xml
@@ -6,7 +6,7 @@
dev.langchain4j
chroma-example
- 1.0.0-alpha1
+ 1.0.0-beta1
17
@@ -19,13 +19,13 @@
dev.langchain4j
langchain4j-chroma
- 1.0.0-alpha1
+ 1.0.0-beta1
dev.langchain4j
langchain4j-embeddings-all-minilm-l6-v2
- 1.0.0-alpha1
+ 1.0.0-beta1
diff --git a/customer-support-agent-example/pom.xml b/customer-support-agent-example/pom.xml
index 5c288851..1f81e838 100644
--- a/customer-support-agent-example/pom.xml
+++ b/customer-support-agent-example/pom.xml
@@ -12,7 +12,7 @@
dev.langchain4j
customer-support-agent-example
- 1.0.0-alpha1
+ 1.0.0-beta1
17
@@ -23,19 +23,19 @@
dev.langchain4j
langchain4j-spring-boot-starter
- 1.0.0-alpha1
+ 1.0.0-beta1
dev.langchain4j
langchain4j-open-ai-spring-boot-starter
- 1.0.0-alpha1
+ 1.0.0-beta1
dev.langchain4j
langchain4j-embeddings-all-minilm-l6-v2
- 1.0.0-alpha1
+ 1.0.0-beta1
diff --git a/customer-support-agent-example/src/test/java/dev/langchain4j/example/utils/JudgeModelAssertions.java b/customer-support-agent-example/src/test/java/dev/langchain4j/example/utils/JudgeModelAssertions.java
index 83406116..7e56d985 100644
--- a/customer-support-agent-example/src/test/java/dev/langchain4j/example/utils/JudgeModelAssertions.java
+++ b/customer-support-agent-example/src/test/java/dev/langchain4j/example/utils/JudgeModelAssertions.java
@@ -5,6 +5,7 @@
import dev.langchain4j.data.message.UserMessage;
import dev.langchain4j.model.chat.ChatLanguageModel;
import dev.langchain4j.model.chat.request.ChatRequest;
+import dev.langchain4j.model.chat.request.ChatRequestParameters;
import dev.langchain4j.model.chat.request.ResponseFormat;
import dev.langchain4j.model.chat.response.ChatResponse;
import dev.langchain4j.service.output.JsonSchemas;
@@ -93,7 +94,9 @@ public TextAssertion satisfies(List conditions) {
),
UserMessage.from("%s".formatted(text))
)
- .responseFormat(RESPONSE_FORMAT)
+ .parameters(ChatRequestParameters.builder()
+ .responseFormat(RESPONSE_FORMAT)
+ .build())
.build();
ChatResponse chatResponse = judgeModel.chat(chatRequest);
diff --git a/dbpedia-example/pom.xml b/dbpedia-example/pom.xml
index 869b4324..5f41ad5f 100644
--- a/dbpedia-example/pom.xml
+++ b/dbpedia-example/pom.xml
@@ -12,7 +12,7 @@
17
17
UTF-8
- 1.0.0-alpha1
+ 1.0.0-beta1
3.17.0
diff --git a/elasticsearch-example/pom.xml b/elasticsearch-example/pom.xml
index e327f637..9434c643 100644
--- a/elasticsearch-example/pom.xml
+++ b/elasticsearch-example/pom.xml
@@ -6,7 +6,7 @@
dev.langchain4j
elasticsearch-example
- 1.0.0-alpha1
+ 1.0.0-beta1
17
@@ -19,13 +19,13 @@
dev.langchain4j
langchain4j-elasticsearch
- 1.0.0-alpha1
+ 1.0.0-beta1
dev.langchain4j
langchain4j-embeddings-all-minilm-l6-v2
- 1.0.0-alpha1
+ 1.0.0-beta1
diff --git a/github-models-examples/pom.xml b/github-models-examples/pom.xml
index 4dfa312b..63ab995f 100644
--- a/github-models-examples/pom.xml
+++ b/github-models-examples/pom.xml
@@ -6,7 +6,7 @@
dev.langchain4j
github-models-examples
- 1.0.0-alpha1
+ 1.0.0-beta1
17
@@ -19,13 +19,13 @@
dev.langchain4j
langchain4j-github-models
- 1.0.0-alpha1
+ 1.0.0-beta1
dev.langchain4j
langchain4j
- 1.0.0-alpha1
+ 1.0.0-beta1
diff --git a/github-models-examples/src/main/java/GitHubModelsChatModelExamples.java b/github-models-examples/src/main/java/GitHubModelsChatModelExamples.java
index bbbe0359..36af4809 100644
--- a/github-models-examples/src/main/java/GitHubModelsChatModelExamples.java
+++ b/github-models-examples/src/main/java/GitHubModelsChatModelExamples.java
@@ -14,7 +14,7 @@ public static void main(String[] args) {
.logRequestsAndResponses(true)
.build();
- String response = model.generate("Provide 3 short bullet points explaining why Java is awesome");
+ String response = model.chat("Provide 3 short bullet points explaining why Java is awesome");
System.out.println(response);
}
diff --git a/github-models-examples/src/main/java/GitHubModelsStreamingChatModelExamples.java b/github-models-examples/src/main/java/GitHubModelsStreamingChatModelExamples.java
index 1ee1e562..3b2ee7bf 100644
--- a/github-models-examples/src/main/java/GitHubModelsStreamingChatModelExamples.java
+++ b/github-models-examples/src/main/java/GitHubModelsStreamingChatModelExamples.java
@@ -1,7 +1,6 @@
-import dev.langchain4j.data.message.AiMessage;
-import dev.langchain4j.model.StreamingResponseHandler;
+import dev.langchain4j.model.chat.response.ChatResponse;
+import dev.langchain4j.model.chat.response.StreamingChatResponseHandler;
import dev.langchain4j.model.github.GitHubModelsStreamingChatModel;
-import dev.langchain4j.model.output.Response;
import java.util.concurrent.CompletableFuture;
@@ -21,17 +20,18 @@ public static void main(String[] args) {
String userMessage = "Write a 100-word poem about Java and AI";
- CompletableFuture> futureResponse = new CompletableFuture<>();
- model.generate(userMessage, new StreamingResponseHandler() {
+ CompletableFuture futureResponse = new CompletableFuture<>();
+
+ model.chat(userMessage, new StreamingChatResponseHandler() {
@Override
- public void onNext(String token) {
- System.out.print(token);
+ public void onPartialResponse(String partialResponse) {
+ System.out.print(partialResponse);
}
@Override
- public void onComplete(Response response) {
- futureResponse.complete(response);
+ public void onCompleteResponse(ChatResponse completeResponse) {
+ futureResponse.complete(completeResponse);
}
@Override
diff --git a/infinispan-example/pom.xml b/infinispan-example/pom.xml
index 5399696d..874ddb85 100644
--- a/infinispan-example/pom.xml
+++ b/infinispan-example/pom.xml
@@ -6,7 +6,7 @@
dev.langchain4j
infinispan-example
- 1.0.0-alpha1
+ 1.0.0-beta1
17
@@ -19,13 +19,13 @@
dev.langchain4j
langchain4j-infinispan
- 1.0.0-alpha1
+ 1.0.0-beta1
dev.langchain4j
langchain4j-embeddings-all-minilm-l6-v2
- 1.0.0-alpha1
+ 1.0.0-beta1
diff --git a/jakartaee-microprofile-example/pom.xml b/jakartaee-microprofile-example/pom.xml
index b976d881..269c1f27 100644
--- a/jakartaee-microprofile-example/pom.xml
+++ b/jakartaee-microprofile-example/pom.xml
@@ -32,12 +32,12 @@
dev.langchain4j
langchain4j
- 1.0.0-alpha1
+ 1.0.0-beta1
dev.langchain4j
langchain4j-hugging-face
- 1.0.0-alpha1
+ 1.0.0-beta1
org.slf4j
diff --git a/jakartaee-microprofile-example/src/main/java/dev/langchain4j/example/rest/ModelResource.java b/jakartaee-microprofile-example/src/main/java/dev/langchain4j/example/rest/ModelResource.java
index 4aaffdee..524aea6a 100644
--- a/jakartaee-microprofile-example/src/main/java/dev/langchain4j/example/rest/ModelResource.java
+++ b/jakartaee-microprofile-example/src/main/java/dev/langchain4j/example/rest/ModelResource.java
@@ -1,23 +1,9 @@
package dev.langchain4j.example.rest;
-import static dev.langchain4j.data.message.SystemMessage.systemMessage;
-import static dev.langchain4j.data.message.UserMessage.userMessage;
-import static dev.langchain4j.data.segment.TextSegment.textSegment;
-import static dev.langchain4j.model.huggingface.HuggingFaceModelName.SENTENCE_TRANSFORMERS_ALL_MINI_LM_L6_V2;
-import static dev.langchain4j.model.huggingface.HuggingFaceModelName.TII_UAE_FALCON_7B_INSTRUCT;
-import static dev.langchain4j.store.embedding.CosineSimilarity.between;
-import static dev.langchain4j.store.embedding.RelevanceScore.fromCosineSimilarity;
-import static java.time.Duration.ofSeconds;
-
-import java.util.List;
-import java.util.Properties;
-
-import org.eclipse.microprofile.config.inject.ConfigProperty;
-import org.eclipse.microprofile.openapi.annotations.Operation;
-
import dev.langchain4j.data.embedding.Embedding;
import dev.langchain4j.data.message.AiMessage;
import dev.langchain4j.data.message.SystemMessage;
+import dev.langchain4j.data.message.UserMessage;
import dev.langchain4j.data.segment.TextSegment;
import dev.langchain4j.model.huggingface.HuggingFaceChatModel;
import dev.langchain4j.model.huggingface.HuggingFaceEmbeddingModel;
@@ -29,6 +15,18 @@
import jakarta.ws.rs.Produces;
import jakarta.ws.rs.QueryParam;
import jakarta.ws.rs.core.MediaType;
+import org.eclipse.microprofile.config.inject.ConfigProperty;
+import org.eclipse.microprofile.openapi.annotations.Operation;
+
+import java.util.List;
+import java.util.Properties;
+
+import static dev.langchain4j.data.segment.TextSegment.textSegment;
+import static dev.langchain4j.model.huggingface.HuggingFaceModelName.SENTENCE_TRANSFORMERS_ALL_MINI_LM_L6_V2;
+import static dev.langchain4j.model.huggingface.HuggingFaceModelName.TII_UAE_FALCON_7B_INSTRUCT;
+import static dev.langchain4j.store.embedding.CosineSimilarity.between;
+import static dev.langchain4j.store.embedding.RelevanceScore.fromCosineSimilarity;
+import static java.time.Duration.ofSeconds;
@ApplicationScoped
@Path("model")
@@ -44,25 +42,25 @@ public class ModelResource {
private HuggingFaceLanguageModel getLanguageModel() {
if (languageModel == null) {
languageModel = HuggingFaceLanguageModel.builder()
- .accessToken(HUGGING_FACE_API_KEY)
- .modelId(TII_UAE_FALCON_7B_INSTRUCT)
- .timeout(ofSeconds(120))
- .temperature(1.0)
- .maxNewTokens(30)
- .waitForModel(true)
- .build();
+ .accessToken(HUGGING_FACE_API_KEY)
+ .modelId(TII_UAE_FALCON_7B_INSTRUCT)
+ .timeout(ofSeconds(120))
+ .temperature(1.0)
+ .maxNewTokens(30)
+ .waitForModel(true)
+ .build();
}
return languageModel;
}
-
+
private HuggingFaceEmbeddingModel getEmbeddingModel() {
if (embeddingModel == null) {
embeddingModel = HuggingFaceEmbeddingModel.builder()
- .accessToken(HUGGING_FACE_API_KEY)
- .modelId(SENTENCE_TRANSFORMERS_ALL_MINI_LM_L6_V2)
- .timeout(ofSeconds(120))
- .waitForModel(true)
- .build();
+ .accessToken(HUGGING_FACE_API_KEY)
+ .modelId(SENTENCE_TRANSFORMERS_ALL_MINI_LM_L6_V2)
+ .timeout(ofSeconds(120))
+ .waitForModel(true)
+ .build();
}
return embeddingModel;
}
@@ -71,9 +69,9 @@ private HuggingFaceEmbeddingModel getEmbeddingModel() {
@Produces(MediaType.TEXT_PLAIN)
@Path("language")
@Operation(
- summary = "Use the language model.",
- description = "Provide a sequence of words to a large language model.",
- operationId = "languageModelAsk" )
+ summary = "Use the language model.",
+ description = "Provide a sequence of words to a large language model.",
+ operationId = "languageModelAsk")
public String languageModelAsk(@QueryParam("question") String question) {
HuggingFaceLanguageModel model = getLanguageModel();
@@ -93,33 +91,30 @@ public String languageModelAsk(@QueryParam("question") String question) {
@Produces(MediaType.APPLICATION_JSON)
@Path("chat")
@Operation(
- summary = "Use the chat model.",
- description = "Assume you are talking with an agent that is knowledgeable about " +
- "Large Language Models. Ask any question about it.",
- operationId = "chatModelAsk" )
+ summary = "Use the chat model.",
+ description = "Assume you are talking with an agent that is knowledgeable about " +
+ "Large Language Models. Ask any question about it.",
+ operationId = "chatModelAsk")
public List chatModelAsk(@QueryParam("userMessage") String userMessage) {
HuggingFaceChatModel model = HuggingFaceChatModel.builder()
- .accessToken(HUGGING_FACE_API_KEY)
- .modelId(TII_UAE_FALCON_7B_INSTRUCT)
- .timeout(ofSeconds(120))
- .temperature(1.0)
- .maxNewTokens(200)
- .waitForModel(true)
- .build();
-
- SystemMessage systemMessage =
- systemMessage("You are very knowledgeble about Large Language Models. Be friendly. Give concise answers.");
-
- AiMessage aiMessage = model.generate(
- systemMessage,
- userMessage(userMessage)
- ).content();
+ .accessToken(HUGGING_FACE_API_KEY)
+ .modelId(TII_UAE_FALCON_7B_INSTRUCT)
+ .timeout(ofSeconds(120))
+ .temperature(1.0)
+ .maxNewTokens(200)
+ .waitForModel(true)
+ .build();
+
+ SystemMessage systemMessage = SystemMessage.from(
+ "You are very knowledgeable about Large Language Models. Be friendly. Give concise answers.");
+
+ AiMessage aiMessage = model.chat(systemMessage, UserMessage.from(userMessage)).aiMessage();
return List.of(
- "System: " + systemMessage.text(),
- "Me: " + userMessage,
- "Agent: " + aiMessage.text().trim());
+ "System: " + systemMessage.text(),
+ "Me: " + userMessage,
+ "Agent: " + aiMessage.text().trim());
}
@@ -134,12 +129,12 @@ private Properties getProperties(String value, Embedding embedding) {
@Produces(MediaType.APPLICATION_JSON)
@Path("similarity")
@Operation(
- summary = "Use the embedding model.",
- description = "Determine the similarity and relevance score of two sentences.",
- operationId = "similarity" )
+ summary = "Use the embedding model.",
+ description = "Determine the similarity and relevance score of two sentences.",
+ operationId = "similarity")
public Properties similarity(
- @QueryParam("text1") String text1,
- @QueryParam("text2") String text2) {
+ @QueryParam("text1") String text1,
+ @QueryParam("text2") String text2) {
HuggingFaceEmbeddingModel model = getEmbeddingModel();
diff --git a/javafx-example/pom.xml b/javafx-example/pom.xml
index 73d41fbf..0f8f4470 100644
--- a/javafx-example/pom.xml
+++ b/javafx-example/pom.xml
@@ -15,7 +15,7 @@
21.0.1
- 1.0.0-alpha1
+ 1.0.0-beta1
2.22.1
diff --git a/javafx-example/src/main/java/AnswerService.java b/javafx-example/src/main/java/AnswerService.java
index b38bb649..bf1ed1fe 100644
--- a/javafx-example/src/main/java/AnswerService.java
+++ b/javafx-example/src/main/java/AnswerService.java
@@ -38,8 +38,8 @@ void ask(SearchAction action) {
var responseHandler = new CustomStreamingResponseHandler(action);
assistant.chat(action.getQuestion())
- .onNext(responseHandler::onNext)
- .onComplete(responseHandler::onComplete)
+ .onPartialResponse(responseHandler::onNext)
+ .onCompleteResponse(responseHandler::onComplete)
.onError(responseHandler::onError)
.start();
}
diff --git a/javafx-example/src/main/java/CustomStreamingResponseHandler.java b/javafx-example/src/main/java/CustomStreamingResponseHandler.java
index f9954533..59466817 100644
--- a/javafx-example/src/main/java/CustomStreamingResponseHandler.java
+++ b/javafx-example/src/main/java/CustomStreamingResponseHandler.java
@@ -1,5 +1,4 @@
-import dev.langchain4j.data.message.AiMessage;
-import dev.langchain4j.model.output.Response;
+import dev.langchain4j.model.chat.response.ChatResponse;
import javafx.application.Platform;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
@@ -18,7 +17,7 @@ public void onNext(String token) {
Platform.runLater(() -> action.appendAnswer(token));
}
- public void onComplete(Response response) {
+ public void onComplete(ChatResponse response) {
Platform.runLater(() -> {
LOGGER.info("Complete response: " + response.toString());
LOGGER.info("Answer is complete for '" + action.getQuestion() + "', size: "
diff --git a/jlama-examples/pom.xml b/jlama-examples/pom.xml
index 9ba6aba6..1919c36f 100644
--- a/jlama-examples/pom.xml
+++ b/jlama-examples/pom.xml
@@ -6,7 +6,7 @@
dev.langchain4j
jlama-examples
- 1.0.0-alpha1
+ 1.0.0-beta1
21
@@ -21,7 +21,7 @@
dev.langchain4j
langchain4j-jlama
- 1.0.0-alpha1
+ 1.0.0-beta1
com.github.tjake
@@ -47,7 +47,7 @@
dev.langchain4j
langchain4j
- 1.0.0-alpha1
+ 1.0.0-beta1
diff --git a/jlama-examples/src/main/java/JlamaBasicRagEmbedExamples.java b/jlama-examples/src/main/java/JlamaBasicRagEmbedExamples.java
index 468f6070..2b6ffd8e 100644
--- a/jlama-examples/src/main/java/JlamaBasicRagEmbedExamples.java
+++ b/jlama-examples/src/main/java/JlamaBasicRagEmbedExamples.java
@@ -86,7 +86,7 @@ public static void main(String[] args) {
.temperature(0.2f) // expect a more focused and deterministic answer
.build();
- AiMessage aiMessage = chatModel.generate(prompt.toUserMessage()).content();
+ AiMessage aiMessage = chatModel.chat(prompt.toUserMessage()).aiMessage();
String answer = aiMessage.text();
System.out.println(answer); // According to Inca legend, the llamas were created by the mythical founders of the Inca Empire....
}
diff --git a/jlama-examples/src/main/java/JlamaChatModelExamples.java b/jlama-examples/src/main/java/JlamaChatModelExamples.java
index 3a039fb4..37ddb6cd 100644
--- a/jlama-examples/src/main/java/JlamaChatModelExamples.java
+++ b/jlama-examples/src/main/java/JlamaChatModelExamples.java
@@ -1,6 +1,7 @@
import dev.langchain4j.data.message.SystemMessage;
import dev.langchain4j.data.message.UserMessage;
import dev.langchain4j.model.chat.ChatLanguageModel;
+import dev.langchain4j.model.chat.response.ChatResponse;
import dev.langchain4j.model.jlama.JlamaChatModel;
public class JlamaChatModelExamples {
@@ -14,13 +15,12 @@ public static void main(String[] args) {
.temperature(0.3f)
.build();
- String response = model.generate(
- SystemMessage.from("You are helpful chatbot who is a java expert."),
- UserMessage.from("Write a java program to print hello world."))
- .content()
- .text();
+ ChatResponse chatResponse = model.chat(
+ SystemMessage.from("You are helpful chatbot who is a java expert."),
+ UserMessage.from("Write a java program to print hello world.")
+ );
- System.out.println("\n" + response + "\n");
+ System.out.println("\n" + chatResponse.aiMessage().text() + "\n");
}
}
}
diff --git a/jlama-examples/src/main/java/JlamaStreamingChatModelExamples.java b/jlama-examples/src/main/java/JlamaStreamingChatModelExamples.java
index 9588ee7d..6be386ac 100644
--- a/jlama-examples/src/main/java/JlamaStreamingChatModelExamples.java
+++ b/jlama-examples/src/main/java/JlamaStreamingChatModelExamples.java
@@ -1,11 +1,10 @@
-import dev.langchain4j.data.message.AiMessage;
import dev.langchain4j.data.message.ChatMessage;
import dev.langchain4j.data.message.SystemMessage;
import dev.langchain4j.data.message.UserMessage;
-import dev.langchain4j.model.StreamingResponseHandler;
import dev.langchain4j.model.chat.StreamingChatLanguageModel;
+import dev.langchain4j.model.chat.response.ChatResponse;
+import dev.langchain4j.model.chat.response.StreamingChatResponseHandler;
import dev.langchain4j.model.jlama.JlamaStreamingChatModel;
-import dev.langchain4j.model.output.Response;
import java.util.List;
import java.util.concurrent.CompletableFuture;
@@ -15,7 +14,7 @@ public class JlamaStreamingChatModelExamples {
static class Simple_Streaming_Prompt {
public static void main(String[] args) {
- CompletableFuture> futureResponse = new CompletableFuture<>();
+ CompletableFuture futureResponse = new CompletableFuture<>();
StreamingChatLanguageModel model = JlamaStreamingChatModel.builder()
.modelName("tjake/Llama-3.2-1B-Instruct-JQ4")
@@ -26,15 +25,16 @@ public static void main(String[] args) {
SystemMessage.from("You are a helpful chatbot that answers questions in under 30 words."),
UserMessage.from("What is the best part of France and why?"));
- model.generate(messages, new StreamingResponseHandler<>() {
+ model.chat(messages, new StreamingChatResponseHandler() {
+
@Override
- public void onNext(String token) {
- System.out.print(token);
+ public void onPartialResponse(String partialResponse) {
+ System.out.print(partialResponse);
}
@Override
- public void onComplete(Response response) {
- futureResponse.complete(response);
+ public void onCompleteResponse(ChatResponse completeResponse) {
+ futureResponse.complete(completeResponse);
}
@Override
diff --git a/mcp-example/pom.xml b/mcp-example/pom.xml
index a5f434c0..207514ac 100644
--- a/mcp-example/pom.xml
+++ b/mcp-example/pom.xml
@@ -6,7 +6,7 @@
dev.langchain4j
mcp-example
- 1.0.0-alpha1
+ 1.0.0-beta1
17
@@ -19,13 +19,13 @@
dev.langchain4j
langchain4j-mcp
- 1.0.0-alpha1
+ 1.0.0-beta1
dev.langchain4j
langchain4j-open-ai
- 1.0.0-alpha1
+ 1.0.0-beta1
diff --git a/milvus-example/pom.xml b/milvus-example/pom.xml
index 9cbaf7b1..ea8b0fb6 100644
--- a/milvus-example/pom.xml
+++ b/milvus-example/pom.xml
@@ -6,7 +6,7 @@
dev.langchain4j
milvus-example
- 1.0.0-alpha1
+ 1.0.0-beta1
17
@@ -19,13 +19,13 @@
dev.langchain4j
langchain4j-milvus
- 1.0.0-alpha1
+ 1.0.0-beta1
dev.langchain4j
langchain4j-embeddings-all-minilm-l6-v2
- 1.0.0-alpha1
+ 1.0.0-beta1
diff --git a/mistral-ai-examples/pom.xml b/mistral-ai-examples/pom.xml
index c3453cda..b8712f64 100644
--- a/mistral-ai-examples/pom.xml
+++ b/mistral-ai-examples/pom.xml
@@ -6,7 +6,7 @@
dev.langchain4j
mistral-ai-examples
- 1.0.0-alpha1
+ 1.0.0-beta1
17
@@ -19,13 +19,13 @@
dev.langchain4j
langchain4j-mistral-ai
- 1.0.0-alpha1
+ 1.0.0-beta1
dev.langchain4j
langchain4j
- 1.0.0-alpha1
+ 1.0.0-beta1
diff --git a/mistral-ai-examples/src/main/java/MistralAiBasicRagEmbedExamples.java b/mistral-ai-examples/src/main/java/MistralAiBasicRagEmbedExamples.java
index e0de20e0..f019c512 100644
--- a/mistral-ai-examples/src/main/java/MistralAiBasicRagEmbedExamples.java
+++ b/mistral-ai-examples/src/main/java/MistralAiBasicRagEmbedExamples.java
@@ -95,7 +95,7 @@ public static void main(String[] args) {
.logResponses(true)
.build();
- AiMessage aiMessage = chatModel.generate(prompt.toUserMessage()).content();
+ AiMessage aiMessage = chatModel.chat(prompt.toUserMessage()).aiMessage();
String answer = aiMessage.text();
System.out.println(answer); // According to Inca legend, the llamas were created by the mythical founders of the Inca Empire....
}
diff --git a/mistral-ai-examples/src/main/java/MistralAiChatModelExamples.java b/mistral-ai-examples/src/main/java/MistralAiChatModelExamples.java
index ff16bddf..b0bb52d4 100644
--- a/mistral-ai-examples/src/main/java/MistralAiChatModelExamples.java
+++ b/mistral-ai-examples/src/main/java/MistralAiChatModelExamples.java
@@ -16,7 +16,7 @@ public static void main(String[] args) {
.logResponses(true)
.build();
- String joke = model.generate("Tell me a joke about Java");
+ String joke = model.chat("Tell me a joke about Java");
System.out.println(joke);
}
diff --git a/mistral-ai-examples/src/main/java/MistralAiFunctionCallingExamples.java b/mistral-ai-examples/src/main/java/MistralAiFunctionCallingExamples.java
index 2f14b2a7..fce016e0 100644
--- a/mistral-ai-examples/src/main/java/MistralAiFunctionCallingExamples.java
+++ b/mistral-ai-examples/src/main/java/MistralAiFunctionCallingExamples.java
@@ -5,6 +5,8 @@
import dev.langchain4j.data.message.UserMessage;
import dev.langchain4j.memory.chat.MessageWindowChatMemory;
import dev.langchain4j.model.chat.ChatLanguageModel;
+import dev.langchain4j.model.chat.request.ChatRequest;
+import dev.langchain4j.model.chat.request.ChatRequestParameters;
import dev.langchain4j.model.mistralai.MistralAiChatModel;
import dev.langchain4j.service.AiServices;
import dev.langchain4j.service.SystemMessage;
@@ -80,8 +82,14 @@ public static void main(String[] args) throws Exception {
chatMessages.add(userMessage);
// STEP 2: Model generate function arguments
- // Tool_choice: With multiple tools it's set to "auto" by default.
- AiMessage aiMessage = mistralAiModel.generate(chatMessages, tools).content();
+ // Tool_choice: it's set to "auto" by default.
+ ChatRequest chatRequest = ChatRequest.builder()
+ .messages(chatMessages)
+ .parameters(ChatRequestParameters.builder()
+ .toolSpecifications(tools)
+ .build())
+ .build();
+ AiMessage aiMessage = mistralAiModel.chat(chatRequest).aiMessage();
aiMessage.toolExecutionRequests().forEach(toolSpec -> { // return all tools to call to answer the user query
System.out.println("Function name: " + toolSpec.name());
System.out.println("Function args:" + toolSpec.arguments());
@@ -94,7 +102,7 @@ public static void main(String[] args) throws Exception {
chatMessages.addAll(toolExecutionResultMessages);
// STEP 4: Model generate final response
- AiMessage finalResponse = mistralAiModel.generate(chatMessages).content();
+ AiMessage finalResponse = mistralAiModel.chat(chatMessages).aiMessage();
System.out.println(finalResponse.text()); //According to the payment data, the payment status of transaction T1005 is Pending.
}
diff --git a/mistral-ai-examples/src/main/java/MistralAiStreamingChatModelExamples.java b/mistral-ai-examples/src/main/java/MistralAiStreamingChatModelExamples.java
index 68c93cc1..4ab3c776 100644
--- a/mistral-ai-examples/src/main/java/MistralAiStreamingChatModelExamples.java
+++ b/mistral-ai-examples/src/main/java/MistralAiStreamingChatModelExamples.java
@@ -1,7 +1,6 @@
-import dev.langchain4j.data.message.AiMessage;
-import dev.langchain4j.model.StreamingResponseHandler;
+import dev.langchain4j.model.chat.response.ChatResponse;
+import dev.langchain4j.model.chat.response.StreamingChatResponseHandler;
import dev.langchain4j.model.mistralai.MistralAiStreamingChatModel;
-import dev.langchain4j.model.output.Response;
import java.util.concurrent.CompletableFuture;
@@ -22,17 +21,18 @@ public static void main(String[] args) {
String userMessage = "Write a 100-word poem about Java and AI";
- CompletableFuture> futureResponse = new CompletableFuture<>();
- model.generate(userMessage, new StreamingResponseHandler() {
+ CompletableFuture futureResponse = new CompletableFuture<>();
+
+ model.chat(userMessage, new StreamingChatResponseHandler() {
@Override
- public void onNext(String token) {
- System.out.print(token);
+ public void onPartialResponse(String partialResponse) {
+ System.out.print(partialResponse);
}
@Override
- public void onComplete(Response response) {
- futureResponse.complete(response);
+ public void onCompleteResponse(ChatResponse completeResponse) {
+ futureResponse.complete(completeResponse);
}
@Override
diff --git a/native-java-gemini-function-calling-example/pom.xml b/native-java-gemini-function-calling-example/pom.xml
index 77436de3..2b78db1b 100644
--- a/native-java-gemini-function-calling-example/pom.xml
+++ b/native-java-gemini-function-calling-example/pom.xml
@@ -18,7 +18,7 @@
21
- 1.0.0-alpha1
+ 1.0.0-beta1
diff --git a/neo4j-example/pom.xml b/neo4j-example/pom.xml
index 63fe3bee..afc12c9a 100644
--- a/neo4j-example/pom.xml
+++ b/neo4j-example/pom.xml
@@ -6,7 +6,7 @@
dev.langchain4j
neo4j-example
- 1.0.0-alpha1
+ 1.0.0-beta1
17
@@ -19,13 +19,13 @@
dev.langchain4j
langchain4j-neo4j
- 1.0.0-alpha1
+ 1.0.0-beta1
dev.langchain4j
langchain4j-embeddings-all-minilm-l6-v2
- 1.0.0-alpha1
+ 1.0.0-beta1
diff --git a/ollama-examples/pom.xml b/ollama-examples/pom.xml
index 1ee5adfd..b9c7ede4 100644
--- a/ollama-examples/pom.xml
+++ b/ollama-examples/pom.xml
@@ -6,7 +6,7 @@
dev.langchain4j
ollama-examples
- 1.0.0-alpha1
+ 1.0.0-beta1
17
@@ -19,13 +19,13 @@
dev.langchain4j
langchain4j
- 1.0.0-alpha1
+ 1.0.0-beta1
dev.langchain4j
langchain4j-ollama
- 1.0.0-alpha1
+ 1.0.0-beta1
diff --git a/ollama-examples/src/main/java/OllamaChatModelTest.java b/ollama-examples/src/main/java/OllamaChatModelTest.java
index f993abf2..2a8b4f56 100644
--- a/ollama-examples/src/main/java/OllamaChatModelTest.java
+++ b/ollama-examples/src/main/java/OllamaChatModelTest.java
@@ -3,6 +3,7 @@
import dev.langchain4j.data.message.UserMessage;
import dev.langchain4j.model.chat.ChatLanguageModel;
import dev.langchain4j.model.chat.request.ChatRequest;
+import dev.langchain4j.model.chat.request.ChatRequestParameters;
import dev.langchain4j.model.chat.request.ResponseFormat;
import dev.langchain4j.model.chat.request.ResponseFormatType;
import dev.langchain4j.model.chat.request.json.JsonObjectSchema;
@@ -80,19 +81,25 @@ void json_schema_with_low_level_chat_api_example() {
.logRequests(true)
.build();
- ChatRequest chatRequest = ChatRequest.builder()
- .messages(UserMessage.from("John Doe is 42 years old"))
- .responseFormat(ResponseFormat.builder()
- .type(ResponseFormatType.JSON)
- .jsonSchema(JsonSchema.builder()
- .rootElement(JsonObjectSchema.builder()
- .addStringProperty("name")
- .addIntegerProperty("age")
- .build())
+ ResponseFormat responseFormat = ResponseFormat.builder()
+ .type(ResponseFormatType.JSON)
+ .jsonSchema(JsonSchema.builder()
+ .rootElement(JsonObjectSchema.builder()
+ .addStringProperty("name")
+ .addIntegerProperty("age")
.build())
.build())
.build();
+ ChatRequestParameters parameters = ChatRequestParameters.builder()
+ .responseFormat(responseFormat)
+ .build();
+
+ ChatRequest chatRequest = ChatRequest.builder()
+ .messages(UserMessage.from("John Doe is 42 years old"))
+ .parameters(parameters)
+ .build();
+
ChatResponse chatResponse = chatModel.chat(chatRequest);
System.out.println(chatResponse);
diff --git a/ollama-examples/src/main/java/OllamaStreamingChatModelTest.java b/ollama-examples/src/main/java/OllamaStreamingChatModelTest.java
index 56ca7a4a..dc865e46 100644
--- a/ollama-examples/src/main/java/OllamaStreamingChatModelTest.java
+++ b/ollama-examples/src/main/java/OllamaStreamingChatModelTest.java
@@ -1,8 +1,7 @@
-import dev.langchain4j.data.message.AiMessage;
-import dev.langchain4j.model.StreamingResponseHandler;
import dev.langchain4j.model.chat.StreamingChatLanguageModel;
+import dev.langchain4j.model.chat.response.ChatResponse;
+import dev.langchain4j.model.chat.response.StreamingChatResponseHandler;
import dev.langchain4j.model.ollama.OllamaStreamingChatModel;
-import dev.langchain4j.model.output.Response;
import org.junit.jupiter.api.Test;
import org.testcontainers.junit.jupiter.Testcontainers;
import utils.AbstractOllamaInfrastructure;
@@ -30,17 +29,18 @@ void streaming_example() {
String userMessage = "Write a 100-word poem about Java and AI";
- CompletableFuture> futureResponse = new CompletableFuture<>();
- model.generate(userMessage, new StreamingResponseHandler<>() {
+ CompletableFuture futureResponse = new CompletableFuture<>();
+
+ model.chat(userMessage, new StreamingChatResponseHandler() {
@Override
- public void onNext(String token) {
- System.out.print(token);
+ public void onPartialResponse(String partialResponse) {
+ System.out.print(partialResponse);
}
@Override
- public void onComplete(Response response) {
- futureResponse.complete(response);
+ public void onCompleteResponse(ChatResponse completeResponse) {
+ futureResponse.complete(completeResponse);
}
@Override
diff --git a/open-ai-examples/pom.xml b/open-ai-examples/pom.xml
index ff5df752..d078cdcc 100644
--- a/open-ai-examples/pom.xml
+++ b/open-ai-examples/pom.xml
@@ -6,7 +6,7 @@
dev.langchain4j
open-ai-examples
- 1.0.0-alpha1
+ 1.0.0-beta1
17
@@ -19,19 +19,19 @@
dev.langchain4j
langchain4j-open-ai
- 1.0.0-alpha1
+ 1.0.0-beta1
dev.langchain4j
langchain4j
- 1.0.0-alpha1
+ 1.0.0-beta1
dev.langchain4j
langchain4j-embeddings-all-minilm-l6-v2
- 1.0.0-alpha1
+ 1.0.0-beta1
diff --git a/open-ai-examples/src/main/java/OpenAiChatModelExamples.java b/open-ai-examples/src/main/java/OpenAiChatModelExamples.java
index 2ce30524..fddaeb0a 100644
--- a/open-ai-examples/src/main/java/OpenAiChatModelExamples.java
+++ b/open-ai-examples/src/main/java/OpenAiChatModelExamples.java
@@ -42,11 +42,7 @@ public static void main(String[] args) {
ImageContent.from("https://upload.wikimedia.org/wikipedia/commons/4/47/PNG_transparency_demonstration_1.png")
);
- ChatRequest chatRequest = ChatRequest.builder()
- .messages(userMessage)
- .build();
-
- ChatResponse chatResponse = chatModel.chat(chatRequest);
+ ChatResponse chatResponse = chatModel.chat(userMessage);
System.out.println(chatResponse.aiMessage().text());
}
diff --git a/open-ai-examples/src/main/java/OpenAiStreamingChatModelExamples.java b/open-ai-examples/src/main/java/OpenAiStreamingChatModelExamples.java
index ec99a727..db2e692f 100644
--- a/open-ai-examples/src/main/java/OpenAiStreamingChatModelExamples.java
+++ b/open-ai-examples/src/main/java/OpenAiStreamingChatModelExamples.java
@@ -1,6 +1,4 @@
-import dev.langchain4j.data.message.UserMessage;
import dev.langchain4j.model.chat.StreamingChatLanguageModel;
-import dev.langchain4j.model.chat.request.ChatRequest;
import dev.langchain4j.model.chat.response.ChatResponse;
import dev.langchain4j.model.chat.response.StreamingChatResponseHandler;
import dev.langchain4j.model.openai.OpenAiStreamingChatModel;
@@ -16,11 +14,7 @@ public static void main(String[] args) {
.modelName(GPT_4_O_MINI)
.build();
- ChatRequest chatRequest = ChatRequest.builder()
- .messages(UserMessage.from("Tell me a joke about Java"))
- .build();
-
- chatModel.chat(chatRequest, new StreamingChatResponseHandler() {
+ chatModel.chat("Tell me a joke about Java", new StreamingChatResponseHandler() {
@Override
public void onPartialResponse(String partialResponse) {
diff --git a/opensearch-example/pom.xml b/opensearch-example/pom.xml
index 3e45e524..ca4bca04 100644
--- a/opensearch-example/pom.xml
+++ b/opensearch-example/pom.xml
@@ -6,7 +6,7 @@
dev.langchain4j
opensearch-example
- 1.0.0-alpha1
+ 1.0.0-beta1
17
@@ -19,13 +19,13 @@
dev.langchain4j
langchain4j-opensearch
- 1.0.0-alpha1
+ 1.0.0-beta1
dev.langchain4j
langchain4j-embeddings-all-minilm-l6-v2
- 1.0.0-alpha1
+ 1.0.0-beta1
diff --git a/oracle-example/pom.xml b/oracle-example/pom.xml
index 6b22d09a..29208533 100644
--- a/oracle-example/pom.xml
+++ b/oracle-example/pom.xml
@@ -6,7 +6,7 @@
dev.langchain4j
oracle-example
- 1.0.0-alpha1
+ 1.0.0-beta1
17
@@ -26,13 +26,13 @@
dev.langchain4j
langchain4j-oracle
- 1.0.0-alpha1
+ 1.0.0-beta1
dev.langchain4j
langchain4j-embeddings-all-minilm-l6-v2-q
- 1.0.0-alpha1
+ 1.0.0-beta1
diff --git a/other-examples/pom.xml b/other-examples/pom.xml
index fe314164..9de8147d 100644
--- a/other-examples/pom.xml
+++ b/other-examples/pom.xml
@@ -6,7 +6,7 @@
dev.langchain4j
other-examples
- 1.0.0-alpha1
+ 1.0.0-beta1
17
@@ -19,49 +19,49 @@
dev.langchain4j
langchain4j
- 1.0.0-alpha1
+ 1.0.0-beta1
dev.langchain4j
langchain4j-open-ai
- 1.0.0-alpha1
+ 1.0.0-beta1
dev.langchain4j
langchain4j-hugging-face
- 1.0.0-alpha1
+ 1.0.0-beta1
dev.langchain4j
langchain4j-vertex-ai
- 1.0.0-alpha1
+ 1.0.0-beta1
dev.langchain4j
langchain4j-embeddings
- 1.0.0-alpha1
+ 1.0.0-beta1
dev.langchain4j
langchain4j-embeddings-all-minilm-l6-v2
- 1.0.0-alpha1
+ 1.0.0-beta1
dev.langchain4j
langchain4j-document-parser-apache-tika
- 1.0.0-alpha1
+ 1.0.0-beta1
dev.langchain4j
langchain4j-code-execution-engine-judge0
- 1.0.0-alpha1
+ 1.0.0-beta1
diff --git a/other-examples/src/main/java/ChatMemoryExamples.java b/other-examples/src/main/java/ChatMemoryExamples.java
index 8c652e27..e9d17114 100644
--- a/other-examples/src/main/java/ChatMemoryExamples.java
+++ b/other-examples/src/main/java/ChatMemoryExamples.java
@@ -30,12 +30,12 @@ public static void main(String[] args) {
// You can process/modify the message before saving if required.
chatMemory.add(userMessage("Hello, my name is Klaus"));
- AiMessage answer = model.generate(chatMemory.messages()).content();
+ AiMessage answer = model.chat(chatMemory.messages()).aiMessage();
System.out.println(answer.text()); // Hello Klaus! How can I assist you today?
chatMemory.add(answer);
chatMemory.add(userMessage("What is my name?"));
- AiMessage answerWithName = model.generate(chatMemory.messages()).content();
+ AiMessage answerWithName = model.chat(chatMemory.messages()).aiMessage();
System.out.println(answerWithName.text()); // Your name is Klaus.
chatMemory.add(answerWithName);
}
diff --git a/other-examples/src/main/java/HelloWorldExample.java b/other-examples/src/main/java/HelloWorldExample.java
index ca26f0f3..a8012b9c 100644
--- a/other-examples/src/main/java/HelloWorldExample.java
+++ b/other-examples/src/main/java/HelloWorldExample.java
@@ -14,7 +14,7 @@ public static void main(String[] args) {
.build();
// Start interacting
- String answer = model.generate("Hello world!");
+ String answer = model.chat("Hello world!");
System.out.println(answer); // Hello! How can I assist you today?
}
diff --git a/other-examples/src/main/java/ProxyExample.java b/other-examples/src/main/java/ProxyExample.java
index e64df100..fcda2f75 100644
--- a/other-examples/src/main/java/ProxyExample.java
+++ b/other-examples/src/main/java/ProxyExample.java
@@ -17,7 +17,7 @@ public static void main(String[] args) {
.proxy(new Proxy(HTTP, new InetSocketAddress("39.175.77.7", 30001)))
.build();
- String answer = model.generate("hello");
+ String answer = model.chat("hello");
System.out.println(answer);
}
}
diff --git a/other-examples/src/main/java/ServiceWithStreamingExample.java b/other-examples/src/main/java/ServiceWithStreamingExample.java
index 09cd71c2..26a4a3ae 100644
--- a/other-examples/src/main/java/ServiceWithStreamingExample.java
+++ b/other-examples/src/main/java/ServiceWithStreamingExample.java
@@ -24,8 +24,8 @@ public static void main(String[] args) {
TokenStream tokenStream = assistant.chat("Tell me a joke");
- tokenStream.onNext(System.out::println)
- .onComplete(System.out::println)
+ tokenStream.onPartialResponse(System.out::println)
+ .onCompleteResponse(System.out::println)
.onError(Throwable::printStackTrace)
.start();
}
diff --git a/other-examples/src/main/java/StreamingExamples.java b/other-examples/src/main/java/StreamingExamples.java
index fd73e693..1b38db61 100644
--- a/other-examples/src/main/java/StreamingExamples.java
+++ b/other-examples/src/main/java/StreamingExamples.java
@@ -1,7 +1,8 @@
-import dev.langchain4j.data.message.AiMessage;
import dev.langchain4j.data.message.ChatMessage;
import dev.langchain4j.model.StreamingResponseHandler;
import dev.langchain4j.model.chat.StreamingChatLanguageModel;
+import dev.langchain4j.model.chat.response.ChatResponse;
+import dev.langchain4j.model.chat.response.StreamingChatResponseHandler;
import dev.langchain4j.model.language.StreamingLanguageModel;
import dev.langchain4j.model.openai.OpenAiStreamingChatModel;
import dev.langchain4j.model.openai.OpenAiStreamingLanguageModel;
@@ -32,16 +33,16 @@ public static void main(String[] args) {
userMessage("Tell me a joke")
);
- model.generate(messages, new StreamingResponseHandler() {
+ model.chat(messages, new StreamingChatResponseHandler() {
@Override
- public void onNext(String token) {
- System.out.println("New token: '" + token + "'");
+ public void onPartialResponse(String partialResponse) {
+ System.out.println("New token: '" + partialResponse + "'");
}
@Override
- public void onComplete(Response response) {
- System.out.println("Streaming completed: " + response);
+ public void onCompleteResponse(ChatResponse completeResponse) {
+ System.out.println("Streaming completed: " + completeResponse);
}
@Override
@@ -62,7 +63,7 @@ public static void main(String[] args) {
.modelName(GPT_3_5_TURBO_INSTRUCT)
.build();
- model.generate("Tell me a joke", new StreamingResponseHandler() {
+ model.generate("Tell me a joke", new StreamingResponseHandler<>() {
@Override
public void onNext(String token) {
diff --git a/other-examples/src/main/java/StructuredPromptTemplateExamples.java b/other-examples/src/main/java/StructuredPromptTemplateExamples.java
index 3afb174b..74b65be7 100644
--- a/other-examples/src/main/java/StructuredPromptTemplateExamples.java
+++ b/other-examples/src/main/java/StructuredPromptTemplateExamples.java
@@ -35,7 +35,7 @@ public static void main(String[] args) {
createRecipePrompt.ingredients = asList("cucumber", "tomato", "feta", "onion", "olives");
Prompt prompt = StructuredPromptProcessor.toPrompt(createRecipePrompt);
- AiMessage aiMessage = model.generate(prompt.toUserMessage()).content();
+ AiMessage aiMessage = model.chat(prompt.toUserMessage()).aiMessage();
System.out.println(aiMessage.text());
}
}
@@ -71,7 +71,7 @@ public static void main(String[] args) {
createRecipePrompt.ingredients = asList("cucumber", "tomato", "feta", "onion", "olives");
Prompt prompt = StructuredPromptProcessor.toPrompt(createRecipePrompt);
- AiMessage aiMessage = model.generate(prompt.toUserMessage()).content();
+ AiMessage aiMessage = model.chat(prompt.toUserMessage()).aiMessage();
System.out.println(aiMessage.text());
}
}
diff --git a/ovh-ai-examples/pom.xml b/ovh-ai-examples/pom.xml
index e2dbfa2c..ded55001 100644
--- a/ovh-ai-examples/pom.xml
+++ b/ovh-ai-examples/pom.xml
@@ -6,7 +6,7 @@
dev.langchain4j
ovh-ai-examples
- 1.0.0-alpha1
+ 1.0.0-beta1
17
@@ -19,13 +19,13 @@
dev.langchain4j
langchain4j
- 1.0.0-alpha1
+ 1.0.0-beta1
dev.langchain4j
langchain4j-ovh-ai
- 1.0.0-alpha1
+ 1.0.0-beta1
diff --git a/pgvector-example/pom.xml b/pgvector-example/pom.xml
index 8714e934..372166ea 100644
--- a/pgvector-example/pom.xml
+++ b/pgvector-example/pom.xml
@@ -6,7 +6,7 @@
dev.langchain4j
pgvector-example
- 1.0.0-alpha1
+ 1.0.0-beta1
17
@@ -19,7 +19,7 @@
dev.langchain4j
langchain4j-pgvector
- 1.0.0-alpha1
+ 1.0.0-beta1
@@ -31,7 +31,7 @@
dev.langchain4j
langchain4j-embeddings-all-minilm-l6-v2
- 1.0.0-alpha1
+ 1.0.0-beta1
diff --git a/pinecone-example/pom.xml b/pinecone-example/pom.xml
index ef630423..c95c7e9e 100644
--- a/pinecone-example/pom.xml
+++ b/pinecone-example/pom.xml
@@ -6,7 +6,7 @@
dev.langchain4j
pinecone-example
- 1.0.0-alpha1
+ 1.0.0-beta1
17
@@ -19,13 +19,13 @@
dev.langchain4j
langchain4j-pinecone
- 1.0.0-alpha1
+ 1.0.0-beta1
dev.langchain4j
langchain4j-embeddings-all-minilm-l6-v2
- 1.0.0-alpha1
+ 1.0.0-beta1
diff --git a/pom.xml b/pom.xml
index 5bc3e834..754a00b6 100644
--- a/pom.xml
+++ b/pom.xml
@@ -6,7 +6,7 @@
dev.langchain4j
langchain4j-examples
- 1.0.0-alpha1
+ 1.0.0-beta1
pom
diff --git a/qdrant-example/pom.xml b/qdrant-example/pom.xml
index e77e70cb..94fe27bd 100644
--- a/qdrant-example/pom.xml
+++ b/qdrant-example/pom.xml
@@ -6,7 +6,7 @@
dev.langchain4j
qdrant-example
- 1.0.0-alpha1
+ 1.0.0-beta1
17
@@ -19,13 +19,13 @@
dev.langchain4j
langchain4j-qdrant
- 1.0.0-alpha1
+ 1.0.0-beta1
dev.langchain4j
langchain4j-embeddings-all-minilm-l6-v2
- 1.0.0-alpha1
+ 1.0.0-beta1
diff --git a/rag-examples/pom.xml b/rag-examples/pom.xml
index 973b3039..1d13c2ec 100644
--- a/rag-examples/pom.xml
+++ b/rag-examples/pom.xml
@@ -6,7 +6,7 @@
dev.langchain4j
rag-examples
- 1.0.0-alpha1
+ 1.0.0-beta1
17
@@ -19,37 +19,37 @@
dev.langchain4j
langchain4j
- 1.0.0-alpha1
+ 1.0.0-beta1
dev.langchain4j
langchain4j-easy-rag
- 1.0.0-alpha1
+ 1.0.0-beta1
dev.langchain4j
langchain4j-open-ai
- 1.0.0-alpha1
+ 1.0.0-beta1
dev.langchain4j
langchain4j-cohere
- 1.0.0-alpha1
+ 1.0.0-beta1
dev.langchain4j
langchain4j-web-search-engine-tavily
- 1.0.0-alpha1
+ 1.0.0-beta1
dev.langchain4j
langchain4j-experimental-sql
- 1.0.0-alpha1
+ 1.0.0-beta1
@@ -61,13 +61,13 @@
dev.langchain4j
langchain4j-embeddings-bge-small-en-v15-q
- 1.0.0-alpha1
+ 1.0.0-beta1
dev.langchain4j
langchain4j-embedding-store-filter-parser-sql
- 1.0.0-alpha1
+ 1.0.0-beta1
diff --git a/rag-examples/src/main/java/_3_advanced/_06_Advanced_RAG_Skip_Retrieval_Example.java b/rag-examples/src/main/java/_3_advanced/_06_Advanced_RAG_Skip_Retrieval_Example.java
index 57deddfc..efbce569 100644
--- a/rag-examples/src/main/java/_3_advanced/_06_Advanced_RAG_Skip_Retrieval_Example.java
+++ b/rag-examples/src/main/java/_3_advanced/_06_Advanced_RAG_Skip_Retrieval_Example.java
@@ -106,7 +106,7 @@ public Collection route(Query query) {
Prompt prompt = PROMPT_TEMPLATE.apply(query.text());
- AiMessage aiMessage = chatLanguageModel.generate(prompt.toUserMessage()).content();
+ AiMessage aiMessage = chatLanguageModel.chat(prompt.toUserMessage()).aiMessage();
System.out.println("LLM decided: " + aiMessage.text());
if (aiMessage.text().toLowerCase().contains("no")) {
diff --git a/rag-examples/src/main/java/_4_low_level/_01_Low_Level_Naive_RAG_Example.java b/rag-examples/src/main/java/_4_low_level/_01_Low_Level_Naive_RAG_Example.java
index 24444e2c..4b712414 100644
--- a/rag-examples/src/main/java/_4_low_level/_01_Low_Level_Naive_RAG_Example.java
+++ b/rag-examples/src/main/java/_4_low_level/_01_Low_Level_Naive_RAG_Example.java
@@ -98,7 +98,7 @@ public static void main(String[] args) {
.modelName(GPT_4_O_MINI)
.timeout(Duration.ofSeconds(60))
.build();
- AiMessage aiMessage = chatModel.generate(prompt.toUserMessage()).content();
+ AiMessage aiMessage = chatModel.chat(prompt.toUserMessage()).aiMessage();
// See an answer from the model
String answer = aiMessage.text();
diff --git a/redis-example/pom.xml b/redis-example/pom.xml
index ddf7c250..47b567fb 100644
--- a/redis-example/pom.xml
+++ b/redis-example/pom.xml
@@ -6,7 +6,7 @@
dev.langchain4j
redis-example
- 1.0.0-alpha1
+ 1.0.0-beta1
17
@@ -25,7 +25,7 @@
dev.langchain4j
langchain4j-embeddings-all-minilm-l6-v2
- 1.0.0-alpha1
+ 1.0.0-beta1
diff --git a/spring-boot-example/pom.xml b/spring-boot-example/pom.xml
index 02649d57..489e6a5d 100644
--- a/spring-boot-example/pom.xml
+++ b/spring-boot-example/pom.xml
@@ -12,7 +12,7 @@
dev.langchain4j
spring-boot-example
- 1.0.0-alpha1
+ 1.0.0-beta1
17
@@ -23,13 +23,13 @@
dev.langchain4j
langchain4j-spring-boot-starter
- 1.0.0-alpha1
+ 1.0.0-beta1
dev.langchain4j
langchain4j-open-ai-spring-boot-starter
- 1.0.0-alpha1
+ 1.0.0-beta1
@@ -41,7 +41,7 @@
dev.langchain4j
langchain4j-reactor
- 1.0.0-alpha1
+ 1.0.0-beta1
org.springframework.boot
diff --git a/spring-boot-example/src/main/java/dev/langchain4j/example/lowlevel/ChatLanguageModelController.java b/spring-boot-example/src/main/java/dev/langchain4j/example/lowlevel/ChatLanguageModelController.java
index 84a59668..4ea93f7e 100644
--- a/spring-boot-example/src/main/java/dev/langchain4j/example/lowlevel/ChatLanguageModelController.java
+++ b/spring-boot-example/src/main/java/dev/langchain4j/example/lowlevel/ChatLanguageModelController.java
@@ -19,6 +19,6 @@ class ChatLanguageModelController {
@GetMapping("/model")
public String model(@RequestParam(value = "message", defaultValue = "Hello") String message) {
- return chatLanguageModel.generate(message);
+ return chatLanguageModel.chat(message);
}
}
diff --git a/tutorials/README.md b/tutorials/README.md
index 163645cc..a3502f84 100644
--- a/tutorials/README.md
+++ b/tutorials/README.md
@@ -10,7 +10,7 @@ mvn -Pcomplete package
2. Run individual apps, such as `_00_HelloWorld`, with the following command:
```shell
-java -cp ./target/tutorials-1.0.0-alpha1-jar-with-dependencies.jar _00_HelloWorld "what is Java?"
+java -cp ./target/tutorials-1.0.0-beta1-jar-with-dependencies.jar _00_HelloWorld "what is Java?"
```
## Running the examples as GraalVM native images
@@ -18,7 +18,7 @@ java -cp ./target/tutorials-1.0.0-alpha1-jar-with-dependencies.jar _00_HelloWorl
In case you want to produce a native executable version of your app, same command as above works with `native-image`:
```shell
-native-image -cp ./target/tutorials-1.0.0-alpha1-jar-with-dependencies.jar _00_HelloWorld -o native-helloworld
+native-image -cp ./target/tutorials-1.0.0-beta1-jar-with-dependencies.jar _00_HelloWorld -o native-helloworld
```
diff --git a/tutorials/pom.xml b/tutorials/pom.xml
index 94bfcf93..cfc891b9 100644
--- a/tutorials/pom.xml
+++ b/tutorials/pom.xml
@@ -6,7 +6,7 @@
dev.langchain4j
tutorials
- 1.0.0-alpha1
+ 1.0.0-beta1
17
@@ -19,25 +19,25 @@
dev.langchain4j
langchain4j
- 1.0.0-alpha1
+ 1.0.0-beta1
dev.langchain4j
langchain4j-open-ai
- 1.0.0-alpha1
+ 1.0.0-beta1
dev.langchain4j
langchain4j-embeddings-all-minilm-l6-v2
- 1.0.0-alpha1
+ 1.0.0-beta1
dev.langchain4j
langchain4j-code-execution-engine-judge0
- 1.0.0-alpha1
+ 1.0.0-beta1
diff --git a/tutorials/src/main/java/_00_HelloWorld.java b/tutorials/src/main/java/_00_HelloWorld.java
index f7ed5d46..efdc01f4 100644
--- a/tutorials/src/main/java/_00_HelloWorld.java
+++ b/tutorials/src/main/java/_00_HelloWorld.java
@@ -12,7 +12,7 @@ public static void main(String[] args) {
.modelName(GPT_4_O_MINI)
.build();
- String answer = model.generate("Say Hello World");
+ String answer = model.chat("Say Hello World");
System.out.println(answer);
}
diff --git a/tutorials/src/main/java/_01_ModelParameters.java b/tutorials/src/main/java/_01_ModelParameters.java
index 61c427d4..4e882df5 100644
--- a/tutorials/src/main/java/_01_ModelParameters.java
+++ b/tutorials/src/main/java/_01_ModelParameters.java
@@ -21,7 +21,7 @@ public static void main(String[] args) {
String prompt = "Explain in three lines how to make a beautiful painting";
- String response = model.generate(prompt);
+ String response = model.chat(prompt);
System.out.println(response);
}
diff --git a/tutorials/src/main/java/_03_PromptTemplate.java b/tutorials/src/main/java/_03_PromptTemplate.java
index 24e8f0b5..bc3da5d0 100644
--- a/tutorials/src/main/java/_03_PromptTemplate.java
+++ b/tutorials/src/main/java/_03_PromptTemplate.java
@@ -34,7 +34,7 @@ public static void main(String[] args) {
Prompt prompt = promptTemplate.apply(variables);
- String response = model.generate(prompt.text());
+ String response = model.chat(prompt.text());
System.out.println(response);
}
@@ -84,7 +84,7 @@ public static void main(String[] args) {
Prompt prompt = StructuredPromptProcessor.toPrompt(createRecipePrompt);
- String recipe = model.generate(prompt.text());
+ String recipe = model.chat(prompt.text());
System.out.println(recipe);
}
diff --git a/tutorials/src/main/java/_04_Streaming.java b/tutorials/src/main/java/_04_Streaming.java
index 0ef48e39..36ae446d 100644
--- a/tutorials/src/main/java/_04_Streaming.java
+++ b/tutorials/src/main/java/_04_Streaming.java
@@ -1,8 +1,6 @@
-import dev.langchain4j.data.message.AiMessage;
-import dev.langchain4j.model.StreamingResponseHandler;
-import dev.langchain4j.model.openai.OpenAiChatModelName;
+import dev.langchain4j.model.chat.response.ChatResponse;
+import dev.langchain4j.model.chat.response.StreamingChatResponseHandler;
import dev.langchain4j.model.openai.OpenAiStreamingChatModel;
-import dev.langchain4j.model.output.Response;
import static dev.langchain4j.model.openai.OpenAiChatModelName.GPT_4_O_MINI;
@@ -20,15 +18,15 @@ public static void main(String[] args) {
System.out.println("Nr of chars: " + prompt.length());
System.out.println("Nr of tokens: " + model.estimateTokenCount(prompt));
- model.generate(prompt, new StreamingResponseHandler<AiMessage>() {
+ model.chat(prompt, new StreamingChatResponseHandler() {
@Override
- public void onNext(String token) {
- System.out.print(token);
+ public void onPartialResponse(String partialResponse) {
+ System.out.print(partialResponse);
}
@Override
- public void onComplete(Response<AiMessage> response) {
+ public void onCompleteResponse(ChatResponse completeResponse) {
System.out.println("\n\nDone streaming");
}
@@ -37,6 +35,5 @@ public void onError(Throwable error) {
System.out.println("Something went wrong: " + error.getMessage());
}
});
-
}
}
diff --git a/tutorials/src/main/java/_05_Memory.java b/tutorials/src/main/java/_05_Memory.java
index 978e7369..72049380 100644
--- a/tutorials/src/main/java/_05_Memory.java
+++ b/tutorials/src/main/java/_05_Memory.java
@@ -3,10 +3,10 @@
import dev.langchain4j.data.message.UserMessage;
import dev.langchain4j.memory.ChatMemory;
import dev.langchain4j.memory.chat.TokenWindowChatMemory;
-import dev.langchain4j.model.StreamingResponseHandler;
+import dev.langchain4j.model.chat.response.ChatResponse;
+import dev.langchain4j.model.chat.response.StreamingChatResponseHandler;
import dev.langchain4j.model.openai.OpenAiStreamingChatModel;
import dev.langchain4j.model.openai.OpenAiTokenizer;
-import dev.langchain4j.model.output.Response;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ExecutionException;
@@ -42,16 +42,16 @@ public static void main(String[] args) throws ExecutionException, InterruptedExc
CompletableFuture<AiMessage> futureAiMessage = new CompletableFuture<>();
- StreamingResponseHandler<AiMessage> handler = new StreamingResponseHandler<AiMessage>() {
+ StreamingChatResponseHandler handler = new StreamingChatResponseHandler() {
@Override
- public void onNext(String token) {
- System.out.print(token);
+ public void onPartialResponse(String partialResponse) {
+ System.out.print(partialResponse);
}
@Override
- public void onComplete(Response<AiMessage> response) {
- futureAiMessage.complete(response.content());
+ public void onCompleteResponse(ChatResponse completeResponse) {
+ futureAiMessage.complete(completeResponse.aiMessage());
}
@Override
@@ -59,7 +59,7 @@ public void onError(Throwable throwable) {
}
};
- model.generate(chatMemory.messages(), handler);
+ model.chat(chatMemory.messages(), handler);
chatMemory.add(futureAiMessage.get());
UserMessage userMessage2 = userMessage(
@@ -70,6 +70,6 @@ public void onError(Throwable throwable) {
System.out.println("\n\n[User]: " + userMessage2.text());
System.out.print("[LLM]: ");
- model.generate(chatMemory.messages(), handler);
+ model.chat(chatMemory.messages(), handler);
}
}
diff --git a/tutorials/src/main/java/_06_FewShot.java b/tutorials/src/main/java/_06_FewShot.java
index c9911907..ca30011f 100644
--- a/tutorials/src/main/java/_06_FewShot.java
+++ b/tutorials/src/main/java/_06_FewShot.java
@@ -1,7 +1,8 @@
import dev.langchain4j.data.message.AiMessage;
import dev.langchain4j.data.message.ChatMessage;
import dev.langchain4j.data.message.UserMessage;
-import dev.langchain4j.model.StreamingResponseHandler;
+import dev.langchain4j.model.chat.response.ChatResponse;
+import dev.langchain4j.model.chat.response.StreamingChatResponseHandler;
import dev.langchain4j.model.openai.OpenAiStreamingChatModel;
import java.util.ArrayList;
@@ -54,15 +55,20 @@ public static void main(String[] args) {
System.out.println("[User]: " + customerComplaint.text());
System.out.print("[LLM]: ");
- model.generate(fewShotHistory, new StreamingResponseHandler<AiMessage>() {
+ model.chat(fewShotHistory, new StreamingChatResponseHandler() {
@Override
- public void onNext(String token) {
- System.out.print(token);
+ public void onPartialResponse(String partialResponse) {
+ System.out.print(partialResponse);
+ }
+
+ @Override
+ public void onCompleteResponse(ChatResponse completeResponse) {
}
@Override
public void onError(Throwable throwable) {
+ throwable.printStackTrace();
}
});
diff --git a/vertex-ai-gemini-examples/pom.xml b/vertex-ai-gemini-examples/pom.xml
index 8f7c874e..afab8f38 100644
--- a/vertex-ai-gemini-examples/pom.xml
+++ b/vertex-ai-gemini-examples/pom.xml
@@ -6,7 +6,7 @@
dev.langchain4j
vertex-ai-gemini-examples
- 1.0.0-alpha1
+ 1.0.0-beta1
17
@@ -19,13 +19,13 @@
dev.langchain4j
langchain4j-vertex-ai-gemini
- 1.0.0-alpha1
+ 1.0.0-beta1
dev.langchain4j
langchain4j
- 1.0.0-alpha1
+ 1.0.0-beta1
diff --git a/vertex-ai-gemini-examples/src/main/java/VertexAiGeminiChatModelExamples.java b/vertex-ai-gemini-examples/src/main/java/VertexAiGeminiChatModelExamples.java
index a505798c..3f79e201 100644
--- a/vertex-ai-gemini-examples/src/main/java/VertexAiGeminiChatModelExamples.java
+++ b/vertex-ai-gemini-examples/src/main/java/VertexAiGeminiChatModelExamples.java
@@ -1,7 +1,7 @@
-import dev.langchain4j.data.message.AiMessage;
-import dev.langchain4j.model.StreamingResponseHandler;
import dev.langchain4j.model.chat.ChatLanguageModel;
import dev.langchain4j.model.chat.StreamingChatLanguageModel;
+import dev.langchain4j.model.chat.response.ChatResponse;
+import dev.langchain4j.model.chat.response.StreamingChatResponseHandler;
import dev.langchain4j.model.vertexai.VertexAiGeminiChatModel;
import dev.langchain4j.model.vertexai.VertexAiGeminiStreamingChatModel;
@@ -25,7 +25,7 @@ public static void main(String[] args) {
.modelName(MODEL_NAME)
.build();
- String response = model.generate("Tell me a joke");
+ String response = model.chat("Tell me a joke");
System.out.println(response);
}
@@ -41,11 +41,15 @@ public static void main(String[] args) {
.modelName(MODEL_NAME)
.build();
- model.generate("Tell me a long joke", new StreamingResponseHandler<AiMessage>() {
+ model.chat("Tell me a long joke", new StreamingChatResponseHandler() {
@Override
- public void onNext(String token) {
- System.out.print(token);
+ public void onPartialResponse(String partialResponse) {
+ System.out.print(partialResponse);
+ }
+
+ @Override
+ public void onCompleteResponse(ChatResponse completeResponse) {
}
@Override
diff --git a/vertex-ai-gemini-examples/src/main/java/VertexAiGeminiChatModelTest.java b/vertex-ai-gemini-examples/src/main/java/VertexAiGeminiChatModelTest.java
index c4363ad4..d9fd72e8 100644
--- a/vertex-ai-gemini-examples/src/main/java/VertexAiGeminiChatModelTest.java
+++ b/vertex-ai-gemini-examples/src/main/java/VertexAiGeminiChatModelTest.java
@@ -6,14 +6,15 @@
import dev.langchain4j.data.message.UserMessage;
import dev.langchain4j.memory.chat.MessageWindowChatMemory;
import dev.langchain4j.model.chat.ChatLanguageModel;
-import dev.langchain4j.model.output.Response;
+import dev.langchain4j.model.chat.request.ChatRequest;
+import dev.langchain4j.model.chat.request.ChatRequestParameters;
+import dev.langchain4j.model.chat.response.ChatResponse;
import dev.langchain4j.model.vertexai.VertexAiGeminiChatModel;
import dev.langchain4j.service.AiServices;
import org.junit.jupiter.api.Test;
import java.util.List;
-import static java.util.Collections.singletonList;
import static org.assertj.core.api.Assertions.assertThat;
import static org.mockito.Mockito.*;
@@ -48,11 +49,16 @@ void Low_level_Tools_Example() {
List<ToolSpecification> toolSpecifications = ToolSpecifications.toolSpecificationsFrom(new Calculator());
- UserMessage userMessage = UserMessage.from("How much is 754 + 926?");
+ ChatRequest chatRequest = ChatRequest.builder()
+ .messages(UserMessage.from("How much is 754 + 926?"))
+ .parameters(ChatRequestParameters.builder()
+ .toolSpecifications(toolSpecifications)
+ .build())
+ .build();
- Response<AiMessage> response = model.generate(singletonList(userMessage), toolSpecifications);
+ ChatResponse chatResponse = model.chat(chatRequest);
- AiMessage aiMessage = response.content();
+ AiMessage aiMessage = chatResponse.aiMessage();
assertThat(aiMessage.hasToolExecutionRequests()).isTrue();
assertThat(aiMessage.toolExecutionRequests()).hasSize(1);
diff --git a/vespa-example/pom.xml b/vespa-example/pom.xml
index 6082e985..b84bb125 100644
--- a/vespa-example/pom.xml
+++ b/vespa-example/pom.xml
@@ -6,7 +6,7 @@
dev.langchain4j
vespa-example
- 1.0.0-alpha1
+ 1.0.0-beta1
17
@@ -19,13 +19,13 @@
dev.langchain4j
langchain4j-vespa
- 1.0.0-alpha1
+ 1.0.0-beta1
dev.langchain4j
langchain4j-embeddings-all-minilm-l6-v2
- 1.0.0-alpha1
+ 1.0.0-beta1
diff --git a/voyage-ai-examples/pom.xml b/voyage-ai-examples/pom.xml
index 0a6510fb..e674683e 100644
--- a/voyage-ai-examples/pom.xml
+++ b/voyage-ai-examples/pom.xml
@@ -6,7 +6,7 @@
dev.langchain4j
voyage-ai-examples
- 1.0.0-alpha1
+ 1.0.0-beta1
17
@@ -19,7 +19,7 @@
dev.langchain4j
langchain4j-voyage-ai
- 1.0.0-alpha1
+ 1.0.0-beta1
diff --git a/weaviate-example/pom.xml b/weaviate-example/pom.xml
index a2b53c7c..8fe1085c 100644
--- a/weaviate-example/pom.xml
+++ b/weaviate-example/pom.xml
@@ -6,7 +6,7 @@
dev.langchain4j
weaviate-example
- 1.0.0-alpha1
+ 1.0.0-beta1
17
@@ -19,13 +19,13 @@
dev.langchain4j
langchain4j-weaviate
- 1.0.0-alpha1
+ 1.0.0-beta1
dev.langchain4j
langchain4j-embeddings-all-minilm-l6-v2
- 1.0.0-alpha1
+ 1.0.0-beta1