diff --git a/.github/workflows/_test.yml b/.github/workflows/_test.yml
index 4316fc407e5209..5471ed76cbd87c 100644
--- a/.github/workflows/_test.yml
+++ b/.github/workflows/_test.yml
@@ -59,7 +59,8 @@ jobs:
env:
MIN_VERSIONS: ${{ steps.min-version.outputs.min-versions }}
run: |
- poetry run pip install $MIN_VERSIONS
+ poetry run pip install uv
+ poetry run uv pip install $MIN_VERSIONS
make tests
working-directory: ${{ inputs.working-directory }}
diff --git a/docs/docs/concepts/streaming.mdx b/docs/docs/concepts/streaming.mdx
index 34dbd891ca521e..1f613c07dab312 100644
--- a/docs/docs/concepts/streaming.mdx
+++ b/docs/docs/concepts/streaming.mdx
@@ -39,7 +39,7 @@ In some cases, you may need to stream **custom data** that goes beyond the infor
## Streaming APIs
-LangChain two main APIs for streaming output in real-time. These APIs are supported by any component that implements the [Runnable Interface](/docs/concepts/runnables), including [LLMs](/docs/concepts/chat_models), [compiled LangGraph graphs](https://langchain-ai.github.io/langgraph/concepts/low_level/), and any Runnable generated with [LCEL](/docs/concepts/lcel).
+LangChain has two main APIs for streaming output in real-time. These APIs are supported by any component that implements the [Runnable Interface](/docs/concepts/runnables), including [LLMs](/docs/concepts/chat_models), [compiled LangGraph graphs](https://langchain-ai.github.io/langgraph/concepts/low_level/), and any Runnable generated with [LCEL](/docs/concepts/lcel).
1. sync [stream](https://python.langchain.com/api_reference/core/runnables/langchain_core.runnables.base.Runnable.html#langchain_core.runnables.base.Runnable.stream) and async [astream](https://python.langchain.com/api_reference/core/runnables/langchain_core.runnables.base.Runnable.html#langchain_core.runnables.base.Runnable.astream): Use these to stream outputs from individual Runnables (e.g., a chat model) as they are generated, or to stream any workflow created with LangGraph.
2. The async-only [astream_events](https://python.langchain.com/api_reference/core/runnables/langchain_core.runnables.base.Runnable.html#langchain_core.runnables.base.Runnable.astream_events): Use this API to access custom events and intermediate outputs from LLM applications built entirely with [LCEL](/docs/concepts/lcel). Note that this API is available, but not needed when working with LangGraph.
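Since this hunk documents the two streaming APIs, a minimal sketch of both may help; it assumes `langchain-openai` is installed, an `OPENAI_API_KEY` is configured, and the model name is illustrative only.

```python
# Minimal sketch of the two streaming APIs described above (assumptions:
# langchain-openai installed, OPENAI_API_KEY configured, model name arbitrary).
import asyncio

from langchain_openai import ChatOpenAI

model = ChatOpenAI(model="gpt-4o-mini")

# 1. stream(): yields output chunks synchronously as they are generated.
for chunk in model.stream("Tell me a joke."):
    print(chunk.content, end="", flush=True)


# 2. astream_events(): async-only; also surfaces intermediate events.
async def main() -> None:
    async for event in model.astream_events("Tell me a joke.", version="v2"):
        if event["event"] == "on_chat_model_stream":
            print(event["data"]["chunk"].content, end="", flush=True)


asyncio.run(main())
```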
diff --git a/docs/docs/how_to/custom_tools.ipynb b/docs/docs/how_to/custom_tools.ipynb
index 8becb03c7c91ca..4225569ab61758 100644
--- a/docs/docs/how_to/custom_tools.ipynb
+++ b/docs/docs/how_to/custom_tools.ipynb
@@ -169,7 +169,7 @@
" return a * max(b)\n",
"\n",
"\n",
- "multiply_by_max.args_schema.schema()"
+ "print(multiply_by_max.args_schema.model_json_schema())"
]
},
{
@@ -285,7 +285,7 @@
" return bar\n",
"\n",
"\n",
- "foo.args_schema.schema()"
+ "print(foo.args_schema.model_json_schema())"
]
},
{
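Both edits in this file track the Pydantic v2 rename of `BaseModel.schema()` to `model_json_schema()`. A standalone sketch of the rename, using a hypothetical model rather than the notebook's tool schemas:

```python
# Pydantic v2 deprecates BaseModel.schema() in favor of model_json_schema().
# The model below is hypothetical, for illustration only.
from pydantic import BaseModel


class MultiplyByMax(BaseModel):
    a: int
    b: list[int]


print(MultiplyByMax.model_json_schema())  # was: MultiplyByMax.schema()
```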
diff --git a/docs/docs/how_to/installation.mdx b/docs/docs/how_to/installation.mdx
index 081fe32fd85463..871ce87bd71ce8 100644
--- a/docs/docs/how_to/installation.mdx
+++ b/docs/docs/how_to/installation.mdx
@@ -31,7 +31,7 @@ By default, the dependencies needed to do that are NOT installed. You will need
## Ecosystem packages
With the exception of the `langsmith` SDK, all packages in the LangChain ecosystem depend on `langchain-core`, which contains base
-classes and abstractions that other packages use. The dependency graph below shows how the difference packages are related.
+classes and abstractions that other packages use. The dependency graph below shows how the different packages are related.
A directed arrow indicates that the source package depends on the target package:
![](/img/ecosystem_packages.png)
@@ -115,4 +115,4 @@ If you want to install a package from source, you can do so by cloning the [main
pip install -e .
```
-LangGraph, LangSmith SDK, and certain integration packages live outside the main LangChain repo. You can see [all repos here](https://github.com/langchain-ai).
\ No newline at end of file
+LangGraph, LangSmith SDK, and certain integration packages live outside the main LangChain repo. You can see [all repos here](https://github.com/langchain-ai).
diff --git a/docs/docs/how_to/multi_vector.ipynb b/docs/docs/how_to/multi_vector.ipynb
index a68086b14fa2d2..e4c1b1020fdc3a 100644
--- a/docs/docs/how_to/multi_vector.ipynb
+++ b/docs/docs/how_to/multi_vector.ipynb
@@ -292,7 +292,7 @@
"id": "3faa9fde-1b09-4849-a815-8b2e89c30a02",
"metadata": {},
"source": [
- "Note that we can [batch](https://python.langchain.com/api_reference/core/runnables/langchain_core.runnables.base.Runnable.html#langchain_core.runnables.base.Runnable) the chain accross documents:"
+ "Note that we can [batch](https://python.langchain.com/api_reference/core/runnables/langchain_core.runnables.base.Runnable.html#langchain_core.runnables.base.Runnable) the chain across documents:"
]
},
{
diff --git a/docs/docs/how_to/structured_output.ipynb b/docs/docs/how_to/structured_output.ipynb
index c00755650c1c9a..66d961491d265f 100644
--- a/docs/docs/how_to/structured_output.ipynb
+++ b/docs/docs/how_to/structured_output.ipynb
@@ -165,6 +165,8 @@
}
],
"source": [
+ "from typing import Optional\n",
+ "\n",
"from typing_extensions import Annotated, TypedDict\n",
"\n",
"\n",
@@ -206,10 +208,10 @@
{
"data": {
"text/plain": [
- "{'setup': 'Why was the cat sitting on the computer?',\n",
- " 'punchline': 'Because it wanted to keep an eye on the mouse!',\n",
- " 'rating': 7}"
- ]
+ "{'setup': 'Why was the cat sitting on the computer?',\n",
+ " 'punchline': 'Because it wanted to keep an eye on the mouse!',\n",
+ " 'rating': 7}"
+ ]
},
"execution_count": 4,
"metadata": {},
diff --git a/docs/docs/integrations/chat/friendli.ipynb b/docs/docs/integrations/chat/friendli.ipynb
index dab9e0bdd1f745..019f62a31bfa6b 100644
--- a/docs/docs/integrations/chat/friendli.ipynb
+++ b/docs/docs/integrations/chat/friendli.ipynb
@@ -2,10 +2,14 @@
"cells": [
{
"cell_type": "raw",
- "metadata": {},
+ "metadata": {
+ "vscode": {
+ "languageId": "raw"
+ }
+ },
"source": [
"---\n",
- "sidebar_label: Friendli\n",
+ "sidebar_label: ChatFriendli\n",
"---"
]
},
@@ -37,7 +41,7 @@
},
{
"cell_type": "code",
- "execution_count": 1,
+ "execution_count": 2,
"metadata": {},
"outputs": [],
"source": [
@@ -57,13 +61,13 @@
},
{
"cell_type": "code",
- "execution_count": 2,
+ "execution_count": 3,
"metadata": {},
"outputs": [],
"source": [
"from langchain_community.chat_models.friendli import ChatFriendli\n",
"\n",
- "chat = ChatFriendli(model=\"llama-2-13b-chat\", max_tokens=100, temperature=0)"
+ "chat = ChatFriendli(model=\"meta-llama-3.1-8b-instruct\", max_tokens=100, temperature=0)"
]
},
{
@@ -84,16 +88,16 @@
},
{
"cell_type": "code",
- "execution_count": 3,
+ "execution_count": 4,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
- "AIMessage(content=\" Knock, knock!\\nWho's there?\\nCows go.\\nCows go who?\\nMOO!\")"
+ "AIMessage(content=\"Why don't eggs tell jokes? They'd crack each other up.\", additional_kwargs={}, response_metadata={}, id='run-d47c1056-54e8-4ea9-ad63-07cf74b834b7-0')"
]
},
- "execution_count": 3,
+ "execution_count": 4,
"metadata": {},
"output_type": "execute_result"
}
@@ -111,17 +115,17 @@
},
{
"cell_type": "code",
- "execution_count": 4,
+ "execution_count": 5,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
- "[AIMessage(content=\" Knock, knock!\\nWho's there?\\nCows go.\\nCows go who?\\nMOO!\"),\n",
- " AIMessage(content=\" Knock, knock!\\nWho's there?\\nCows go.\\nCows go who?\\nMOO!\")]"
+ "[AIMessage(content=\"Why don't eggs tell jokes? They'd crack each other up.\", additional_kwargs={}, response_metadata={}, id='run-36775b84-2a7a-48f0-8c68-df23ffffe4b2-0'),\n",
+ " AIMessage(content=\"Why don't eggs tell jokes? They'd crack each other up.\", additional_kwargs={}, response_metadata={}, id='run-b204be41-bc06-4d3a-9f74-e66ab1e60e4f-0')]"
]
},
- "execution_count": 4,
+ "execution_count": 5,
"metadata": {},
"output_type": "execute_result"
}
@@ -132,16 +136,16 @@
},
{
"cell_type": "code",
- "execution_count": 5,
+ "execution_count": 6,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
- "LLMResult(generations=[[ChatGeneration(text=\" Knock, knock!\\nWho's there?\\nCows go.\\nCows go who?\\nMOO!\", message=AIMessage(content=\" Knock, knock!\\nWho's there?\\nCows go.\\nCows go who?\\nMOO!\"))], [ChatGeneration(text=\" Knock, knock!\\nWho's there?\\nCows go.\\nCows go who?\\nMOO!\", message=AIMessage(content=\" Knock, knock!\\nWho's there?\\nCows go.\\nCows go who?\\nMOO!\"))]], llm_output={}, run=[RunInfo(run_id=UUID('a0c2d733-6971-4ae7-beea-653856f4e57c')), RunInfo(run_id=UUID('f3d35e44-ac9a-459a-9e4b-b8e3a73a91e1'))])"
+ "LLMResult(generations=[[ChatGeneration(text=\"Why don't eggs tell jokes? They'd crack each other up.\", message=AIMessage(content=\"Why don't eggs tell jokes? They'd crack each other up.\", additional_kwargs={}, response_metadata={}, id='run-2e4cb949-8c51-40d5-92a0-cd0ac577db83-0'))], [ChatGeneration(text=\"Why don't eggs tell jokes? They'd crack each other up.\", message=AIMessage(content=\"Why don't eggs tell jokes? They'd crack each other up.\", additional_kwargs={}, response_metadata={}, id='run-afcdd1be-463c-4e50-9731-7a9f5958e396-0'))]], llm_output={}, run=[RunInfo(run_id=UUID('2e4cb949-8c51-40d5-92a0-cd0ac577db83')), RunInfo(run_id=UUID('afcdd1be-463c-4e50-9731-7a9f5958e396'))], type='LLMResult')"
]
},
- "execution_count": 5,
+ "execution_count": 6,
"metadata": {},
"output_type": "execute_result"
}
@@ -152,18 +156,14 @@
},
{
"cell_type": "code",
- "execution_count": 6,
+ "execution_count": 8,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
- " Knock, knock!\n",
- "Who's there?\n",
- "Cows go.\n",
- "Cows go who?\n",
- "MOO!"
+ "Why don't eggs tell jokes? They'd crack each other up."
]
}
],
@@ -181,16 +181,16 @@
},
{
"cell_type": "code",
- "execution_count": 7,
+ "execution_count": 9,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
- "AIMessage(content=\" Knock, knock!\\nWho's there?\\nCows go.\\nCows go who?\\nMOO!\")"
+ "AIMessage(content=\"Why don't eggs tell jokes? They'd crack each other up.\", additional_kwargs={}, response_metadata={}, id='run-ba8062fb-68af-47b8-bd7b-d1e01b914744-0')"
]
},
- "execution_count": 7,
+ "execution_count": 9,
"metadata": {},
"output_type": "execute_result"
}
@@ -201,17 +201,17 @@
},
{
"cell_type": "code",
- "execution_count": 8,
+ "execution_count": 10,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
- "[AIMessage(content=\" Knock, knock!\\nWho's there?\\nCows go.\\nCows go who?\\nMOO!\"),\n",
- " AIMessage(content=\" Knock, knock!\\nWho's there?\\nCows go.\\nCows go who?\\nMOO!\")]"
+ "[AIMessage(content=\"Why don't eggs tell jokes? They'd crack each other up.\", additional_kwargs={}, response_metadata={}, id='run-5d2c77ab-2637-45da-8bbe-1b1f18a22369-0'),\n",
+ " AIMessage(content=\"Why don't eggs tell jokes? They'd crack each other up.\", additional_kwargs={}, response_metadata={}, id='run-f1338470-8b52-4d6e-9428-a694a08ae484-0')]"
]
},
- "execution_count": 8,
+ "execution_count": 10,
"metadata": {},
"output_type": "execute_result"
}
@@ -222,16 +222,16 @@
},
{
"cell_type": "code",
- "execution_count": 9,
+ "execution_count": 11,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
- "LLMResult(generations=[[ChatGeneration(text=\" Knock, knock!\\nWho's there?\\nCows go.\\nCows go who?\\nMOO!\", message=AIMessage(content=\" Knock, knock!\\nWho's there?\\nCows go.\\nCows go who?\\nMOO!\"))], [ChatGeneration(text=\" Knock, knock!\\nWho's there?\\nCows go.\\nCows go who?\\nMOO!\", message=AIMessage(content=\" Knock, knock!\\nWho's there?\\nCows go.\\nCows go who?\\nMOO!\"))]], llm_output={}, run=[RunInfo(run_id=UUID('f2255321-2d8e-41cc-adbd-3f4facec7573')), RunInfo(run_id=UUID('fcc297d0-6ca9-48cb-9d86-e6f78cade8ee'))])"
+ "LLMResult(generations=[[ChatGeneration(text=\"Why don't eggs tell jokes? They'd crack each other up.\", message=AIMessage(content=\"Why don't eggs tell jokes? They'd crack each other up.\", additional_kwargs={}, response_metadata={}, id='run-d4e44569-39cc-40cc-93fc-de53e599fd51-0'))], [ChatGeneration(text=\"Why don't eggs tell jokes? They'd crack each other up.\", message=AIMessage(content=\"Why don't eggs tell jokes? They'd crack each other up.\", additional_kwargs={}, response_metadata={}, id='run-54647cc2-bee3-4154-ad00-2e547993e6d7-0'))]], llm_output={}, run=[RunInfo(run_id=UUID('d4e44569-39cc-40cc-93fc-de53e599fd51')), RunInfo(run_id=UUID('54647cc2-bee3-4154-ad00-2e547993e6d7'))], type='LLMResult')"
]
},
- "execution_count": 9,
+ "execution_count": 11,
"metadata": {},
"output_type": "execute_result"
}
@@ -242,18 +242,14 @@
},
{
"cell_type": "code",
- "execution_count": 10,
+ "execution_count": 12,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
- " Knock, knock!\n",
- "Who's there?\n",
- "Cows go.\n",
- "Cows go who?\n",
- "MOO!"
+ "Why don't eggs tell jokes? They'd crack each other up."
]
}
],
@@ -265,7 +261,7 @@
],
"metadata": {
"kernelspec": {
- "display_name": "langchain",
+ "display_name": ".venv",
"language": "python",
"name": "python3"
},
@@ -279,7 +275,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
- "version": "3.11.7"
+ "version": "3.12.2"
}
},
"nbformat": 4,
diff --git a/docs/docs/integrations/chat/modelscope_chat_endpoint.ipynb b/docs/docs/integrations/chat/modelscope_chat_endpoint.ipynb
new file mode 100644
index 00000000000000..4cbf9e959d6997
--- /dev/null
+++ b/docs/docs/integrations/chat/modelscope_chat_endpoint.ipynb
@@ -0,0 +1,247 @@
+{
+ "cells": [
+ {
+ "cell_type": "raw",
+ "id": "afaf8039",
+ "metadata": {},
+ "source": [
+ "---\n",
+ "sidebar_label: ModelScope\n",
+ "---"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "e49f1e0d",
+ "metadata": {},
+ "source": [
+ "# ModelScopeChatEndpoint\n",
+ "\n",
+ "\n",
+ "ModelScope ([Home](https://www.modelscope.cn/) | [GitHub](https://github.com/modelscope/modelscope)) is built upon the notion of “Model-as-a-Service” (MaaS). It seeks to bring together most advanced machine learning models from the AI community, and streamlines the process of leveraging AI models in real-world applications. The core ModelScope library open-sourced in this repository provides the interfaces and implementations that allow developers to perform model inference, training and evaluation. \n",
+ "\n",
+ "This will help you getting started with ModelScope Chat Endpoint.\n",
+ "\n",
+ "\n",
+ "## Overview\n",
+ "### Integration details\n",
+ "\n",
+ "|Provider| Class | Package | Local | Serializable | Package downloads | Package latest |\n",
+ "|:---:|:---:|:---:|:---:|:---:|:---:|:---:|\n",
+ "|[ModelScope](/docs/integrations/providers/modelscope/)| ModelScopeChatEndpoint | [langchain-modelscope-integration](https://pypi.org/project/langchain-modelscope-integration/) | ❌ | ❌ | ![PyPI - Downloads](https://img.shields.io/pypi/dm/langchain-modelscope-integration?style=flat-square&label=%20) | ![PyPI - Version](https://img.shields.io/pypi/v/langchain-modelscope-integration?style=flat-square&label=%20) |\n",
+ "\n",
+ "\n",
+ "## Setup\n",
+ "\n",
+ "To access ModelScope chat endpoint you'll need to create a ModelScope account, get an SDK token, and install the `langchain-modelscope-integration` integration package.\n",
+ "\n",
+ "### Credentials\n",
+ "\n",
+ "Head to [ModelScope](https://modelscope.cn/) to sign up to ModelScope and generate an [SDK token](https://modelscope.cn/my/myaccesstoken). Once you've done this set the `MODELSCOPE_SDK_TOKEN` environment variable:\n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 1,
+ "id": "433e8d2b-9519-4b49-b2c4-7ab65b046c94",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "import getpass\n",
+ "import os\n",
+ "\n",
+ "if not os.getenv(\"MODELSCOPE_SDK_TOKEN\"):\n",
+ " os.environ[\"MODELSCOPE_SDK_TOKEN\"] = getpass.getpass(\n",
+ " \"Enter your ModelScope SDK token: \"\n",
+ " )"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "0730d6a1-c893-4840-9817-5e5251676d5d",
+ "metadata": {},
+ "source": [
+ "### Installation\n",
+ "\n",
+ "The LangChain ModelScope integration lives in the `langchain-modelscope-integration` package:"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "652d6238-1f87-422a-b135-f5abbb8652fc",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "%pip install -qU langchain-modelscope-integration"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "a38cde65-254d-4219-a441-068766c0d4b5",
+ "metadata": {},
+ "source": [
+ "## Instantiation\n",
+ "\n",
+ "Now we can instantiate our model object and generate chat completions:\n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 3,
+ "id": "cb09c344-1836-4e0c-acf8-11d13ac1dbae",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "from langchain_modelscope import ModelScopeChatEndpoint\n",
+ "\n",
+ "llm = ModelScopeChatEndpoint(\n",
+ " model=\"Qwen/Qwen2.5-Coder-32B-Instruct\",\n",
+ " temperature=0,\n",
+ " max_tokens=1024,\n",
+ " timeout=60,\n",
+ " max_retries=2,\n",
+ " # other params...\n",
+ ")"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "2b4f3e15",
+ "metadata": {},
+ "source": [
+ "## Invocation\n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 4,
+ "id": "62e0dbc3",
+ "metadata": {
+ "tags": []
+ },
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "AIMessage(content='我喜欢编程。', additional_kwargs={'refusal': None}, response_metadata={'token_usage': {'completion_tokens': 3, 'prompt_tokens': 33, 'total_tokens': 36, 'completion_tokens_details': None, 'prompt_tokens_details': None}, 'model_name': 'qwen2.5-coder-32b-instruct', 'system_fingerprint': None, 'finish_reason': 'stop', 'logprobs': None}, id='run-60bb3461-60ae-4c0b-8997-ab55ef77fcd6-0', usage_metadata={'input_tokens': 33, 'output_tokens': 3, 'total_tokens': 36, 'input_token_details': {}, 'output_token_details': {}})"
+ ]
+ },
+ "execution_count": 4,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "messages = [\n",
+ " (\n",
+ " \"system\",\n",
+ " \"You are a helpful assistant that translates English to Chinese. Translate the user sentence.\",\n",
+ " ),\n",
+ " (\"human\", \"I love programming.\"),\n",
+ "]\n",
+ "ai_msg = llm.invoke(messages)\n",
+ "ai_msg"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 5,
+ "id": "d86145b3-bfef-46e8-b227-4dda5c9c2705",
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "我喜欢编程。\n"
+ ]
+ }
+ ],
+ "source": [
+ "print(ai_msg.content)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "18e2bfc0-7e78-4528-a73f-499ac150dca8",
+ "metadata": {},
+ "source": [
+ "## Chaining\n",
+ "\n",
+ "We can [chain](/docs/how_to/sequence/) our model with a prompt template like so:\n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 6,
+ "id": "e197d1d7-a070-4c96-9f8a-a0e86d046e0b",
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "AIMessage(content='我喜欢编程。', additional_kwargs={'refusal': None}, response_metadata={'token_usage': {'completion_tokens': 3, 'prompt_tokens': 28, 'total_tokens': 31, 'completion_tokens_details': None, 'prompt_tokens_details': None}, 'model_name': 'qwen2.5-coder-32b-instruct', 'system_fingerprint': None, 'finish_reason': 'stop', 'logprobs': None}, id='run-9f011a3a-9a11-4759-8d16-5b1843a78862-0', usage_metadata={'input_tokens': 28, 'output_tokens': 3, 'total_tokens': 31, 'input_token_details': {}, 'output_token_details': {}})"
+ ]
+ },
+ "execution_count": 6,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "from langchain_core.prompts import ChatPromptTemplate\n",
+ "\n",
+ "prompt = ChatPromptTemplate(\n",
+ " [\n",
+ " (\n",
+ " \"system\",\n",
+ " \"You are a helpful assistant that translates {input_language} to {output_language}.\",\n",
+ " ),\n",
+ " (\"human\", \"{input}\"),\n",
+ " ]\n",
+ ")\n",
+ "\n",
+ "chain = prompt | llm\n",
+ "chain.invoke(\n",
+ " {\n",
+ " \"input_language\": \"English\",\n",
+ " \"output_language\": \"Chinese\",\n",
+ " \"input\": \"I love programming.\",\n",
+ " }\n",
+ ")"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "3a5bb5ca-c3ae-4a58-be67-2cd18574b9a3",
+ "metadata": {},
+ "source": [
+ "## API reference\n",
+ "\n",
+ "For detailed documentation of all ModelScopeChatEndpoint features and configurations head to the reference: https://modelscope.cn/docs/model-service/API-Inference/intro\n"
+ ]
+ }
+ ],
+ "metadata": {
+ "kernelspec": {
+ "display_name": "Python 3 (ipykernel)",
+ "language": "python",
+ "name": "python3"
+ },
+ "language_info": {
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": 3
+ },
+ "file_extension": ".py",
+ "mimetype": "text/x-python",
+ "name": "python",
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython3",
+ "version": "3.10.16"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 5
+}
diff --git a/docs/docs/integrations/document_loaders/pypdfloader.ipynb b/docs/docs/integrations/document_loaders/pypdfloader.ipynb
index 8dc2eca23feedc..b173a654ed1816 100644
--- a/docs/docs/integrations/document_loaders/pypdfloader.ipynb
+++ b/docs/docs/integrations/document_loaders/pypdfloader.ipynb
@@ -41,13 +41,13 @@
},
{
"cell_type": "code",
+ "execution_count": null,
"metadata": {},
+ "outputs": [],
"source": [
"# os.environ[\"LANGSMITH_API_KEY\"] = getpass.getpass(\"Enter your LangSmith API key: \")\n",
"# os.environ[\"LANGSMITH_TRACING\"] = \"true\""
- ],
- "outputs": [],
- "execution_count": null
+ ]
},
{
"cell_type": "markdown",
@@ -60,10 +60,12 @@
},
{
"cell_type": "code",
+ "execution_count": null,
"metadata": {},
- "source": "%pip install -qU langchain_community pypdf",
"outputs": [],
- "execution_count": null
+ "source": [
+ "%pip install -qU langchain_community pypdf"
+ ]
},
{
"cell_type": "markdown",
@@ -76,20 +78,20 @@
},
{
"cell_type": "code",
+ "execution_count": 1,
"metadata": {
"ExecuteTime": {
"end_time": "2025-01-02T08:57:51.358924Z",
"start_time": "2025-01-02T08:57:50.664475Z"
}
},
+ "outputs": [],
"source": [
"from langchain_community.document_loaders import PyPDFLoader\n",
"\n",
"file_path = \"./example_data/layout-parser-paper.pdf\"\n",
"loader = PyPDFLoader(file_path)"
- ],
- "outputs": [],
- "execution_count": 1
+ ]
},
{
"cell_type": "markdown",
@@ -100,16 +102,13 @@
},
{
"cell_type": "code",
+ "execution_count": 2,
"metadata": {
"ExecuteTime": {
"end_time": "2025-01-02T08:57:53.360193Z",
"start_time": "2025-01-02T08:57:53.046951Z"
}
},
- "source": [
- "docs = loader.load()\n",
- "docs[0]"
- ],
"outputs": [
{
"data": {
@@ -122,21 +121,20 @@
"output_type": "execute_result"
}
],
- "execution_count": 2
+ "source": [
+ "docs = loader.load()\n",
+ "docs[0]"
+ ]
},
{
"cell_type": "code",
+ "execution_count": 3,
"metadata": {
"ExecuteTime": {
"end_time": "2025-01-02T08:57:54.433578Z",
"start_time": "2025-01-02T08:57:54.428990Z"
}
},
- "source": [
- "import pprint\n",
- "\n",
- "pprint.pp(docs[0].metadata)"
- ],
"outputs": [
{
"name": "stdout",
@@ -159,7 +157,11 @@
]
}
],
- "execution_count": 3
+ "source": [
+ "import pprint\n",
+ "\n",
+ "pprint.pp(docs[0].metadata)"
+ ]
},
{
"cell_type": "markdown",
@@ -170,23 +172,13 @@
},
{
"cell_type": "code",
+ "execution_count": 4,
"metadata": {
"ExecuteTime": {
"end_time": "2025-01-02T08:57:56.406339Z",
"start_time": "2025-01-02T08:57:56.083505Z"
}
},
- "source": [
- "pages = []\n",
- "for doc in loader.lazy_load():\n",
- " pages.append(doc)\n",
- " if len(pages) >= 10:\n",
- " # do some paged operation, e.g.\n",
- " # index.upsert(page)\n",
- "\n",
- " pages = []\n",
- "len(pages)"
- ],
"outputs": [
{
"data": {
@@ -199,20 +191,27 @@
"output_type": "execute_result"
}
],
- "execution_count": 4
+ "source": [
+ "pages = []\n",
+ "for doc in loader.lazy_load():\n",
+ " pages.append(doc)\n",
+ " if len(pages) >= 10:\n",
+ " # do some paged operation, e.g.\n",
+ " # index.upsert(page)\n",
+ "\n",
+ " pages = []\n",
+ "len(pages)"
+ ]
},
{
"cell_type": "code",
+ "execution_count": 5,
"metadata": {
"ExecuteTime": {
"end_time": "2025-01-02T08:57:57.168112Z",
"start_time": "2025-01-02T08:57:57.164745Z"
}
},
- "source": [
- "print(pages[0].page_content[:100])\n",
- "pprint.pp(pages[0].metadata)"
- ],
"outputs": [
{
"name": "stdout",
@@ -237,7 +236,10 @@
]
}
],
- "execution_count": 5
+ "source": [
+ "print(pages[0].page_content[:100])\n",
+ "pprint.pp(pages[0].metadata)"
+ ]
},
{
"cell_type": "markdown",
@@ -278,21 +280,13 @@
},
{
"cell_type": "code",
+ "execution_count": 6,
"metadata": {
"ExecuteTime": {
"end_time": "2025-01-02T08:58:01.305246Z",
"start_time": "2025-01-02T08:58:01.068932Z"
}
},
- "source": [
- "loader = PyPDFLoader(\n",
- " \"./example_data/layout-parser-paper.pdf\",\n",
- " mode=\"page\",\n",
- ")\n",
- "docs = loader.load()\n",
- "print(len(docs))\n",
- "pprint.pp(docs[0].metadata)"
- ],
"outputs": [
{
"name": "stdout",
@@ -316,7 +310,15 @@
]
}
],
- "execution_count": 6
+ "source": [
+ "loader = PyPDFLoader(\n",
+ " \"./example_data/layout-parser-paper.pdf\",\n",
+ " mode=\"page\",\n",
+ ")\n",
+ "docs = loader.load()\n",
+ "print(len(docs))\n",
+ "pprint.pp(docs[0].metadata)"
+ ]
},
{
"cell_type": "markdown",
@@ -330,21 +332,13 @@
},
{
"cell_type": "code",
+ "execution_count": 7,
"metadata": {
"ExecuteTime": {
"end_time": "2025-01-02T08:58:04.529532Z",
"start_time": "2025-01-02T08:58:04.241743Z"
}
},
- "source": [
- "loader = PyPDFLoader(\n",
- " \"./example_data/layout-parser-paper.pdf\",\n",
- " mode=\"single\",\n",
- ")\n",
- "docs = loader.load()\n",
- "print(len(docs))\n",
- "pprint.pp(docs[0].metadata)"
- ],
"outputs": [
{
"name": "stdout",
@@ -367,7 +361,15 @@
]
}
],
- "execution_count": 7
+ "source": [
+ "loader = PyPDFLoader(\n",
+ " \"./example_data/layout-parser-paper.pdf\",\n",
+ " mode=\"single\",\n",
+ ")\n",
+ "docs = loader.load()\n",
+ "print(len(docs))\n",
+ "pprint.pp(docs[0].metadata)"
+ ]
},
{
"cell_type": "markdown",
@@ -381,21 +383,13 @@
},
{
"cell_type": "code",
+ "execution_count": 8,
"metadata": {
"ExecuteTime": {
"end_time": "2025-01-02T08:58:06.894917Z",
"start_time": "2025-01-02T08:58:06.612625Z"
}
},
- "source": [
- "loader = PyPDFLoader(\n",
- " \"./example_data/layout-parser-paper.pdf\",\n",
- " mode=\"single\",\n",
- " pages_delimitor=\"\\n-------THIS IS A CUSTOM END OF PAGE-------\\n\",\n",
- ")\n",
- "docs = loader.load()\n",
- "print(docs[0].page_content[:5780])"
- ],
"outputs": [
{
"name": "stdout",
@@ -495,7 +489,15 @@
]
}
],
- "execution_count": 8
+ "source": [
+ "loader = PyPDFLoader(\n",
+ " \"./example_data/layout-parser-paper.pdf\",\n",
+ " mode=\"single\",\n",
+ " pages_delimitor=\"\\n-------THIS IS A CUSTOM END OF PAGE-------\\n\",\n",
+ ")\n",
+ "docs = loader.load()\n",
+ "print(docs[0].page_content[:5780])"
+ ]
},
{
"cell_type": "markdown",
@@ -528,15 +530,13 @@
},
{
"cell_type": "code",
+ "execution_count": 9,
"metadata": {
"ExecuteTime": {
"end_time": "2025-01-02T08:58:11.700687Z",
"start_time": "2025-01-02T08:58:10.340863Z"
}
},
- "source": [
- "%pip install -qU rapidocr-onnxruntime"
- ],
"outputs": [
{
"name": "stdout",
@@ -546,31 +546,19 @@
]
}
],
- "execution_count": 9
+ "source": [
+ "%pip install -qU rapidocr-onnxruntime"
+ ]
},
{
"cell_type": "code",
+ "execution_count": 10,
"metadata": {
"ExecuteTime": {
"end_time": "2025-01-02T08:58:39.423289Z",
"start_time": "2025-01-02T08:58:16.965226Z"
}
},
- "source": [
- "from langchain_community.document_loaders.parsers.pdf import (\n",
- " convert_images_to_text_with_rapidocr,\n",
- ")\n",
- "\n",
- "loader = PyPDFLoader(\n",
- " \"./example_data/layout-parser-paper.pdf\",\n",
- " mode=\"page\",\n",
- " extract_images=True,\n",
- " images_to_text=convert_images_to_text_with_rapidocr(format=\"html\"),\n",
- ")\n",
- "docs = loader.load()\n",
- "\n",
- "print(docs[5].page_content)"
- ],
"outputs": [
{
"name": "stdout",
@@ -644,7 +632,21 @@
]
}
],
- "execution_count": 10
+ "source": [
+ "from langchain_community.document_loaders.parsers.pdf import (\n",
+ " convert_images_to_text_with_rapidocr,\n",
+ ")\n",
+ "\n",
+ "loader = PyPDFLoader(\n",
+ " \"./example_data/layout-parser-paper.pdf\",\n",
+ " mode=\"page\",\n",
+ " extract_images=True,\n",
+ " images_to_text=convert_images_to_text_with_rapidocr(format=\"html\"),\n",
+ ")\n",
+ "docs = loader.load()\n",
+ "\n",
+ "print(docs[5].page_content)"
+ ]
},
{
"cell_type": "markdown",
@@ -658,15 +660,13 @@
},
{
"cell_type": "code",
+ "execution_count": 11,
"metadata": {
"ExecuteTime": {
"end_time": "2025-01-02T08:58:43.775946Z",
"start_time": "2025-01-02T08:58:42.336954Z"
}
},
- "source": [
- "%pip install -qU pytesseract"
- ],
"outputs": [
{
"name": "stdout",
@@ -676,30 +676,19 @@
]
}
],
- "execution_count": 11
+ "source": [
+ "%pip install -qU pytesseract"
+ ]
},
{
"cell_type": "code",
+ "execution_count": 12,
"metadata": {
"ExecuteTime": {
"end_time": "2025-01-02T08:58:59.618970Z",
"start_time": "2025-01-02T08:58:49.364242Z"
}
},
- "source": [
- "from langchain_community.document_loaders.parsers.pdf import (\n",
- " convert_images_to_text_with_tesseract,\n",
- ")\n",
- "\n",
- "loader = PyPDFLoader(\n",
- " \"./example_data/layout-parser-paper.pdf\",\n",
- " mode=\"page\",\n",
- " extract_images=True,\n",
- " images_to_text=convert_images_to_text_with_tesseract(format=\"text\"),\n",
- ")\n",
- "docs = loader.load()\n",
- "print(docs[5].page_content)"
- ],
"outputs": [
{
"name": "stdout",
@@ -773,7 +762,20 @@
]
}
],
- "execution_count": 12
+ "source": [
+ "from langchain_community.document_loaders.parsers.pdf import (\n",
+ " convert_images_to_text_with_tesseract,\n",
+ ")\n",
+ "\n",
+ "loader = PyPDFLoader(\n",
+ " \"./example_data/layout-parser-paper.pdf\",\n",
+ " mode=\"page\",\n",
+ " extract_images=True,\n",
+ " images_to_text=convert_images_to_text_with_tesseract(format=\"text\"),\n",
+ ")\n",
+ "docs = loader.load()\n",
+ "print(docs[5].page_content)"
+ ]
},
{
"cell_type": "markdown",
@@ -782,15 +784,13 @@
},
{
"cell_type": "code",
+ "execution_count": 13,
"metadata": {
"ExecuteTime": {
"end_time": "2025-01-02T08:59:03.250256Z",
"start_time": "2025-01-02T08:59:01.833376Z"
}
},
- "source": [
- "%pip install -qU langchain_openai"
- ],
"outputs": [
{
"name": "stdout",
@@ -800,23 +800,19 @@
]
}
],
- "execution_count": 13
+ "source": [
+ "%pip install -qU langchain_openai"
+ ]
},
{
"cell_type": "code",
+ "execution_count": 14,
"metadata": {
"ExecuteTime": {
"end_time": "2025-01-02T08:59:03.400821Z",
"start_time": "2025-01-02T08:59:03.373435Z"
}
},
- "source": [
- "import os\n",
- "\n",
- "from dotenv import load_dotenv\n",
- "\n",
- "load_dotenv()"
- ],
"outputs": [
{
"data": {
@@ -829,51 +825,40 @@
"output_type": "execute_result"
}
],
- "execution_count": 14
+ "source": [
+ "import os\n",
+ "\n",
+ "from dotenv import load_dotenv\n",
+ "\n",
+ "load_dotenv()"
+ ]
},
{
"cell_type": "code",
+ "execution_count": 15,
"metadata": {
"ExecuteTime": {
"end_time": "2025-01-02T08:59:21.919330Z",
"start_time": "2025-01-02T08:59:21.916097Z"
}
},
+ "outputs": [],
"source": [
"from getpass import getpass\n",
"\n",
"if not os.environ.get(\"OPENAI_API_KEY\"):\n",
" os.environ[\"OPENAI_API_KEY\"] = getpass(\"OpenAI API key =\")"
- ],
- "outputs": [],
- "execution_count": 15
+ ]
},
{
"cell_type": "code",
+ "execution_count": 16,
"metadata": {
"ExecuteTime": {
"end_time": "2025-01-02T09:00:29.136755Z",
"start_time": "2025-01-02T08:59:27.838056Z"
}
},
- "source": [
- "from langchain_openai import ChatOpenAI\n",
- "\n",
- "from langchain_community.document_loaders.parsers.pdf import (\n",
- " convert_images_to_description,\n",
- ")\n",
- "\n",
- "loader = PyPDFLoader(\n",
- " \"./example_data/layout-parser-paper.pdf\",\n",
- " mode=\"page\",\n",
- " extract_images=True,\n",
- " images_to_text=convert_images_to_description(\n",
- " model=ChatOpenAI(model=\"gpt-4o\", max_tokens=1024), format=\"markdown\"\n",
- " ),\n",
- ")\n",
- "docs = loader.load()\n",
- "print(docs[5].page_content)"
- ],
"outputs": [
{
"name": "stdout",
@@ -934,7 +919,23 @@
]
}
],
- "execution_count": 16
+ "source": [
+ "from langchain_community.document_loaders.parsers.pdf import (\n",
+ " convert_images_to_description,\n",
+ ")\n",
+ "from langchain_openai import ChatOpenAI\n",
+ "\n",
+ "loader = PyPDFLoader(\n",
+ " \"./example_data/layout-parser-paper.pdf\",\n",
+ " mode=\"page\",\n",
+ " extract_images=True,\n",
+ " images_to_text=convert_images_to_description(\n",
+ " model=ChatOpenAI(model=\"gpt-4o\", max_tokens=1024), format=\"markdown\"\n",
+ " ),\n",
+ ")\n",
+ "docs = loader.load()\n",
+ "print(docs[5].page_content)"
+ ]
},
{
"cell_type": "markdown",
@@ -950,29 +951,13 @@
},
{
"cell_type": "code",
+ "execution_count": 17,
"metadata": {
"ExecuteTime": {
"end_time": "2025-01-02T09:00:37.080458Z",
"start_time": "2025-01-02T09:00:36.795081Z"
}
},
- "source": [
- "from langchain_community.document_loaders import FileSystemBlobLoader\n",
- "from langchain_community.document_loaders.generic import GenericLoader\n",
- "\n",
- "from langchain_community.document_loaders.parsers import PyPDFParser\n",
- "\n",
- "loader = GenericLoader(\n",
- " blob_loader=FileSystemBlobLoader(\n",
- " path=\"./example_data/\",\n",
- " glob=\"*.pdf\",\n",
- " ),\n",
- " blob_parser=PyPDFParser(),\n",
- ")\n",
- "docs = loader.load()\n",
- "print(docs[0].page_content)\n",
- "pprint.pp(docs[0].metadata)"
- ],
"outputs": [
{
"name": "stdout",
@@ -1035,7 +1020,22 @@
]
}
],
- "execution_count": 17
+ "source": [
+ "from langchain_community.document_loaders import FileSystemBlobLoader\n",
+ "from langchain_community.document_loaders.generic import GenericLoader\n",
+ "from langchain_community.document_loaders.parsers import PyPDFParser\n",
+ "\n",
+ "loader = GenericLoader(\n",
+ " blob_loader=FileSystemBlobLoader(\n",
+ " path=\"./example_data/\",\n",
+ " glob=\"*.pdf\",\n",
+ " ),\n",
+ " blob_parser=PyPDFParser(),\n",
+ ")\n",
+ "docs = loader.load()\n",
+ "print(docs[0].page_content)\n",
+ "pprint.pp(docs[0].metadata)"
+ ]
},
{
"cell_type": "markdown",
@@ -1044,7 +1044,9 @@
},
{
"cell_type": "code",
+ "execution_count": null,
"metadata": {},
+ "outputs": [],
"source": [
"from langchain_community.document_loaders import CloudBlobLoader\n",
"from langchain_community.document_loaders.generic import GenericLoader\n",
@@ -1059,9 +1061,7 @@
"docs = loader.load()\n",
"print(docs[0].page_content)\n",
"pprint.pp(docs[0].metadata)"
- ],
- "outputs": [],
- "execution_count": null
+ ]
},
{
"cell_type": "markdown",
diff --git a/docs/docs/integrations/document_loaders/recursive_url.ipynb b/docs/docs/integrations/document_loaders/recursive_url.ipynb
index 98c87977435e31..0820fb29356168 100644
--- a/docs/docs/integrations/document_loaders/recursive_url.ipynb
+++ b/docs/docs/integrations/document_loaders/recursive_url.ipynb
@@ -44,7 +44,7 @@
"metadata": {},
"outputs": [],
"source": [
- "%pip install -qU langchain-community beautifulsoup4"
+ "%pip install -qU langchain-community beautifulsoup4 lxml"
]
},
{
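The added `lxml` dependency presumably backs a BeautifulSoup-based extractor used elsewhere in this notebook (that cell is outside the hunk); a minimal sketch of the pattern:

```python
# Sketch of the extractor pattern that needs lxml (assumed usage; the
# notebook cell that calls BeautifulSoup is not part of this hunk).
import re

from bs4 import BeautifulSoup
from langchain_community.document_loaders import RecursiveUrlLoader

loader = RecursiveUrlLoader(
    "https://docs.python.org/3.9/",
    max_depth=2,
    # Each fetched page is parsed with the lxml parser installed above.
    extractor=lambda html: re.sub(
        r"\n\n+", "\n\n", BeautifulSoup(html, "lxml").text
    ).strip(),
)
docs = loader.load()
print(len(docs))
```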
diff --git a/docs/docs/integrations/document_loaders/slack.ipynb b/docs/docs/integrations/document_loaders/slack.ipynb
index 648ecda4e86fda..98a8243511bd5e 100644
--- a/docs/docs/integrations/document_loaders/slack.ipynb
+++ b/docs/docs/integrations/document_loaders/slack.ipynb
@@ -41,7 +41,7 @@
"source": [
"# Optionally set your Slack URL. This will give you proper URLs in the docs sources.\n",
"SLACK_WORKSPACE_URL = \"https://xxx.slack.com\"\n",
- "LOCAL_ZIPFILE = \"\" # Paste the local paty to your Slack zip file here.\n",
+ "LOCAL_ZIPFILE = \"\" # Paste the local path to your Slack zip file here.\n",
"\n",
"loader = SlackDirectoryLoader(LOCAL_ZIPFILE, SLACK_WORKSPACE_URL)"
]
diff --git a/docs/docs/integrations/document_loaders/unstructured_pdfloader.ipynb b/docs/docs/integrations/document_loaders/unstructured_pdfloader.ipynb_future
similarity index 100%
rename from docs/docs/integrations/document_loaders/unstructured_pdfloader.ipynb
rename to docs/docs/integrations/document_loaders/unstructured_pdfloader.ipynb_future
diff --git a/docs/docs/integrations/llms/friendli.ipynb b/docs/docs/integrations/llms/friendli.ipynb
index 529b6aec7a0ba1..74978a39aa5f7d 100644
--- a/docs/docs/integrations/llms/friendli.ipynb
+++ b/docs/docs/integrations/llms/friendli.ipynb
@@ -29,7 +29,7 @@
"Ensure the `langchain_community` and `friendli-client` are installed.\n",
"\n",
"```sh\n",
- "pip install -U langchain-community friendli-client.\n",
+ "pip install -U langchain-community friendli-client\n",
"```\n",
"\n",
"Sign in to [Friendli Suite](https://suite.friendli.ai/) to create a Personal Access Token, and set it as the `FRIENDLI_TOKEN` environment."
@@ -40,13 +40,20 @@
"execution_count": 1,
"metadata": {},
"outputs": [],
- "source": ["import getpass\nimport os\n\nif \"FRIENDLI_TOKEN\" not in os.environ:\n os.environ[\"FRIENDLI_TOKEN\"] = getpass.getpass(\"Friendi Personal Access Token: \")"]
+ "source": [
+ "import getpass\n",
+ "import os\n",
+ "\n",
+ "if \"FRIENDLI_TOKEN\" not in os.environ:\n",
+ " os.environ[\"FRIENDLI_TOKEN\"] = getpass.getpass(\"Friendi Personal Access Token: \")"
+ ]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
- "You can initialize a Friendli chat model with selecting the model you want to use. The default model is `mixtral-8x7b-instruct-v0-1`. You can check the available models at [docs.friendli.ai](https://docs.periflow.ai/guides/serverless_endpoints/pricing#text-generation-models)."
+ "You can initialize a Friendli chat model with selecting the model you want to use. \n",
+ "The default model is `meta-llama-3.1-8b-instruct`. You can check the available models at [friendli.ai/docs](https://friendli.ai/docs/guides/serverless_endpoints/pricing#text-generation-models)."
]
},
{
@@ -54,7 +61,11 @@
"execution_count": 2,
"metadata": {},
"outputs": [],
- "source": ["from langchain_community.llms.friendli import Friendli\n\nllm = Friendli(model=\"mixtral-8x7b-instruct-v0-1\", max_tokens=100, temperature=0)"]
+ "source": [
+ "from langchain_community.llms.friendli import Friendli\n",
+ "\n",
+ "llm = Friendli(model=\"meta-llama-3.1-8b-instruct\", max_tokens=100, temperature=0)"
+ ]
},
{
"cell_type": "markdown",
@@ -80,7 +91,7 @@
{
"data": {
"text/plain": [
- "'Username checks out.\\nUser 1: I\\'m not sure if you\\'re being sarcastic or not, but I\\'ll take it as a compliment.\\nUser 0: I\\'m not being sarcastic. I\\'m just saying that your username is very fitting.\\nUser 1: Oh, I thought you were saying that I\\'m a \"dumbass\" because I\\'m a \"dumbass\" who \"checks out\"'"
+ "\" I need a laugh.\\nHere's one: Why couldn't the bicycle stand up by itself?\\nBecause it was two-tired!\\nI hope that made you laugh! Do you want to hear another one? I have a million of 'em! (Okay, maybe not a million, but I have a few more where that came from!) What kind of joke are you in the mood for? A pun, a play on words, or something else? Let me know and I'll try to come\""
]
},
"execution_count": 3,
@@ -88,7 +99,9 @@
"output_type": "execute_result"
}
],
- "source": ["llm.invoke(\"Tell me a joke.\")"]
+ "source": [
+ "llm.invoke(\"Tell me a joke.\")"
+ ]
},
{
"cell_type": "code",
@@ -98,8 +111,8 @@
{
"data": {
"text/plain": [
- "['Username checks out.\\nUser 1: I\\'m not sure if you\\'re being sarcastic or not, but I\\'ll take it as a compliment.\\nUser 0: I\\'m not being sarcastic. I\\'m just saying that your username is very fitting.\\nUser 1: Oh, I thought you were saying that I\\'m a \"dumbass\" because I\\'m a \"dumbass\" who \"checks out\"',\n",
- " 'Username checks out.\\nUser 1: I\\'m not sure if you\\'re being sarcastic or not, but I\\'ll take it as a compliment.\\nUser 0: I\\'m not being sarcastic. I\\'m just saying that your username is very fitting.\\nUser 1: Oh, I thought you were saying that I\\'m a \"dumbass\" because I\\'m a \"dumbass\" who \"checks out\"']"
+ "[\" I need a laugh.\\nHere's one: Why couldn't the bicycle stand up by itself?\\nBecause it was two-tired!\\nI hope that made you laugh! Do you want to hear another one? I have a million of 'em! (Okay, maybe not a million, but I have a few more where that came from!) What kind of joke are you in the mood for? A pun, a play on words, or something else? Let me know and I'll try to come\",\n",
+ " \" I need a laugh.\\nHere's one: Why couldn't the bicycle stand up by itself?\\nBecause it was two-tired!\\nI hope that made you laugh! Do you want to hear another one? I have a million of 'em! (Okay, maybe not a million, but I have a few more where that came from!) What kind of joke are you in the mood for? A pun, a play on words, or something else? Let me know and I'll try to come\"]"
]
},
"execution_count": 4,
@@ -107,7 +120,9 @@
"output_type": "execute_result"
}
],
- "source": ["llm.batch([\"Tell me a joke.\", \"Tell me a joke.\"])"]
+ "source": [
+ "llm.batch([\"Tell me a joke.\", \"Tell me a joke.\"])"
+ ]
},
{
"cell_type": "code",
@@ -117,7 +132,7 @@
{
"data": {
"text/plain": [
- "LLMResult(generations=[[Generation(text='Username checks out.\\nUser 1: I\\'m not sure if you\\'re being sarcastic or not, but I\\'ll take it as a compliment.\\nUser 0: I\\'m not being sarcastic. I\\'m just saying that your username is very fitting.\\nUser 1: Oh, I thought you were saying that I\\'m a \"dumbass\" because I\\'m a \"dumbass\" who \"checks out\"')], [Generation(text='Username checks out.\\nUser 1: I\\'m not sure if you\\'re being sarcastic or not, but I\\'ll take it as a compliment.\\nUser 0: I\\'m not being sarcastic. I\\'m just saying that your username is very fitting.\\nUser 1: Oh, I thought you were saying that I\\'m a \"dumbass\" because I\\'m a \"dumbass\" who \"checks out\"')]], llm_output={'model': 'mixtral-8x7b-instruct-v0-1'}, run=[RunInfo(run_id=UUID('a2009600-baae-4f5a-9f69-23b2bc916e4c')), RunInfo(run_id=UUID('acaf0838-242c-4255-85aa-8a62b675d046'))])"
+ "LLMResult(generations=[[Generation(text=\" I need a laugh.\\nHere's one: Why couldn't the bicycle stand up by itself?\\nBecause it was two-tired!\\nI hope that made you laugh! Do you want to hear another one? I have a million of 'em! (Okay, maybe not a million, but I have a few more where that came from!) What kind of joke are you in the mood for? A pun, a play on words, or something else? Let me know and I'll try to come\")], [Generation(text=\" I need a laugh.\\nHere's one: Why couldn't the bicycle stand up by itself?\\nBecause it was two-tired!\\nI hope that made you laugh! Do you want to hear another one? I have a million of 'em! (Okay, maybe not a million, but I have a few more where that came from!) What kind of joke are you in the mood for? A pun, a play on words, or something else? Let me know and I'll try to come\")]], llm_output={'model': 'meta-llama-3.1-8b-instruct'}, run=[RunInfo(run_id=UUID('ee97984b-6eab-4d40-a56f-51d6114953de')), RunInfo(run_id=UUID('cbe501ea-a20f-4420-9301-86cdfcf898c0'))], type='LLMResult')"
]
},
"execution_count": 5,
@@ -125,25 +140,19 @@
"output_type": "execute_result"
}
],
- "source": ["llm.generate([\"Tell me a joke.\", \"Tell me a joke.\"])"]
+ "source": [
+ "llm.generate([\"Tell me a joke.\", \"Tell me a joke.\"])"
+ ]
},
{
"cell_type": "code",
- "execution_count": 6,
+ "execution_count": null,
"metadata": {},
- "outputs": [
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "Username checks out.\n",
- "User 1: I'm not sure if you're being sarcastic or not, but I'll take it as a compliment.\n",
- "User 0: I'm not being sarcastic. I'm just saying that your username is very fitting.\n",
- "User 1: Oh, I thought you were saying that I'm a \"dumbass\" because I'm a \"dumbass\" who \"checks out\""
- ]
- }
- ],
- "source": ["for chunk in llm.stream(\"Tell me a joke.\"):\n print(chunk, end=\"\", flush=True)"]
+ "outputs": [],
+ "source": [
+ "for chunk in llm.stream(\"Tell me a joke.\"):\n",
+ " print(chunk, end=\"\", flush=True)"
+ ]
},
{
"cell_type": "markdown",
@@ -154,81 +163,79 @@
},
{
"cell_type": "code",
- "execution_count": 7,
+ "execution_count": 6,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
- "'Username checks out.\\nUser 1: I\\'m not sure if you\\'re being sarcastic or not, but I\\'ll take it as a compliment.\\nUser 0: I\\'m not being sarcastic. I\\'m just saying that your username is very fitting.\\nUser 1: Oh, I thought you were saying that I\\'m a \"dumbass\" because I\\'m a \"dumbass\" who \"checks out\"'"
+ "\" I need a laugh.\\nHere's one: Why couldn't the bicycle stand up by itself?\\nBecause it was two-tired!\\nI hope that made you laugh! Do you want to hear another one? I have a million of 'em! (Okay, maybe not a million, but I have a few more where that came from!) What kind of joke are you in the mood for? A pun, a play on words, or something else? Let me know and I'll try to come\""
]
},
- "execution_count": 7,
+ "execution_count": 6,
"metadata": {},
"output_type": "execute_result"
}
],
- "source": ["await llm.ainvoke(\"Tell me a joke.\")"]
+ "source": [
+ "await llm.ainvoke(\"Tell me a joke.\")"
+ ]
},
{
"cell_type": "code",
- "execution_count": 8,
+ "execution_count": 7,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
- "['Username checks out.\\nUser 1: I\\'m not sure if you\\'re being sarcastic or not, but I\\'ll take it as a compliment.\\nUser 0: I\\'m not being sarcastic. I\\'m just saying that your username is very fitting.\\nUser 1: Oh, I thought you were saying that I\\'m a \"dumbass\" because I\\'m a \"dumbass\" who \"checks out\"',\n",
- " 'Username checks out.\\nUser 1: I\\'m not sure if you\\'re being sarcastic or not, but I\\'ll take it as a compliment.\\nUser 0: I\\'m not being sarcastic. I\\'m just saying that your username is very fitting.\\nUser 1: Oh, I thought you were saying that I\\'m a \"dumbass\" because I\\'m a \"dumbass\" who \"checks out\"']"
+ "[\" I need a laugh.\\nHere's one: Why couldn't the bicycle stand up by itself?\\nBecause it was two-tired!\\nI hope that made you laugh! Do you want to hear another one? I have a million of 'em! (Okay, maybe not a million, but I have a few more where that came from!) What kind of joke are you in the mood for? A pun, a play on words, or something else? Let me know and I'll try to come\",\n",
+ " \" I need a laugh.\\nHere's one: Why couldn't the bicycle stand up by itself?\\nBecause it was two-tired!\\nI hope that made you laugh! Do you want to hear another one? I have a million of 'em! (Okay, maybe not a million, but I have a few more where that came from!) What kind of joke are you in the mood for? A pun, a play on words, or something else? Let me know and I'll try to come\"]"
]
},
- "execution_count": 8,
+ "execution_count": 7,
"metadata": {},
"output_type": "execute_result"
}
],
- "source": ["await llm.abatch([\"Tell me a joke.\", \"Tell me a joke.\"])"]
+ "source": [
+ "await llm.abatch([\"Tell me a joke.\", \"Tell me a joke.\"])"
+ ]
},
{
"cell_type": "code",
- "execution_count": 9,
+ "execution_count": 8,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
- "LLMResult(generations=[[Generation(text=\"Username checks out.\\nUser 1: I'm not sure if you're being serious or not, but I'll take it as a compliment.\\nUser 0: I'm being serious. I'm not sure if you're being serious or not.\\nUser 1: I'm being serious. I'm not sure if you're being serious or not.\\nUser 0: I'm being serious. I'm not sure\")], [Generation(text=\"Username checks out.\\nUser 1: I'm not sure if you're being serious or not, but I'll take it as a compliment.\\nUser 0: I'm being serious. I'm not sure if you're being serious or not.\\nUser 1: I'm being serious. I'm not sure if you're being serious or not.\\nUser 0: I'm being serious. I'm not sure\")]], llm_output={'model': 'mixtral-8x7b-instruct-v0-1'}, run=[RunInfo(run_id=UUID('46144905-7350-4531-a4db-22e6a827c6e3')), RunInfo(run_id=UUID('e2b06c30-ffff-48cf-b792-be91f2144aa6'))])"
+ "LLMResult(generations=[[Generation(text=\" I need a laugh.\\nHere's one: Why couldn't the bicycle stand up by itself?\\nBecause it was two-tired!\\nI hope that made you laugh! Do you want to hear another one? I have a million of 'em! (Okay, maybe not a million, but I have a few more where that came from!) What kind of joke are you in the mood for? A pun, a play on words, or something else? Let me know and I'll try to come\")], [Generation(text=\" I need a laugh.\\nHere's one: Why couldn't the bicycle stand up by itself?\\nBecause it was two-tired!\\nI hope that made you laugh! Do you want to hear another one? I have a million of 'em! (Okay, maybe not a million, but I have a few more where that came from!) What kind of joke are you in the mood for? A pun, a play on words, or something else? Let me know and I'll try to come\")]], llm_output={'model': 'meta-llama-3.1-8b-instruct'}, run=[RunInfo(run_id=UUID('857bd88e-e68a-46d2-8ad3-4a282c199a89')), RunInfo(run_id=UUID('a6ba6e7f-9a7a-4aa1-a2ac-c8fcf48309d3'))], type='LLMResult')"
]
},
- "execution_count": 9,
+ "execution_count": 8,
"metadata": {},
"output_type": "execute_result"
}
],
- "source": ["await llm.agenerate([\"Tell me a joke.\", \"Tell me a joke.\"])"]
+ "source": [
+ "await llm.agenerate([\"Tell me a joke.\", \"Tell me a joke.\"])"
+ ]
},
{
"cell_type": "code",
- "execution_count": 10,
+ "execution_count": null,
"metadata": {},
- "outputs": [
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "Username checks out.\n",
- "User 1: I'm not sure if you're being sarcastic or not, but I'll take it as a compliment.\n",
- "User 0: I'm not being sarcastic. I'm just saying that your username is very fitting.\n",
- "User 1: Oh, I thought you were saying that I'm a \"dumbass\" because I'm a \"dumbass\" who \"checks out\""
- ]
- }
- ],
- "source": ["async for chunk in llm.astream(\"Tell me a joke.\"):\n print(chunk, end=\"\", flush=True)"]
+ "outputs": [],
+ "source": [
+ "async for chunk in llm.astream(\"Tell me a joke.\"):\n",
+ " print(chunk, end=\"\", flush=True)"
+ ]
}
],
"metadata": {
"kernelspec": {
- "display_name": "langchain",
+ "display_name": ".venv",
"language": "python",
"name": "python3"
},
@@ -242,7 +249,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
- "version": "3.11.7"
+ "version": "3.12.2"
}
},
"nbformat": 4,
diff --git a/docs/docs/integrations/llms/modelscope_endpoint.ipynb b/docs/docs/integrations/llms/modelscope_endpoint.ipynb
new file mode 100644
index 00000000000000..e4bd19a368dd12
--- /dev/null
+++ b/docs/docs/integrations/llms/modelscope_endpoint.ipynb
@@ -0,0 +1,294 @@
+{
+ "cells": [
+ {
+ "cell_type": "raw",
+ "id": "67db2992",
+ "metadata": {},
+ "source": [
+ "---\n",
+ "sidebar_label: ModelScope\n",
+ "---"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "9597802c",
+ "metadata": {},
+ "source": [
+ "# ModelScopeEndpoint\n",
+ "\n",
+ "ModelScope ([Home](https://www.modelscope.cn/) | [GitHub](https://github.com/modelscope/modelscope)) is built upon the notion of “Model-as-a-Service” (MaaS). It seeks to bring together most advanced machine learning models from the AI community, and streamlines the process of leveraging AI models in real-world applications. The core ModelScope library open-sourced in this repository provides the interfaces and implementations that allow developers to perform model inference, training and evaluation. This will help you get started with ModelScope completion models (LLMs) using LangChain.\n",
+ "\n",
+ "## Overview\n",
+ "### Integration details\n",
+ "\n",
+ "| Provider | Class | Package | Local | Serializable | Package downloads | Package latest |\n",
+ "| :--- | :--- | :---: | :---: | :---: | :---: | :---: |\n",
+ "| [ModelScope](/docs/integrations/providers/modelscope/) | ModelScopeEndpoint | [langchain-modelscope-integration](https://pypi.org/project/langchain-modelscope-integration/) | ❌ | ❌ | ![PyPI - Downloads](https://img.shields.io/pypi/dm/langchain-modelscope-integration?style=flat-square&label=%20) | ![PyPI - Version](https://img.shields.io/pypi/v/langchain-modelscope-integration?style=flat-square&label=%20) |\n",
+ "\n",
+ "\n",
+ "## Setup\n",
+ "\n",
+ "To access ModelScope models you'll need to create a ModelScope account, get an SDK token, and install the `langchain-modelscope-integration` integration package.\n",
+ "\n",
+ "### Credentials\n",
+ "\n",
+ "\n",
+ "Head to [ModelScope](https://modelscope.cn/) to sign up to ModelScope and generate an [SDK token](https://modelscope.cn/my/myaccesstoken). Once you've done this set the `MODELSCOPE_SDK_TOKEN` environment variable:\n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 3,
+ "id": "bc51e756",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "import getpass\n",
+ "import os\n",
+ "\n",
+ "if not os.getenv(\"MODELSCOPE_SDK_TOKEN\"):\n",
+ " os.environ[\"MODELSCOPE_SDK_TOKEN\"] = getpass.getpass(\n",
+ " \"Enter your ModelScope SDK token: \"\n",
+ " )"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "809c6577",
+ "metadata": {},
+ "source": [
+ "### Installation\n",
+ "\n",
+ "The LangChain ModelScope integration lives in the `langchain-modelscope-integration` package:"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "59c710c4",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "%pip install -qU langchain-modelscope-integration"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "0a760037",
+ "metadata": {},
+ "source": [
+ "## Instantiation\n",
+ "\n",
+ "Now we can instantiate our model object and generate chat completions:\n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 4,
+ "id": "a0562a13",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "from langchain_modelscope import ModelScopeEndpoint\n",
+ "\n",
+ "llm = ModelScopeEndpoint(\n",
+ " model=\"Qwen/Qwen2.5-Coder-32B-Instruct\",\n",
+ " temperature=0,\n",
+ " max_tokens=1024,\n",
+ " timeout=60,\n",
+ ")"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "0ee90032",
+ "metadata": {},
+ "source": [
+ "## Invocation\n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 5,
+ "id": "035dea0f",
+ "metadata": {
+ "tags": []
+ },
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "'Certainly! Quick sort is a popular and efficient sorting algorithm that uses a divide-and-conquer approach to sort elements. Below is a simple implementation of the Quick Sort algorithm in Python:\\n\\n```python\\ndef quick_sort(arr):\\n # Base case: if the array is empty or has one element, it\\'s already sorted\\n if len(arr) <= 1:\\n return arr\\n else:\\n # Choose a pivot element from the array\\n pivot = arr[len(arr) // 2]\\n \\n # Partition the array into three parts:\\n # - elements less than the pivot\\n # - elements equal to the pivot\\n # - elements greater than the pivot\\n less_than_pivot = [x for x in arr if x < pivot]\\n equal_to_pivot = [x for x in arr if x == pivot]\\n greater_than_pivot = [x for x in arr if x > pivot]\\n \\n # Recursively apply quick_sort to the less_than_pivot and greater_than_pivot subarrays\\n return quick_sort(less_than_pivot) + equal_to_pivot + quick_sort(greater_than_pivot)\\n\\n# Example usage:\\narr = [3, 6, 8, 10, 1, 2, 1]\\nsorted_arr = quick_sort(arr)\\nprint(\"Sorted array:\", sorted_arr)\\n```\\n\\n### Explanation:\\n1. **Base Case**: If the array has one or zero elements, it is already sorted, so we return it as is.\\n2. **Pivot Selection**: We choose the middle element of the array as the pivot. This is a simple strategy, but there are other strategies for choosing a pivot.\\n3. **Partitioning**: We partition the array into three lists:\\n - `less_than_pivot`: Elements less than the pivot.\\n - `equal_to_pivot`: Elements equal to the pivot.\\n - `greater_than_pivot`: Elements greater than the pivot.\\n4. **Recursive Sorting**: We recursively sort the `less_than_pivot` and `greater_than_pivot` lists and concatenate them with the `equal_to_pivot` list to get the final sorted array.\\n\\nThis implementation is straightforward and easy to understand, but it may not be the most efficient in terms of space complexity due to the use of additional lists. For an in-place version of Quick Sort, you can modify the algorithm to sort the array within its own memory space.'"
+ ]
+ },
+ "execution_count": 5,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "input_text = \"Write a quick sort algorithm in python\"\n",
+ "\n",
+ "completion = llm.invoke(input_text)\n",
+ "completion"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 6,
+ "id": "d5431620",
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "Certainly! Sorting an array is a common task in programming, and Python provides several ways to do it. Below is a simple example using Python's built-in sorting functions. We'll use the `sorted()` function and the `sort()` method of a list.\n",
+ "\n",
+ "### Using `sorted()` Function\n",
+ "\n",
+ "The `sorted()` function returns a new sorted list from the elements of any iterable.\n",
+ "\n",
+ "```python\n",
+ "def sort_array(arr):\n",
+ " return sorted(arr)\n",
+ "\n",
+ "# Example usage\n",
+ "array = [5, 2, 9, 1, 5, 6]\n",
+ "sorted_array = sort_array(array)\n",
+ "print(\"Original array:\", array)\n",
+ "print(\"Sorted array:\", sorted_array)\n",
+ "```\n",
+ "\n",
+ "### Using `sort()` Method\n",
+ "\n",
+ "The `sort()` method sorts the list in place and returns `None`.\n",
+ "\n",
+ "```python\n",
+ "def sort_array_in_place(arr):\n",
+ " arr.sort()\n",
+ "\n",
+ "# Example usage\n",
+ "array = [5, 2, 9, 1, 5, 6]\n",
+ "sort_array_in_place(array)\n",
+ "print(\"Sorted array:\", array)\n",
+ "```\n",
+ "\n",
+ "### Custom Sorting\n",
+ "\n",
+ "If you need to sort the array based on a custom key or in descending order, you can use the `key` and `reverse` parameters.\n",
+ "\n",
+ "```python\n",
+ "def custom_sort_array(arr):\n",
+ " # Sort in descending order\n",
+ " return sorted(arr, reverse=True)\n",
+ "\n",
+ "# Example usage\n",
+ "array = [5, 2, 9, 1, 5, 6]\n",
+ "sorted_array_desc = custom_sort_array(array)\n",
+ "print(\"Sorted array in descending order:\", sorted_array_desc)\n",
+ "```\n",
+ "\n",
+ "### Sorting with a Custom Key\n",
+ "\n",
+ "Suppose you have a list of tuples and you want to sort them based on the second element of each tuple:\n",
+ "\n",
+ "```python\n",
+ "def sort_tuples_by_second_element(arr):\n",
+ " return sorted(arr, key=lambda x: x[1])\n",
+ "\n",
+ "# Example usage\n",
+ "tuples = [(1, 3), (4, 1), (5, 2), (2, 4)]\n",
+ "sorted_tuples = sort_tuples_by_second_element(tuples)\n",
+ "print(\"Sorted tuples by second element:\", sorted_tuples)\n",
+ "```\n",
+ "\n",
+ "These examples demonstrate how to sort arrays in Python using different methods and options. Choose the one that best fits your needs!"
+ ]
+ }
+ ],
+ "source": [
+ "for chunk in llm.stream(\"write a python program to sort an array\"):\n",
+ " print(chunk, end=\"\", flush=True)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "add38532",
+ "metadata": {},
+ "source": [
+ "## Chaining\n",
+ "\n",
+ "We can [chain](/docs/how_to/sequence/) our completion model with a prompt template like so:"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 9,
+ "id": "078e9db2",
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "'In Chinese, you can say \"我喜欢编程\" (Wǒ xǐ huān biān chéng) to express \"I love programming.\" Here\\'s a breakdown of the sentence:\\n\\n- 我 (Wǒ) means \"I\"\\n- 喜欢 (xǐ huān) means \"love\" or \"like\"\\n- 编程 (biān chéng) means \"programming\"\\n\\nSo, when you put it all together, it translates to \"I love programming.\"'"
+ ]
+ },
+ "execution_count": 9,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "from langchain_core.prompts import PromptTemplate\n",
+ "\n",
+ "prompt = PromptTemplate(template=\"How to say {input} in {output_language}:\\n\")\n",
+ "\n",
+ "chain = prompt | llm\n",
+ "chain.invoke(\n",
+ " {\n",
+ " \"output_language\": \"Chinese\",\n",
+ " \"input\": \"I love programming.\",\n",
+ " }\n",
+ ")"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "e9bdfcef",
+ "metadata": {},
+ "source": [
+ "## API reference\n",
+ "\n",
+ "Refer to https://modelscope.cn/docs/model-service/API-Inference/intro for more details."
+ ]
+ }
+ ],
+ "metadata": {
+ "kernelspec": {
+ "display_name": "Python 3.11.1 64-bit",
+ "language": "python",
+ "name": "python3"
+ },
+ "language_info": {
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": 3
+ },
+ "file_extension": ".py",
+ "mimetype": "text/x-python",
+ "name": "python",
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython3",
+ "version": "3.10.16"
+ },
+ "vscode": {
+ "interpreter": {
+ "hash": "e971737741ff4ec9aff7dc6155a1060a59a8a6d52c757dbbe66bf8ee389494b1"
+ }
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 5
+}
diff --git a/docs/docs/integrations/memory/falkordb_chat_message_history.ipynb b/docs/docs/integrations/memory/falkordb_chat_message_history.ipynb
new file mode 100644
index 00000000000000..759841778bc50f
--- /dev/null
+++ b/docs/docs/integrations/memory/falkordb_chat_message_history.ipynb
@@ -0,0 +1,73 @@
+{
+ "cells": [
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "# FalkorDB\n",
+ "\n",
+ "FalkorDB is an open-source graph database management system, renowned for its efficient management of highly connected data. Unlike traditional databases that store data in tables, FalkorDB uses a graph structure with nodes, edges, and properties to represent and store data. This design allows for high-performance queries on complex data relationships.\n",
+ "\n",
+ "This notebook goes over how to use `FalkorDB` to store chat message history\n",
+ "\n",
+ "**NOTE**: You can use FalkorDB locally or use FalkorDB Cloud. See installation instructions"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 6,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# For this example notebook we will be using FalkorDB locally\n",
+ "host = \"localhost\"\n",
+ "port = 6379"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 8,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "from langchain_falkordb.message_history import (\n",
+ " FalkorDBChatMessageHistory,\n",
+ ")\n",
+ "\n",
+ "history = FalkorDBChatMessageHistory(host=host, port=port, session_id=\"session_id_1\")\n",
+ "\n",
+ "history.add_user_message(\"hi!\")\n",
+ "\n",
+ "history.add_ai_message(\"whats up?\")"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 9,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "[HumanMessage(content='hi!', additional_kwargs={}, response_metadata={}),\n",
+ " AIMessage(content='whats up?', additional_kwargs={}, response_metadata={})]"
+ ]
+ },
+ "execution_count": 9,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "history.messages"
+ ]
+ }
+ ],
+ "metadata": {
+ "language_info": {
+ "name": "python"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 2
+}
diff --git a/docs/docs/integrations/providers/falkordb.ipynb b/docs/docs/integrations/providers/falkordb.ipynb
new file mode 100644
index 00000000000000..8ac41a19cfddc0
--- /dev/null
+++ b/docs/docs/integrations/providers/falkordb.ipynb
@@ -0,0 +1,72 @@
+{
+ "cells": [
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "# FalkorDB\n",
+ "\n",
+ ">What is `FalkorDB`?\n",
+ "\n",
+ ">- FalkorDB is an `open-source database management system` that specializes in graph database technology.\n",
+ ">- FalkorDB allows you to represent and store data in nodes and edges, making it ideal for handling connected data and relationships.\n",
+ ">- FalkorDB Supports OpenCypher query language with proprietary extensions, making it easy to interact with and query your graph data.\n",
+ ">- With FalkorDB, you can achieve high-performance `graph traversals and queries`, suitable for production-level systems.\n",
+ "\n",
+ ">Get started with FalkorDB by visiting [their website](https://docs.falkordb.com/)."
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## Installation and Setup\n",
+ "\n",
+ "- Install the Python SDK with `pip install falkordb langchain-falkordb`"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## VectorStore\n",
+ "\n",
+ "The FalkorDB vector index is used as a vectorstore,\n",
+ "whether for semantic search or example selection.\n",
+ "\n",
+ "```python\n",
+ "from langchain_community.vectorstores.falkordb_vector import FalkorDBVector\n",
+ "```\n",
+ "or \n",
+ "\n",
+ "```python\n",
+ "from langchain_falkordb.vectorstore import FalkorDBVector\n",
+ "```\n",
+ "\n",
+ "See a [usage example](/docs/integrations/vectorstores/falkordbvector.ipynb)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## Memory\n",
+ "\n",
+ "See a [usage example](/docs/integrations/memory/falkordb_chat_message_history.ipynb).\n",
+ "\n",
+ "```python\n",
+ "from langchain_falkordb.message_history import (\n",
+ " FalkorDBChatMessageHistory,\n",
+ ")\n",
+ "```"
+ ]
+ }
+ ],
+ "metadata": {
+ "language_info": {
+ "name": "python"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 2
+}
diff --git a/docs/docs/integrations/providers/friendli.md b/docs/docs/integrations/providers/friendli.mdx
similarity index 56%
rename from docs/docs/integrations/providers/friendli.md
rename to docs/docs/integrations/providers/friendli.mdx
index e0f3a49b68b3d4..2e8fda9ade2f6e 100644
--- a/docs/docs/integrations/providers/friendli.md
+++ b/docs/docs/integrations/providers/friendli.mdx
@@ -1,6 +1,6 @@
# Friendli AI
->[FriendliAI](https://friendli.ai/) enhances AI application performance and optimizes
+> [FriendliAI](https://friendli.ai/) enhances AI application performance and optimizes
> cost savings with scalable, efficient deployment options, tailored for high-demand AI workloads.
## Installation and setup
@@ -8,10 +8,11 @@
Install the `friendli-client` python package.
```bash
-pip install friendli-client
+pip install -U langchain_community friendli-client
```
+
Sign in to [Friendli Suite](https://suite.friendli.ai/) to create a Personal Access Token,
-and set it as the `FRIENDLI_TOKEN` environment variable.
+and set it as the `FRIENDLI_TOKEN` environment variable.
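+
+For example, in your shell (the token value below is a placeholder):
+
+```bash
+export FRIENDLI_TOKEN="YOUR FRIENDLI TOKEN"
+```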
## Chat models
@@ -20,6 +21,11 @@ See a [usage example](/docs/integrations/chat/friendli).
```python
from langchain_community.chat_models.friendli import ChatFriendli
+
+chat = ChatFriendli(model='meta-llama-3.1-8b-instruct')
+
+for m in chat.stream("Tell me fun things to do in NYC"):
+ print(m.content, end="", flush=True)
```
## LLMs
@@ -28,4 +34,8 @@ See a [usage example](/docs/integrations/llms/friendli).
```python
from langchain_community.llms.friendli import Friendli
+
+llm = Friendli(model='meta-llama-3.1-8b-instruct')
+
+print(llm.invoke("def bubble_sort(): "))
```
diff --git a/docs/docs/integrations/providers/friendly.md b/docs/docs/integrations/providers/friendly.md
deleted file mode 100644
index 834a1ebfe802b6..00000000000000
--- a/docs/docs/integrations/providers/friendly.md
+++ /dev/null
@@ -1,32 +0,0 @@
-# Friendli AI
-
->[Friendli AI](https://friendli.ai/) is a company that fine-tunes, deploys LLMs,
-> and serves a wide range of Generative AI use cases.
-
-
-## Installation and setup
-
-- Install the integration package:
-
- ```
- pip install friendli-client
- ```
-
-- Sign in to [Friendli Suite](https://suite.friendli.ai/) to create a Personal Access Token,
-and set it as the `FRIENDLI_TOKEN` environment.
-
-## Chat models
-
-See a [usage example](/docs/integrations/chat/friendli).
-
-```python
-from langchain_community.chat_models.friendli import ChatFriendli
-```
-
-## LLMs
-
-See a [usage example](/docs/integrations/llms/friendli).
-
-```python
-from langchain_community.llms.friendli import Friendli
-```
diff --git a/docs/docs/integrations/providers/modelscope.mdx b/docs/docs/integrations/providers/modelscope.mdx
index 34c421ea707e8f..30c50e33bd58aa 100644
--- a/docs/docs/integrations/providers/modelscope.mdx
+++ b/docs/docs/integrations/providers/modelscope.mdx
@@ -5,20 +5,46 @@
This page covers how to use the modelscope ecosystem within LangChain.
It is broken into two parts: installation and setup, and then references to specific modelscope wrappers.
-## Installation and Setup
+## Installation
-Install the `modelscope` package.
-
```bash
-pip install modelscope
+pip install -U langchain-modelscope-integration
```
+Head to [ModelScope](https://modelscope.cn/) to sign up and generate an [SDK token](https://modelscope.cn/my/myaccesstoken). Once you've done this, set the `MODELSCOPE_SDK_TOKEN` environment variable:
-## Text Embedding Models
+```bash
+export MODELSCOPE_SDK_TOKEN=
+```
+
+## Chat Models
+
+The `ModelScopeChatEndpoint` class exposes chat models from ModelScope. See the available models [here](https://www.modelscope.cn/docs/model-service/API-Inference/intro).
+
+```python
+from langchain_modelscope import ModelScopeChatEndpoint
+
+llm = ModelScopeChatEndpoint(model="Qwen/Qwen2.5-Coder-32B-Instruct")
+llm.invoke("Sing a ballad of LangChain.")
+```
+
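+As with any LangChain chat model, you can also stream the response; a minimal sketch:
+
+```python
+for chunk in llm.stream("Sing a ballad of LangChain."):
+    print(chunk.content, end="", flush=True)
+```
+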
+## Embeddings
+
+The `ModelScopeEmbeddings` class exposes embedding models from ModelScope.
```python
-from langchain_community.embeddings import ModelScopeEmbeddings
+from langchain_modelscope import ModelScopeEmbeddings
+
+embeddings = ModelScopeEmbeddings(model_id="damo/nlp_corom_sentence-embedding_english-base")
+embeddings.embed_query("What is the meaning of life?")
```
-For a more detailed walkthrough of this, see [this notebook](/docs/integrations/text_embedding/modelscope_hub)
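+
+Multiple texts can be embedded in one call with `embed_documents`; a minimal sketch:
+
+```python
+embeddings.embed_documents(["Hello!", "What is the meaning of life?"])
+```
+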
+## LLMs
+
+The `ModelScopeLLM` class exposes LLMs from ModelScope.
+
+```python
+from langchain_modelscope import ModelScopeLLM
+
+llm = ModelScopeLLM(model="Qwen/Qwen2.5-Coder-32B-Instruct")
+llm.invoke("The meaning of life is")
+```
diff --git a/docs/docs/integrations/text_embedding/modelscope_embedding.ipynb b/docs/docs/integrations/text_embedding/modelscope_embedding.ipynb
new file mode 100644
index 00000000000000..b5db8dbab9a199
--- /dev/null
+++ b/docs/docs/integrations/text_embedding/modelscope_embedding.ipynb
@@ -0,0 +1,285 @@
+{
+ "cells": [
+ {
+ "cell_type": "raw",
+ "id": "afaf8039",
+ "metadata": {},
+ "source": [
+ "---\n",
+ "sidebar_label: ModelScope\n",
+ "---"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "9a3d6f34",
+ "metadata": {},
+ "source": [
+ "# ModelScopeEmbeddings\n",
+ "\n",
+ "ModelScope ([Home](https://www.modelscope.cn/) | [GitHub](https://github.com/modelscope/modelscope)) is built upon the notion of “Model-as-a-Service” (MaaS). It seeks to bring together most advanced machine learning models from the AI community, and streamlines the process of leveraging AI models in real-world applications. The core ModelScope library open-sourced in this repository provides the interfaces and implementations that allow developers to perform model inference, training and evaluation. \n",
+ "\n",
+ "This will help you get started with ModelScope embedding models using LangChain.\n",
+ "\n",
+ "## Overview\n",
+ "### Integration details\n",
+ "\n",
+ "| Provider | Package |\n",
+ "|:--------:|:-------:|\n",
+ "| [ModelScope](/docs/integrations/providers/modelscope/) | [langchain-modelscope-integration](https://pypi.org/project/langchain-modelscope-integration/) |\n",
+ "\n",
+ "## Setup\n",
+ "\n",
+ "To access ModelScope embedding models you'll need to create a/an ModelScope account, get an API key, and install the `langchain-modelscope-integration` integration package.\n",
+ "\n",
+ "### Credentials\n",
+ "\n",
+ "Head to [ModelScope](https://modelscope.cn/) to sign up to ModelScope."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "36521c2a",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "import getpass\n",
+ "import os\n",
+ "\n",
+ "if not os.getenv(\"MODELSCOPE_SDK_TOKEN\"):\n",
+ " os.environ[\"MODELSCOPE_SDK_TOKEN\"] = getpass.getpass(\n",
+ " \"Enter your ModelScope SDK token: \"\n",
+ " )"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "d9664366",
+ "metadata": {},
+ "source": [
+ "### Installation\n",
+ "\n",
+ "The LangChain ModelScope integration lives in the `langchain-modelscope-integration` package:"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "64853226",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "%pip install -qU langchain-modelscope-integration"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "45dd1724",
+ "metadata": {},
+ "source": [
+ "## Instantiation\n",
+ "\n",
+ "Now we can instantiate our model object:"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 3,
+ "id": "9ea7a09b",
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "Downloading Model to directory: /root/.cache/modelscope/hub/damo/nlp_corom_sentence-embedding_english-base\n"
+ ]
+ },
+ {
+ "name": "stderr",
+ "output_type": "stream",
+ "text": [
+ "2024-12-27 16:15:11,175 - modelscope - WARNING - Model revision not specified, use revision: v1.0.0\n",
+ "2024-12-27 16:15:11,443 - modelscope - INFO - initiate model from /root/.cache/modelscope/hub/damo/nlp_corom_sentence-embedding_english-base\n",
+ "2024-12-27 16:15:11,444 - modelscope - INFO - initiate model from location /root/.cache/modelscope/hub/damo/nlp_corom_sentence-embedding_english-base.\n",
+ "2024-12-27 16:15:11,445 - modelscope - INFO - initialize model from /root/.cache/modelscope/hub/damo/nlp_corom_sentence-embedding_english-base\n",
+ "2024-12-27 16:15:12,115 - modelscope - WARNING - No preprocessor field found in cfg.\n",
+ "2024-12-27 16:15:12,116 - modelscope - WARNING - No val key and type key found in preprocessor domain of configuration.json file.\n",
+ "2024-12-27 16:15:12,116 - modelscope - WARNING - Cannot find available config to build preprocessor at mode inference, current config: {'model_dir': '/root/.cache/modelscope/hub/damo/nlp_corom_sentence-embedding_english-base'}. trying to build by task and model information.\n",
+ "2024-12-27 16:15:12,318 - modelscope - WARNING - No preprocessor field found in cfg.\n",
+ "2024-12-27 16:15:12,319 - modelscope - WARNING - No val key and type key found in preprocessor domain of configuration.json file.\n",
+ "2024-12-27 16:15:12,319 - modelscope - WARNING - Cannot find available config to build preprocessor at mode inference, current config: {'model_dir': '/root/.cache/modelscope/hub/damo/nlp_corom_sentence-embedding_english-base', 'sequence_length': 128}. trying to build by task and model information.\n"
+ ]
+ }
+ ],
+ "source": [
+ "from langchain_modelscope import ModelScopeEmbeddings\n",
+ "\n",
+ "embeddings = ModelScopeEmbeddings(\n",
+ " model_id=\"damo/nlp_corom_sentence-embedding_english-base\",\n",
+ ")"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "77d271b6",
+ "metadata": {},
+ "source": [
+ "## Indexing and Retrieval\n",
+ "\n",
+ "Embedding models are often used in retrieval-augmented generation (RAG) flows, both as part of indexing data as well as later retrieving it. For more detailed instructions, please see our [RAG tutorials](/docs/tutorials/).\n",
+ "\n",
+ "Below, see how to index and retrieve data using the `embeddings` object we initialized above. In this example, we will index and retrieve a sample document in the `InMemoryVectorStore`."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 4,
+ "id": "d817716b",
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stderr",
+ "output_type": "stream",
+ "text": [
+ "/root/miniconda3/envs/langchain/lib/python3.10/site-packages/transformers/modeling_utils.py:1113: FutureWarning: The `device` argument is deprecated and will be removed in v5 of Transformers.\n",
+ " warnings.warn(\n",
+ "/root/miniconda3/envs/langchain/lib/python3.10/site-packages/transformers/modeling_utils.py:1113: FutureWarning: The `device` argument is deprecated and will be removed in v5 of Transformers.\n",
+ " warnings.warn(\n"
+ ]
+ },
+ {
+ "data": {
+ "text/plain": [
+ "'LangChain is the framework for building context-aware reasoning applications'"
+ ]
+ },
+ "execution_count": 4,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "# Create a vector store with a sample text\n",
+ "from langchain_core.vectorstores import InMemoryVectorStore\n",
+ "\n",
+ "text = \"LangChain is the framework for building context-aware reasoning applications\"\n",
+ "\n",
+ "vectorstore = InMemoryVectorStore.from_texts(\n",
+ " [text],\n",
+ " embedding=embeddings,\n",
+ ")\n",
+ "\n",
+ "# Use the vectorstore as a retriever\n",
+ "retriever = vectorstore.as_retriever()\n",
+ "\n",
+ "# Retrieve the most similar text\n",
+ "retrieved_documents = retriever.invoke(\"What is LangChain?\")\n",
+ "\n",
+ "# show the retrieved document's content\n",
+ "retrieved_documents[0].page_content"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "e02b9855",
+ "metadata": {},
+ "source": [
+ "## Direct Usage\n",
+ "\n",
+ "Under the hood, the vectorstore and retriever implementations are calling `embeddings.embed_documents(...)` and `embeddings.embed_query(...)` to create embeddings for the text(s) used in `from_texts` and retrieval `invoke` operations, respectively.\n",
+ "\n",
+ "You can directly call these methods to get embeddings for your own use cases.\n",
+ "\n",
+ "### Embed single texts\n",
+ "\n",
+ "You can embed single texts or documents with `embed_query`:"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 5,
+ "id": "0d2befcd",
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "[-0.6046376824378967, -0.3595953583717346, 0.11333226412534714, -0.030444221571087837, 0.23397332429\n"
+ ]
+ }
+ ],
+ "source": [
+ "single_vector = embeddings.embed_query(text)\n",
+ "print(str(single_vector)[:100]) # Show the first 100 characters of the vector"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "1b5a7d03",
+ "metadata": {},
+ "source": [
+ "### Embed multiple texts\n",
+ "\n",
+ "You can embed multiple texts with `embed_documents`:"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 6,
+ "id": "2f4d6e97",
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "[-0.6046381592750549, -0.3595949709415436, 0.11333223432302475, -0.030444379895925522, 0.23397321999\n",
+ "[-0.36103254556655884, -0.7602502107620239, 0.6505364775657654, 0.000658963865134865, 1.185304522514\n"
+ ]
+ }
+ ],
+ "source": [
+ "text2 = (\n",
+ " \"LangGraph is a library for building stateful, multi-actor applications with LLMs\"\n",
+ ")\n",
+ "two_vectors = embeddings.embed_documents([text, text2])\n",
+ "for vector in two_vectors:\n",
+ " print(str(vector)[:100]) # Show the first 100 characters of the vector"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "98785c12",
+ "metadata": {},
+ "source": [
+ "## API Reference\n",
+ "\n",
+ "For detailed documentation on `ModelScopeEmbeddings` features and configuration options, please refer to the [API reference](https://www.modelscope.cn/docs/sdk/pipelines).\n"
+ ]
+ }
+ ],
+ "metadata": {
+ "kernelspec": {
+ "display_name": "Python 3 (ipykernel)",
+ "language": "python",
+ "name": "python3"
+ },
+ "language_info": {
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": 3
+ },
+ "file_extension": ".py",
+ "mimetype": "text/x-python",
+ "name": "python",
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython3",
+ "version": "3.10.16"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 5
+}
diff --git a/docs/docs/integrations/text_embedding/modelscope_hub.ipynb b/docs/docs/integrations/text_embedding/modelscope_hub.ipynb
deleted file mode 100644
index b7d404e7beb4b2..00000000000000
--- a/docs/docs/integrations/text_embedding/modelscope_hub.ipynb
+++ /dev/null
@@ -1,90 +0,0 @@
-{
- "cells": [
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "# ModelScope\n",
- "\n",
- ">[ModelScope](https://www.modelscope.cn/home) is big repository of the models and datasets.\n",
- "\n",
- "Let's load the ModelScope Embedding class."
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": [
- "from langchain_community.embeddings import ModelScopeEmbeddings"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": [
- "model_id = \"damo/nlp_corom_sentence-embedding_english-base\""
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": [
- "embeddings = ModelScopeEmbeddings(model_id=model_id)"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": [
- "text = \"This is a test document.\""
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": [
- "query_result = embeddings.embed_query(text)"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": [
- "doc_results = embeddings.embed_documents([\"foo\"])"
- ]
- }
- ],
- "metadata": {
- "kernelspec": {
- "display_name": "Python 3 (ipykernel)",
- "language": "python",
- "name": "python3"
- },
- "language_info": {
- "codemirror_mode": {
- "name": "ipython",
- "version": 3
- },
- "file_extension": ".py",
- "mimetype": "text/x-python",
- "name": "python",
- "nbconvert_exporter": "python",
- "pygments_lexer": "ipython3",
- "version": "3.10.12"
- }
- },
- "nbformat": 4,
- "nbformat_minor": 4
-}
diff --git a/docs/docs/integrations/tools/stripe.ipynb b/docs/docs/integrations/tools/stripe.ipynb
new file mode 100644
index 00000000000000..0edc4cdd6d6b89
--- /dev/null
+++ b/docs/docs/integrations/tools/stripe.ipynb
@@ -0,0 +1,196 @@
+{
+ "cells": [
+ {
+ "cell_type": "raw",
+ "id": "10238e62-3465-4973-9279-606cbb7ccf16",
+ "metadata": {},
+ "source": [
+ "---\n",
+ "sidebar_label: Stripe\n",
+ "---"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "a6f91f20",
+ "metadata": {},
+ "source": [
+ "# StripeAgentToolkit\n",
+ "\n",
+ "This notebook provides a quick overview for getting started with Stripe's agent toolkit.\n",
+ "\n",
+ "You can read more about `StripeAgentToolkit` in [Stripe's launch blog](https://stripe.dev/blog/adding-payments-to-your-agentic-workflows) or on the project's [PyPi page](https://pypi.org/project/stripe-agent-toolkit/).\n",
+ "\n",
+ "## Overview\n",
+ "\n",
+ "### Integration details\n",
+ "\n",
+ "| Class | Package | Serializable | [JS Support](https://github.com/stripe/agent-toolkit?tab=readme-ov-file#typescript) | Package latest |\n",
+ "| :--- | :--- | :---: | :---: | :---: |\n",
+ "| StripeAgentToolkit | [stripe-agent-toolkit](https://pypi.org/project/stripe-agent-toolkit) | ❌ | ✅ | ![PyPI - Version](https://img.shields.io/pypi/v/stripe-agent-toolkit?style=flat-square&label=%20) |\n",
+ "\n",
+ "\n",
+ "## Setup\n",
+ "\n",
+ "This externally-managed package is hosted out of the `stripe-agent-toolkit` project, which is managed by Stripe's team.\n",
+ "\n",
+ "You can install it, along with langgraph for the following examples, with `pip`:"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 1,
+ "id": "f85b4089",
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "\n",
+ "\u001b[1m[\u001b[0m\u001b[34;49mnotice\u001b[0m\u001b[1;39;49m]\u001b[0m\u001b[39;49m A new release of pip is available: \u001b[0m\u001b[31;49m24.2\u001b[0m\u001b[39;49m -> \u001b[0m\u001b[32;49m24.3.1\u001b[0m\n",
+ "\u001b[1m[\u001b[0m\u001b[34;49mnotice\u001b[0m\u001b[1;39;49m]\u001b[0m\u001b[39;49m To update, run: \u001b[0m\u001b[32;49mpip install --upgrade pip\u001b[0m\n",
+ "Note: you may need to restart the kernel to use updated packages.\n"
+ ]
+ }
+ ],
+ "source": [
+ "%pip install --quiet -U langgraph stripe-agent-toolkit"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "b15e9266",
+ "metadata": {},
+ "source": [
+ "### Credentials\n",
+ "\n",
+ "In addition to installing the package, you will need to configure the integration with your Stripe account's secret key, which is available in your [Stripe Dashboard](https://dashboard.stripe.com/account/apikeys)."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "e0b178a2-8816-40ca-b57c-ccdd86dde9c9",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "import getpass\n",
+ "import os\n",
+ "\n",
+ "if not os.environ.get(\"STRIPE_SECRET_KEY\"):\n",
+ " os.environ[\"STRIPE_SECRET_KEY\"] = getpass.getpass(\"STRIPE API key:\\n\")"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "bc5ab717-fd27-4c59-b912-bdd099541478",
+ "metadata": {},
+ "source": [
+ "It's also helpful (but not needed) to set up [LangSmith](https://smith.langchain.com/) for best-in-class observability:"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 3,
+ "id": "a6c2f136-6367-4f1f-825d-ae741e1bf281",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# os.environ[\"LANGCHAIN_TRACING_V2\"] = \"true\"\n",
+ "# os.environ[\"LANGCHAIN_API_KEY\"] = getpass.getpass()"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "1c97218f-f366-479d-8bf7-fe9f2f6df73f",
+ "metadata": {},
+ "source": [
+ "## Instantiation\n",
+ "\n",
+ "Here we show how to create an instance of the Stripe Toolkit"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "8b3ddfe9-ca79-494c-a7ab-1f56d9407a64",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "from stripe_agent_toolkit.crewai.toolkit import StripeAgentToolkit\n",
+ "\n",
+ "stripe_agent_toolkit = StripeAgentToolkit(\n",
+ " secret_key=os.getenv(\"STRIPE_SECRET_KEY\"),\n",
+ " configuration={\n",
+ " \"actions\": {\n",
+ " \"payment_links\": {\n",
+ " \"create\": True,\n",
+ " },\n",
+ " }\n",
+ " },\n",
+ ")"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "4f53188e",
+ "metadata": {},
+ "source": [
+ "## Agent\n",
+ "\n",
+ "Here's how to use the toolkit to create a basic agent in langgraph:"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "4975924e",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "from langchain_anthropic import ChatAnthropic\n",
+ "from langgraph.prebuilt import create_react_agent\n",
+ "\n",
+ "llm = ChatAnthropic(\n",
+ " model=\"claude-3-5-sonnet-20240620\",\n",
+ ")\n",
+ "\n",
+ "langgraph_agent_executor = create_react_agent(llm, stripe_agent_toolkit.get_tools())\n",
+ "\n",
+ "input_state = {\n",
+ " \"messages\": \"\"\"\n",
+ " Create a payment link for a new product called 'test' with a price\n",
+ " of $100. Come up with a funny description about buy bots,\n",
+ " maybe a haiku.\n",
+ " \"\"\",\n",
+ "}\n",
+ "\n",
+ "output_state = langgraph_agent_executor.invoke(input_state)\n",
+ "\n",
+ "print(output_state[\"messages\"][-1].content)"
+ ]
+ }
+ ],
+ "metadata": {
+ "kernelspec": {
+ "display_name": ".venv",
+ "language": "python",
+ "name": "python3"
+ },
+ "language_info": {
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": 3
+ },
+ "file_extension": ".py",
+ "mimetype": "text/x-python",
+ "name": "python",
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython3",
+ "version": "3.11.4"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 5
+}
diff --git a/docs/docs/integrations/vectorstores/faiss.ipynb b/docs/docs/integrations/vectorstores/faiss.ipynb
index 0f957599065d02..26520baa6ac0d0 100644
--- a/docs/docs/integrations/vectorstores/faiss.ipynb
+++ b/docs/docs/integrations/vectorstores/faiss.ipynb
@@ -286,6 +286,52 @@
" print(f\"* {res.page_content} [{res.metadata}]\")"
]
},
+ {
+ "cell_type": "markdown",
+ "id": "39cb1496",
+ "metadata": {},
+ "source": [
+ "Some [MongoDB query and projection operators](https://www.mongodb.com/docs/manual/reference/operator/query/) are supported for more advanced metadata filtering. The current list of supported operators are as follows:\n",
+ "- `$eq` (equals)\n",
+ "- `$neq` (not equals)\n",
+ "- `$gt` (greater than)\n",
+ "- `$lt` (less than)\n",
+ "- `$gte` (greater than or equal)\n",
+ "- `$lte` (less than or equal)\n",
+ "- `$in` (membership in list)\n",
+ "- `$nin` (not in list)\n",
+ "- `$and` (all conditions must match)\n",
+ "- `$or` (any condition must match)\n",
+ "- `$not` (negation of condition)\n",
+ "\n",
+ "Performing the same above similarity search with advanced metadata filtering can be done as follows:"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "1b3dd99d",
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "* Building an exciting new project with LangChain - come check it out! [{'source': 'tweet'}]\n",
+ "* LangGraph is the best framework for building stateful, agentic applications! [{'source': 'tweet'}]\n"
+ ]
+ }
+ ],
+ "source": [
+ "results = vector_store.similarity_search(\n",
+ " \"LangChain provides abstractions to make working with LLMs easy\",\n",
+ " k=2,\n",
+ " filter={\"source\": {\"$eq\": \"tweet\"}},\n",
+ ")\n",
+ "for res in results:\n",
+ " print(f\"* {res.page_content} [{res.metadata}]\")"
+ ]
+ },
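+  {
+   "cell_type": "markdown",
+   "id": "7a1c2e90",
+   "metadata": {},
+   "source": [
+    "Operators can also be composed. Below is an illustrative sketch (assuming the MongoDB-style semantics listed above) that combines `$and` with `$eq` and `$neq`; the filter values are examples only:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "9b4f6a2d",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Illustrative sketch: keep documents whose source is \"tweet\" and not \"news\"\n",
+    "results = vector_store.similarity_search(\n",
+    "    \"LangChain provides abstractions to make working with LLMs easy\",\n",
+    "    k=2,\n",
+    "    filter={\"$and\": [{\"source\": {\"$eq\": \"tweet\"}}, {\"source\": {\"$neq\": \"news\"}}]},\n",
+    ")\n",
+    "for res in results:\n",
+    "    print(f\"* {res.page_content} [{res.metadata}]\")"
+   ]
+  },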
{
"cell_type": "markdown",
"id": "5ae35069",
diff --git a/docs/docs/integrations/vectorstores/faiss_async.ipynb b/docs/docs/integrations/vectorstores/faiss_async.ipynb
index e3bd1c90300abf..ec16f9e4d10ad4 100644
--- a/docs/docs/integrations/vectorstores/faiss_async.ipynb
+++ b/docs/docs/integrations/vectorstores/faiss_async.ipynb
@@ -397,6 +397,49 @@
" print(f\"Content: {doc.page_content}, Metadata: {doc.metadata}\")"
]
},
+ {
+ "cell_type": "markdown",
+ "id": "8dead085",
+ "metadata": {},
+ "source": [
+ "Some [MongoDB query and projection operators](https://www.mongodb.com/docs/manual/reference/operator/query/) are supported for more advanced metadata filtering. The current list of supported operators are as follows:\n",
+ "- `$eq` (equals)\n",
+ "- `$neq` (not equals)\n",
+ "- `$gt` (greater than)\n",
+ "- `$lt` (less than)\n",
+ "- `$gte` (greater than or equal)\n",
+ "- `$lte` (less than or equal)\n",
+ "- `$in` (membership in list)\n",
+ "- `$nin` (not in list)\n",
+ "- `$and` (all conditions must match)\n",
+ "- `$or` (any condition must match)\n",
+ "- `$not` (negation of condition)\n",
+ "\n",
+ "Performing the same above similarity search with advanced metadata filtering can be done as follows:"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "af47c6f9",
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "Content: foo, Metadata: {'page': 1}\n"
+ ]
+ }
+ ],
+ "source": [
+ "results = await db.asimilarity_search(\n",
+ " \"foo\", filter={\"page\": {\"$eq\": 1}}, k=1, fetch_k=4\n",
+ ")\n",
+ "for doc in results:\n",
+ " print(f\"Content: {doc.page_content}, Metadata: {doc.metadata}\")"
+ ]
+ },
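+  {
+   "cell_type": "markdown",
+   "id": "3e7d9c41",
+   "metadata": {},
+   "source": [
+    "Operators can likewise be composed. Below is an illustrative sketch (assuming the MongoDB-style semantics listed above) that bounds the `page` metadata field with `$and`, `$gte`, and `$lte`:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "6f2b8e15",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Illustrative sketch: keep documents whose page is between 1 and 2 inclusive\n",
+    "results = await db.asimilarity_search(\n",
+    "    \"foo\", filter={\"$and\": [{\"page\": {\"$gte\": 1}}, {\"page\": {\"$lte\": 2}}]}, k=1\n",
+    ")\n",
+    "for doc in results:\n",
+    "    print(f\"Content: {doc.page_content}, Metadata: {doc.metadata}\")"
+   ]
+  },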
{
"cell_type": "markdown",
"id": "1becca53",
diff --git a/docs/docs/tutorials/agents.ipynb b/docs/docs/tutorials/agents.ipynb
index 3eca90dc8d0436..9998cc7237076d 100644
--- a/docs/docs/tutorials/agents.ipynb
+++ b/docs/docs/tutorials/agents.ipynb
@@ -211,7 +211,7 @@
"source": [
"## Using Language Models\n",
"\n",
- "Next, let's learn how to use a language model by to call tools. LangChain supports many different language models that you can use interchangably - select the one you want to use below!\n",
+ "Next, let's learn how to use a language model to call tools. LangChain supports many different language models that you can use interchangably - select the one you want to use below!\n",
"\n",
"import ChatModelTabs from \"@theme/ChatModelTabs\";\n",
"\n",
diff --git a/docs/scripts/partner_pkg_table.py b/docs/scripts/partner_pkg_table.py
index 04a605235ff784..a7bdc34be7fd39 100644
--- a/docs/scripts/partner_pkg_table.py
+++ b/docs/scripts/partner_pkg_table.py
@@ -98,7 +98,7 @@ def package_row(p: dict) -> str:
def table() -> str:
- header = """| Provider | Package | Downloads | Latest | [JS](https://js.langchain.com/docs/integrations/providers/) |
+ header = """| Provider | Package | Downloads | Latest | [JS](https://js.langchain.com/docs/integrations/platforms/) |
| :--- | :---: | :---: | :---: | :---: |
"""
return header + "\n".join(package_row(p) for p in packages_sorted)
diff --git a/libs/cli/poetry.lock b/libs/cli/poetry.lock
index 72cd18d604530c..5e84b31ac85b58 100644
--- a/libs/cli/poetry.lock
+++ b/libs/cli/poetry.lock
@@ -601,13 +601,13 @@ test = ["objgraph", "psutil"]
[[package]]
name = "gritql"
-version = "0.1.5"
+version = "0.2.0"
description = "Python bindings for GritQL"
optional = false
python-versions = "*"
files = [
- {file = "gritql-0.1.5-py2.py3-none-any.whl", hash = "sha256:b17b314d995a11b8e06839280b079ffc8a30bdfb0d2beebcb4332186a0b2cdf0"},
- {file = "gritql-0.1.5.tar.gz", hash = "sha256:7568ee2d7c7457000374c91289bacb05e92524c77a5d5f63fe777b29622bff4c"},
+ {file = "gritql-0.2.0-py2.py3-none-any.whl", hash = "sha256:6a37f4a6388c09801c25de8753546ca11d4b8a3ad527742821eb032ad069cd13"},
+ {file = "gritql-0.2.0.tar.gz", hash = "sha256:09e26e3d3152d3ec2e4fa80c0af4f2fe1436c82a2c6343cec6ab74ae61474bae"},
]
[[package]]
@@ -2068,4 +2068,4 @@ serve = []
[metadata]
lock-version = "2.0"
python-versions = ">=3.9,<4.0"
-content-hash = "c666eaa9945394483db2cf56ec2c147869b2fcefb767184c83c4f0e2f211ea2b"
+content-hash = "58adcda49e89173e501324ed1090cb1765057e46e94230ab90b734b6cec11ff9"
diff --git a/libs/cli/pyproject.toml b/libs/cli/pyproject.toml
index 2e3ee70fea7abd..467777bd24939e 100644
--- a/libs/cli/pyproject.toml
+++ b/libs/cli/pyproject.toml
@@ -18,7 +18,7 @@ gitpython = "^3"
langserve = { extras = ["all"], version = ">=0.0.51" }
uvicorn = ">=0.23,<1.0"
tomlkit = ">=0.12"
-gritql = "^0.1.1"
+gritql = "^0.2.0"
[tool.poetry.scripts]
langchain = "langchain_cli.cli:app"
diff --git a/libs/community/Makefile b/libs/community/Makefile
index 55b63f009b519f..ff8c2cdbe5ec64 100644
--- a/libs/community/Makefile
+++ b/libs/community/Makefile
@@ -16,7 +16,7 @@ coverage:
$(TEST_FILE)
test tests:
- poetry run pytest --disable-socket --allow-unix-socket $(TEST_FILE)
+ poetry run pytest -n auto --disable-socket --allow-unix-socket $(TEST_FILE)
integration_tests:
poetry run pytest $(TEST_FILE)
diff --git a/libs/community/langchain_community/agent_toolkits/slack/toolkit.py b/libs/community/langchain_community/agent_toolkits/slack/toolkit.py
index fd61311326d70d..a8ca7f564192b1 100644
--- a/libs/community/langchain_community/agent_toolkits/slack/toolkit.py
+++ b/libs/community/langchain_community/agent_toolkits/slack/toolkit.py
@@ -13,7 +13,14 @@
from langchain_community.tools.slack.utils import login
if TYPE_CHECKING:
+    # This is for linting and IDE type hints
from slack_sdk import WebClient
+else:
+ try:
+ # We do this so pydantic can resolve the types when instantiating
+ from slack_sdk import WebClient
+ except ImportError:
+ pass
class SlackToolkit(BaseToolkit):
diff --git a/libs/community/langchain_community/chat_models/friendli.py b/libs/community/langchain_community/chat_models/friendli.py
index a860ebf98bc593..76bf70c9af189d 100644
--- a/libs/community/langchain_community/chat_models/friendli.py
+++ b/libs/community/langchain_community/chat_models/friendli.py
@@ -75,12 +75,12 @@ class ChatFriendli(BaseChatModel, BaseFriendli):
from langchain_community.chat_models import FriendliChat
chat = Friendli(
- model="llama-2-13b-chat", friendli_token="YOUR FRIENDLI TOKEN"
+ model="meta-llama-3.1-8b-instruct", friendli_token="YOUR FRIENDLI TOKEN"
)
chat.invoke("What is generative AI?")
"""
- model: str = "llama-2-13b-chat"
+ model: str = "meta-llama-3.1-8b-instruct"
@property
def lc_secrets(self) -> Dict[str, str]:
diff --git a/libs/community/langchain_community/document_loaders/parsers/doc_intelligence.py b/libs/community/langchain_community/document_loaders/parsers/doc_intelligence.py
index 107e569339fb5f..78ff0223595553 100644
--- a/libs/community/langchain_community/document_loaders/parsers/doc_intelligence.py
+++ b/libs/community/langchain_community/document_loaders/parsers/doc_intelligence.py
@@ -79,7 +79,7 @@ def lazy_parse(self, blob: Blob) -> Iterator[Document]:
with blob.as_bytes_io() as file_obj:
poller = self.client.begin_analyze_document(
self.api_model,
- file_obj,
+ body=file_obj,
content_type="application/octet-stream",
output_content_format="markdown" if self.mode == "markdown" else "text",
)
@@ -97,8 +97,7 @@ def parse_url(self, url: str) -> Iterator[Document]:
poller = self.client.begin_analyze_document(
self.api_model,
- AnalyzeDocumentRequest(url_source=url),
- # content_type="application/octet-stream",
+ body=AnalyzeDocumentRequest(url_source=url),
output_content_format="markdown" if self.mode == "markdown" else "text",
)
result = poller.result()
@@ -115,8 +114,7 @@ def parse_bytes(self, bytes_source: bytes) -> Iterator[Document]:
poller = self.client.begin_analyze_document(
self.api_model,
- analyze_request=AnalyzeDocumentRequest(bytes_source=bytes_source),
- # content_type="application/octet-stream",
+ body=AnalyzeDocumentRequest(bytes_source=bytes_source),
output_content_format="markdown" if self.mode == "markdown" else "text",
)
result = poller.result()
diff --git a/libs/community/langchain_community/document_loaders/parsers/pdf.py b/libs/community/langchain_community/document_loaders/parsers/pdf.py
index 479904b243ec32..1ff940d008f5a5 100644
--- a/libs/community/langchain_community/document_loaders/parsers/pdf.py
+++ b/libs/community/langchain_community/document_loaders/parsers/pdf.py
@@ -2049,12 +2049,6 @@ class ZeroxPDFParser(BaseBlobParser):
"Do not exclude any content from the page. ",
)
- @staticmethod
- def _is_valid_url(url: str) -> bool:
- """Check if the url is valid."""
- parsed = urlparse(url)
- return bool(parsed.netloc) and bool(parsed.scheme)
-
def __init__(
self,
mode: Literal["single", "page"] = "page",
@@ -2174,7 +2168,7 @@ def lazy_parse(self, blob: Blob) -> Iterator[Document]: # type: ignore[valid-ty
else:
file_path = str(blob.path)
- with blob.as_bytes_io() as pdf_file_obj, TemporaryDirectory() as tempdir:
+ with blob.as_bytes_io() as pdf_file_obj:
doc_metadata = purge_metadata(self._get_metadata(pdf_file_obj))
doc_metadata["source"] = blob.source or blob.path
@@ -2238,7 +2232,6 @@ def lazy_parse(self, blob: Blob) -> Iterator[Document]: # type: ignore[valid-ty
if temp_file:
temp_file.close()
-
def _get_metadata(
self,
fp: BinaryIO,
diff --git a/libs/community/langchain_community/llms/azureml_endpoint.py b/libs/community/langchain_community/llms/azureml_endpoint.py
index 5c34ca1547743a..bfe61fdbc5fa51 100644
--- a/libs/community/langchain_community/llms/azureml_endpoint.py
+++ b/libs/community/langchain_community/llms/azureml_endpoint.py
@@ -434,7 +434,8 @@ def validate_endpoint_url(cls, field_value: Any) -> str:
raise ValueError(
"`endpoint_url` should contain the full invocation URL including "
"`/score` for `endpoint_api_type='dedicated'` or `/completions` "
- "or `/chat/completions` for `endpoint_api_type='serverless'`"
+ "or `/chat/completions` or `/models/chat/completions` "
+ "for `endpoint_api_type='serverless'`"
)
return field_value
@@ -455,16 +456,19 @@ def validate_endpoint_api_type(
"Endpoints of type `dedicated` should follow the format "
"`https://..inference.ml.azure.com/score`."
" If your endpoint URL ends with `/completions` or"
- "`/chat/completions`, use `endpoint_api_type='serverless'` instead."
+ "`/chat/completions` or `/models/chat/completions`,"
+ "use `endpoint_api_type='serverless'` instead."
)
if field_value == AzureMLEndpointApiType.serverless and not (
endpoint_url.endswith("/completions") # type: ignore[union-attr]
or endpoint_url.endswith("/chat/completions") # type: ignore[union-attr]
+ or endpoint_url.endswith("/models/chat/completions") # type: ignore[union-attr]
):
raise ValueError(
"Endpoints of type `serverless` should follow the format "
- "`https://..inference.ml.azure.com/chat/completions`"
+ "`https://..inference.ml.azure.com/completions`"
" or `https://..inference.ml.azure.com/chat/completions`"
+ " or `https://..inference.ml.azure.com/models/chat/completions`"
)
return field_value
diff --git a/libs/community/langchain_community/llms/friendli.py b/libs/community/langchain_community/llms/friendli.py
index 74b1ef7d199f86..d33c80eb39a426 100644
--- a/libs/community/langchain_community/llms/friendli.py
+++ b/libs/community/langchain_community/llms/friendli.py
@@ -16,14 +16,19 @@
from pydantic import Field, SecretStr
-def _stream_response_to_generation_chunk(stream_response: Any) -> GenerationChunk:
+def _stream_response_to_generation_chunk(
+ stream_response: Any,
+) -> GenerationChunk:
"""Convert a stream response to a generation chunk."""
- if stream_response.event == "token_sampled":
- return GenerationChunk(
- text=stream_response.text,
- generation_info={"token": str(stream_response.token)},
- )
- return GenerationChunk(text="")
+ if not stream_response.get("choices", None):
+ return GenerationChunk(text="")
+ return GenerationChunk(
+ text=stream_response.choices[0].text,
+ # generation_info=dict(
+ # finish_reason=stream_response.choices[0].get("finish_reason", None),
+ # logprobs=stream_response.choices[0].get("logprobs", None),
+ # ),
+ )
class BaseFriendli(Serializable):
@@ -34,7 +39,7 @@ class BaseFriendli(Serializable):
# Friendli Async client.
async_client: Any = Field(default=None, exclude=True)
# Model name to use.
- model: str = "mixtral-8x7b-instruct-v0-1"
+ model: str = "meta-llama-3.1-8b-instruct"
# Friendli personal access token to run as.
friendli_token: Optional[SecretStr] = None
# Friendli team ID to run as.
@@ -107,7 +112,7 @@ class Friendli(LLM, BaseFriendli):
from langchain_community.llms import Friendli
friendli = Friendli(
- model="mixtral-8x7b-instruct-v0-1", friendli_token="YOUR FRIENDLI TOKEN"
+ model="meta-llama-3.1-8b-instruct", friendli_token="YOUR FRIENDLI TOKEN"
)
"""
diff --git a/libs/community/langchain_community/tools/slack/base.py b/libs/community/langchain_community/tools/slack/base.py
index 23a3fa6c99f941..38eb8f7d3f3c4d 100644
--- a/libs/community/langchain_community/tools/slack/base.py
+++ b/libs/community/langchain_community/tools/slack/base.py
@@ -10,7 +10,14 @@
from langchain_community.tools.slack.utils import login
if TYPE_CHECKING:
+    # This is for linting and IDE type hints
from slack_sdk import WebClient
+else:
+ try:
+ # We do this so pydantic can resolve the types when instantiating
+ from slack_sdk import WebClient
+ except ImportError:
+ pass
class SlackBaseTool(BaseTool): # type: ignore[override]
diff --git a/libs/community/langchain_community/utilities/duckduckgo_search.py b/libs/community/langchain_community/utilities/duckduckgo_search.py
index ede5c12298710f..328161e7cc2778 100644
--- a/libs/community/langchain_community/utilities/duckduckgo_search.py
+++ b/libs/community/langchain_community/utilities/duckduckgo_search.py
@@ -28,9 +28,9 @@ class DuckDuckGoSearchAPIWrapper(BaseModel):
Options: d, w, m, y
"""
max_results: int = 5
- backend: str = "api"
+ backend: str = "auto"
"""
- Options: api, html, lite
+ Options: auto, html, lite
"""
source: str = "text"
"""
diff --git a/libs/community/poetry.lock b/libs/community/poetry.lock
index bf755468c29af4..08de8fb98e1fd3 100644
--- a/libs/community/poetry.lock
+++ b/libs/community/poetry.lock
@@ -1,4 +1,4 @@
-# This file is automatically @generated by Poetry 1.8.3 and should not be changed by hand.
+# This file is automatically @generated by Poetry 1.8.2 and should not be changed by hand.
[[package]]
name = "aiohappyeyeballs"
@@ -971,6 +971,20 @@ files = [
[package.extras]
test = ["pytest (>=6)"]
+[[package]]
+name = "execnet"
+version = "2.1.1"
+description = "execnet: rapid multi-Python deployment"
+optional = false
+python-versions = ">=3.8"
+files = [
+ {file = "execnet-2.1.1-py3-none-any.whl", hash = "sha256:26dee51f1b80cebd6d0ca8e74dd8745419761d3bef34163928cbebbdc4749fdc"},
+ {file = "execnet-2.1.1.tar.gz", hash = "sha256:5189b52c6121c24feae288166ab41b32549c7e2348652736540b9e6e7d4e72e3"},
+]
+
+[package.extras]
+testing = ["hatch", "pre-commit", "pytest", "tox"]
+
[[package]]
name = "executing"
version = "2.1.0"
@@ -3254,6 +3268,26 @@ files = [
[package.dependencies]
watchdog = ">=2.0.0"
+[[package]]
+name = "pytest-xdist"
+version = "3.6.1"
+description = "pytest xdist plugin for distributed testing, most importantly across multiple CPUs"
+optional = false
+python-versions = ">=3.8"
+files = [
+ {file = "pytest_xdist-3.6.1-py3-none-any.whl", hash = "sha256:9ed4adfb68a016610848639bb7e02c9352d5d9f03d04809919e2dafc3be4cca7"},
+ {file = "pytest_xdist-3.6.1.tar.gz", hash = "sha256:ead156a4db231eec769737f57668ef58a2084a34b2e55c4a8fa20d861107300d"},
+]
+
+[package.dependencies]
+execnet = ">=2.1"
+pytest = ">=7.0.0"
+
+[package.extras]
+psutil = ["psutil (>=3.0)"]
+setproctitle = ["setproctitle"]
+testing = ["filelock"]
+
[[package]]
name = "python-dateutil"
version = "2.9.0.post0"
@@ -3858,14 +3892,61 @@ description = "Database Abstraction Library"
optional = false
python-versions = ">=3.7"
files = [
+ {file = "SQLAlchemy-2.0.36-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:59b8f3adb3971929a3e660337f5dacc5942c2cdb760afcabb2614ffbda9f9f72"},
+ {file = "SQLAlchemy-2.0.36-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:37350015056a553e442ff672c2d20e6f4b6d0b2495691fa239d8aa18bb3bc908"},
+ {file = "SQLAlchemy-2.0.36-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8318f4776c85abc3f40ab185e388bee7a6ea99e7fa3a30686580b209eaa35c08"},
{file = "SQLAlchemy-2.0.36-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c245b1fbade9c35e5bd3b64270ab49ce990369018289ecfde3f9c318411aaa07"},
+ {file = "SQLAlchemy-2.0.36-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:69f93723edbca7342624d09f6704e7126b152eaed3cdbb634cb657a54332a3c5"},
{file = "SQLAlchemy-2.0.36-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:f9511d8dd4a6e9271d07d150fb2f81874a3c8c95e11ff9af3a2dfc35fe42ee44"},
{file = "SQLAlchemy-2.0.36-cp310-cp310-win32.whl", hash = "sha256:c3f3631693003d8e585d4200730616b78fafd5a01ef8b698f6967da5c605b3fa"},
{file = "SQLAlchemy-2.0.36-cp310-cp310-win_amd64.whl", hash = "sha256:a86bfab2ef46d63300c0f06936bd6e6c0105faa11d509083ba8f2f9d237fb5b5"},
+ {file = "SQLAlchemy-2.0.36-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:fd3a55deef00f689ce931d4d1b23fa9f04c880a48ee97af488fd215cf24e2a6c"},
+ {file = "SQLAlchemy-2.0.36-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:4f5e9cd989b45b73bd359f693b935364f7e1f79486e29015813c338450aa5a71"},
+ {file = "SQLAlchemy-2.0.36-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d0ddd9db6e59c44875211bc4c7953a9f6638b937b0a88ae6d09eb46cced54eff"},
{file = "SQLAlchemy-2.0.36-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2519f3a5d0517fc159afab1015e54bb81b4406c278749779be57a569d8d1bb0d"},
+ {file = "SQLAlchemy-2.0.36-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:59b1ee96617135f6e1d6f275bbe988f419c5178016f3d41d3c0abb0c819f75bb"},
{file = "SQLAlchemy-2.0.36-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:39769a115f730d683b0eb7b694db9789267bcd027326cccc3125e862eb03bfd8"},
{file = "SQLAlchemy-2.0.36-cp311-cp311-win32.whl", hash = "sha256:66bffbad8d6271bb1cc2f9a4ea4f86f80fe5e2e3e501a5ae2a3dc6a76e604e6f"},
{file = "SQLAlchemy-2.0.36-cp311-cp311-win_amd64.whl", hash = "sha256:23623166bfefe1487d81b698c423f8678e80df8b54614c2bf4b4cfcd7c711959"},
+ {file = "SQLAlchemy-2.0.36-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:f7b64e6ec3f02c35647be6b4851008b26cff592a95ecb13b6788a54ef80bbdd4"},
+ {file = "SQLAlchemy-2.0.36-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:46331b00096a6db1fdc052d55b101dbbfc99155a548e20a0e4a8e5e4d1362855"},
+ {file = "SQLAlchemy-2.0.36-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fdf3386a801ea5aba17c6410dd1dc8d39cf454ca2565541b5ac42a84e1e28f53"},
+ {file = "SQLAlchemy-2.0.36-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ac9dfa18ff2a67b09b372d5db8743c27966abf0e5344c555d86cc7199f7ad83a"},
+ {file = "SQLAlchemy-2.0.36-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:90812a8933df713fdf748b355527e3af257a11e415b613dd794512461eb8a686"},
+ {file = "SQLAlchemy-2.0.36-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:1bc330d9d29c7f06f003ab10e1eaced295e87940405afe1b110f2eb93a233588"},
+ {file = "SQLAlchemy-2.0.36-cp312-cp312-win32.whl", hash = "sha256:79d2e78abc26d871875b419e1fd3c0bca31a1cb0043277d0d850014599626c2e"},
+ {file = "SQLAlchemy-2.0.36-cp312-cp312-win_amd64.whl", hash = "sha256:b544ad1935a8541d177cb402948b94e871067656b3a0b9e91dbec136b06a2ff5"},
+ {file = "SQLAlchemy-2.0.36-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:b5cc79df7f4bc3d11e4b542596c03826063092611e481fcf1c9dfee3c94355ef"},
+ {file = "SQLAlchemy-2.0.36-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:3c01117dd36800f2ecaa238c65365b7b16497adc1522bf84906e5710ee9ba0e8"},
+ {file = "SQLAlchemy-2.0.36-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9bc633f4ee4b4c46e7adcb3a9b5ec083bf1d9a97c1d3854b92749d935de40b9b"},
+ {file = "SQLAlchemy-2.0.36-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9e46ed38affdfc95d2c958de328d037d87801cfcbea6d421000859e9789e61c2"},
+ {file = "SQLAlchemy-2.0.36-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:b2985c0b06e989c043f1dc09d4fe89e1616aadd35392aea2844f0458a989eacf"},
+ {file = "SQLAlchemy-2.0.36-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:4a121d62ebe7d26fec9155f83f8be5189ef1405f5973ea4874a26fab9f1e262c"},
+ {file = "SQLAlchemy-2.0.36-cp313-cp313-win32.whl", hash = "sha256:0572f4bd6f94752167adfd7c1bed84f4b240ee6203a95e05d1e208d488d0d436"},
+ {file = "SQLAlchemy-2.0.36-cp313-cp313-win_amd64.whl", hash = "sha256:8c78ac40bde930c60e0f78b3cd184c580f89456dd87fc08f9e3ee3ce8765ce88"},
+ {file = "SQLAlchemy-2.0.36-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:be9812b766cad94a25bc63bec11f88c4ad3629a0cec1cd5d4ba48dc23860486b"},
+ {file = "SQLAlchemy-2.0.36-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:50aae840ebbd6cdd41af1c14590e5741665e5272d2fee999306673a1bb1fdb4d"},
+ {file = "SQLAlchemy-2.0.36-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4557e1f11c5f653ebfdd924f3f9d5ebfc718283b0b9beebaa5dd6b77ec290971"},
+ {file = "SQLAlchemy-2.0.36-cp37-cp37m-musllinux_1_2_aarch64.whl", hash = "sha256:07b441f7d03b9a66299ce7ccf3ef2900abc81c0db434f42a5694a37bd73870f2"},
+ {file = "SQLAlchemy-2.0.36-cp37-cp37m-musllinux_1_2_x86_64.whl", hash = "sha256:28120ef39c92c2dd60f2721af9328479516844c6b550b077ca450c7d7dc68575"},
+ {file = "SQLAlchemy-2.0.36-cp37-cp37m-win32.whl", hash = "sha256:b81ee3d84803fd42d0b154cb6892ae57ea6b7c55d8359a02379965706c7efe6c"},
+ {file = "SQLAlchemy-2.0.36-cp37-cp37m-win_amd64.whl", hash = "sha256:f942a799516184c855e1a32fbc7b29d7e571b52612647866d4ec1c3242578fcb"},
+ {file = "SQLAlchemy-2.0.36-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:3d6718667da04294d7df1670d70eeddd414f313738d20a6f1d1f379e3139a545"},
+ {file = "SQLAlchemy-2.0.36-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:72c28b84b174ce8af8504ca28ae9347d317f9dba3999e5981a3cd441f3712e24"},
+ {file = "SQLAlchemy-2.0.36-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b11d0cfdd2b095e7b0686cf5fabeb9c67fae5b06d265d8180715b8cfa86522e3"},
+ {file = "SQLAlchemy-2.0.36-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e32092c47011d113dc01ab3e1d3ce9f006a47223b18422c5c0d150af13a00687"},
+ {file = "SQLAlchemy-2.0.36-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:6a440293d802d3011028e14e4226da1434b373cbaf4a4bbb63f845761a708346"},
+ {file = "SQLAlchemy-2.0.36-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:c54a1e53a0c308a8e8a7dffb59097bff7facda27c70c286f005327f21b2bd6b1"},
+ {file = "SQLAlchemy-2.0.36-cp38-cp38-win32.whl", hash = "sha256:1e0d612a17581b6616ff03c8e3d5eff7452f34655c901f75d62bd86449d9750e"},
+ {file = "SQLAlchemy-2.0.36-cp38-cp38-win_amd64.whl", hash = "sha256:8958b10490125124463095bbdadda5aa22ec799f91958e410438ad6c97a7b793"},
+ {file = "SQLAlchemy-2.0.36-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:dc022184d3e5cacc9579e41805a681187650e170eb2fd70e28b86192a479dcaa"},
+ {file = "SQLAlchemy-2.0.36-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:b817d41d692bf286abc181f8af476c4fbef3fd05e798777492618378448ee689"},
+ {file = "SQLAlchemy-2.0.36-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a4e46a888b54be23d03a89be510f24a7652fe6ff660787b96cd0e57a4ebcb46d"},
+ {file = "SQLAlchemy-2.0.36-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c4ae3005ed83f5967f961fd091f2f8c5329161f69ce8480aa8168b2d7fe37f06"},
+ {file = "SQLAlchemy-2.0.36-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:03e08af7a5f9386a43919eda9de33ffda16b44eb11f3b313e6822243770e9763"},
+ {file = "SQLAlchemy-2.0.36-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:3dbb986bad3ed5ceaf090200eba750b5245150bd97d3e67343a3cfed06feecf7"},
+ {file = "SQLAlchemy-2.0.36-cp39-cp39-win32.whl", hash = "sha256:9fe53b404f24789b5ea9003fc25b9a3988feddebd7e7b369c8fac27ad6f52f28"},
+ {file = "SQLAlchemy-2.0.36-cp39-cp39-win_amd64.whl", hash = "sha256:af148a33ff0349f53512a049c6406923e4e02bf2f26c5fb285f143faf4f0e46a"},
{file = "SQLAlchemy-2.0.36-py3-none-any.whl", hash = "sha256:fddbe92b4760c6f5d48162aef14824add991aeda8ddadb3c31d56eb15ca69f8e"},
{file = "sqlalchemy-2.0.36.tar.gz", hash = "sha256:7f2767680b6d2398aea7082e45a774b2b0767b5c8d8ffb9c8b683088ea9b29c5"},
]
@@ -4634,4 +4715,4 @@ type = ["pytest-mypy"]
[metadata]
lock-version = "2.0"
python-versions = ">=3.9,<4.0"
-content-hash = "fe58adfe92b997ebd50a0b1f36bb4a4a2296a5e2203ced651363302a246655b1"
+content-hash = "92c00e0689d9f6cec3122bf6faf8e4fac3829f1cdd33b21a39ec92e1b5aa8585"
diff --git a/libs/community/pyproject.toml b/libs/community/pyproject.toml
index 6053686eed041f..9e84660b9390ce 100644
--- a/libs/community/pyproject.toml
+++ b/libs/community/pyproject.toml
@@ -102,6 +102,7 @@ pytest-mock = "^3.10.0"
pytest-socket = "^0.6.0"
syrupy = "^4.0.2"
requests-mock = "^1.11.0"
+pytest-xdist = "^3.6.1"
[[tool.poetry.group.test.dependencies.cffi]]
version = "<1.17.1"
python = "<3.10"
diff --git a/libs/community/tests/unit_tests/llms/test_friendli.py b/libs/community/tests/unit_tests/llms/test_friendli.py
index 6fd4593d93d3b9..18cf5c3364425e 100644
--- a/libs/community/tests/unit_tests/llms/test_friendli.py
+++ b/libs/community/tests/unit_tests/llms/test_friendli.py
@@ -114,14 +114,14 @@ async def test_friendli_ainvoke(
@pytest.mark.requires("friendli")
def test_friendli_stream(mock_friendli_client: Mock, friendli_llm: Friendli) -> None:
"""Test stream with friendli."""
+ mock_choice_0 = Mock()
+ mock_choice_0.text = "Hello "
+ mock_choice_1 = Mock()
+ mock_choice_1.text = "langchain"
mock_chunk_0 = Mock()
- mock_chunk_0.event = "token_sampled"
- mock_chunk_0.text = "Hello "
- mock_chunk_0.token = 0
+ mock_chunk_0.choices = [mock_choice_0]
mock_chunk_1 = Mock()
- mock_chunk_1.event = "token_sampled"
- mock_chunk_1.text = "Friendli"
- mock_chunk_1.token = 1
+ mock_chunk_1.choices = [mock_choice_1]
mock_stream = MagicMock()
mock_chunks = [mock_chunk_0, mock_chunk_1]
mock_stream.__iter__.return_value = mock_chunks
@@ -129,7 +129,7 @@ def test_friendli_stream(mock_friendli_client: Mock, friendli_llm: Friendli) ->
mock_friendli_client.completions.create.return_value = mock_stream
stream = friendli_llm.stream("Hello langchain")
for i, chunk in enumerate(stream):
- assert chunk == mock_chunks[i].text
+ assert chunk == mock_chunks[i].choices[0].text
mock_friendli_client.completions.create.assert_called_once_with(
model=friendli_llm.model,
@@ -149,22 +149,22 @@ async def test_friendli_astream(
mock_friendli_async_client: AsyncMock, friendli_llm: Friendli
) -> None:
"""Test async stream with friendli."""
+ mock_choice_0 = Mock()
+ mock_choice_0.text = "Hello "
+ mock_choice_1 = Mock()
+ mock_choice_1.text = "langchain"
mock_chunk_0 = Mock()
- mock_chunk_0.event = "token_sampled"
- mock_chunk_0.text = "Hello "
- mock_chunk_0.token = 0
+ mock_chunk_0.choices = [mock_choice_0]
mock_chunk_1 = Mock()
- mock_chunk_1.event = "token_sampled"
- mock_chunk_1.text = "Friendli"
- mock_chunk_1.token = 1
+ mock_chunk_1.choices = [mock_choice_1]
mock_stream = AsyncMock()
mock_chunks = [mock_chunk_0, mock_chunk_1]
- mock_stream.__aiter__.return_value = mock_chunks
+ mock_stream.__aiter__.return_value = iter(mock_chunks)
mock_friendli_async_client.completions.create.return_value = mock_stream
stream = friendli_llm.astream("Hello langchain")
async for i, chunk in aenumerate(stream):
- assert chunk == mock_chunks[i].text
+ assert chunk == mock_chunks[i].choices[0].text
mock_friendli_async_client.completions.create.assert_awaited_once_with(
model=friendli_llm.model,
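
Both friendli test hunks above move the mocks to the SDK's OpenAI-style chunk shape (`chunk.choices[0].text`) and hand `__aiter__` a real iterator rather than a bare list. A minimal, self-contained sketch of that async-mock pattern — the chunk shape is copied from the test, all other names are illustrative:

```python
# Sketch of mocking an async stream the way the test above does.
# Requires only the standard library (Python 3.8+ unittest.mock).
import asyncio
from unittest.mock import AsyncMock, Mock

async def main() -> None:
    chunk = Mock()
    chunk.choices = [Mock(text="Hello ")]
    stream = AsyncMock()
    # A fresh iterator (not a bare list) mirrors the fix above and makes
    # the single-pass consumption of the stream explicit.
    stream.__aiter__.return_value = iter([chunk])
    async for c in stream:
        print(c.choices[0].text)

asyncio.run(main())
```
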
diff --git a/libs/community/tests/unit_tests/test_dependencies.py b/libs/community/tests/unit_tests/test_dependencies.py
index a5c8c7c12c100a..0368f3a1436244 100644
--- a/libs/community/tests/unit_tests/test_dependencies.py
+++ b/libs/community/tests/unit_tests/test_dependencies.py
@@ -91,6 +91,7 @@ def test_test_group_dependencies(poetry_conf: Mapping[str, Any]) -> None:
"pytest-mock",
"pytest-socket",
"pytest-watcher",
+ "pytest-xdist",
"responses",
"syrupy",
"requests-mock",
diff --git a/libs/core/Makefile b/libs/core/Makefile
index 61ac2dcc6971ef..169e49a4979671 100644
--- a/libs/core/Makefile
+++ b/libs/core/Makefile
@@ -12,7 +12,7 @@ test tests:
-u LANGCHAIN_API_KEY \
-u LANGSMITH_TRACING \
-u LANGCHAIN_PROJECT \
- poetry run pytest --disable-socket --allow-unix-socket $(TEST_FILE)
+ poetry run pytest -n auto --disable-socket --allow-unix-socket $(TEST_FILE)
test_watch:
env \
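
`-n auto` is pytest-xdist's switch for spawning one worker process per available CPU; the same flag is added to the langchain and text-splitters Makefiles below, and `pytest-xdist` itself to each test dependency group. A hedged sketch of a test that stays safe under parallel workers — `PYTEST_XDIST_WORKER` is the environment variable xdist sets in each worker, while the test itself is purely illustrative:

```python
# Illustrative only: per-test resources such as tmp_path keep
# `pytest -n auto` runs independent across workers.
import os

def test_workers_do_not_share_tmp(tmp_path):
    # Each xdist worker (gw0, gw1, ...) receives its own tmp_path, so
    # writing here cannot collide with a sibling worker's test.
    worker = os.environ.get("PYTEST_XDIST_WORKER", "main")
    marker = tmp_path / "owner.txt"
    marker.write_text(worker)
    assert marker.read_text() == worker
```
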
diff --git a/libs/core/langchain_core/_api/deprecation.py b/libs/core/langchain_core/_api/deprecation.py
index c7d1e87b3bac8a..0e254053d88199 100644
--- a/libs/core/langchain_core/_api/deprecation.py
+++ b/libs/core/langchain_core/_api/deprecation.py
@@ -360,10 +360,9 @@ def finalize(wrapper: Callable[..., Any], new_doc: str) -> T:
_addendum,
]
details = " ".join([component.strip() for component in components if component])
- package = (
- _package or _name.split(".")[0].replace("_", "-") if "." in _name else None
+ package = _package or (
+ _name.split(".")[0].replace("_", "-") if "." in _name else None
)
- since_str = f"{package}=={since}" if package else since
if removal:
if removal.startswith("1.") and package and package.startswith("langchain"):
removal_str = f"It will not be removed until {package}=={removal}."
@@ -372,7 +371,7 @@ def finalize(wrapper: Callable[..., Any], new_doc: str) -> T:
else:
removal_str = ""
new_doc = f"""\
-.. deprecated:: {since_str} {details} {removal_str}
+.. deprecated:: {since} {details} {removal_str}
{old_doc}\
"""
diff --git a/libs/core/langchain_core/prompts/base.py b/libs/core/langchain_core/prompts/base.py
index 042b2d77ea80f1..803302f1898d01 100644
--- a/libs/core/langchain_core/prompts/base.py
+++ b/libs/core/langchain_core/prompts/base.py
@@ -264,7 +264,7 @@ def partial(self, **kwargs: Union[str, Callable[[], str]]) -> BasePromptTemplate
"""Return a partial of the prompt template.
Args:
- kwargs: Union[str, Callable[[], str], partial variables to set.
+ kwargs: Union[str, Callable[[], str]], partial variables to set.
Returns:
BasePromptTemplate: A partial of the prompt template.
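
The docstring fix above closes an unbalanced bracket in the `kwargs` type annotation; for context, `partial` pre-fills a subset of a template's input variables with strings or zero-argument callables. A short usage sketch, with illustrative values:

```python
# Illustrative use of BasePromptTemplate.partial via PromptTemplate.
from datetime import date
from langchain_core.prompts import PromptTemplate

template = PromptTemplate.from_template("Today is {today}. Answer: {question}")
# Pre-fill `today` with a zero-argument callable; `question` stays open
# and is supplied at format time.
partial_template = template.partial(today=lambda: date.today().isoformat())
print(partial_template.format(question="What day is it?"))
```
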
diff --git a/libs/core/poetry.lock b/libs/core/poetry.lock
index 53962198d8297b..eadfb8d460c701 100644
--- a/libs/core/poetry.lock
+++ b/libs/core/poetry.lock
@@ -523,6 +523,20 @@ files = [
[package.extras]
test = ["pytest (>=6)"]
+[[package]]
+name = "execnet"
+version = "2.1.1"
+description = "execnet: rapid multi-Python deployment"
+optional = false
+python-versions = ">=3.8"
+files = [
+ {file = "execnet-2.1.1-py3-none-any.whl", hash = "sha256:26dee51f1b80cebd6d0ca8e74dd8745419761d3bef34163928cbebbdc4749fdc"},
+ {file = "execnet-2.1.1.tar.gz", hash = "sha256:5189b52c6121c24feae288166ab41b32549c7e2348652736540b9e6e7d4e72e3"},
+]
+
+[package.extras]
+testing = ["hatch", "pre-commit", "pytest", "tox"]
+
[[package]]
name = "executing"
version = "2.1.0"
@@ -2160,6 +2174,26 @@ files = [
tomli = {version = ">=2.0.1,<3.0.0", markers = "python_version < \"3.11\""}
watchdog = ">=2.0.0"
+[[package]]
+name = "pytest-xdist"
+version = "3.6.1"
+description = "pytest xdist plugin for distributed testing, most importantly across multiple CPUs"
+optional = false
+python-versions = ">=3.8"
+files = [
+ {file = "pytest_xdist-3.6.1-py3-none-any.whl", hash = "sha256:9ed4adfb68a016610848639bb7e02c9352d5d9f03d04809919e2dafc3be4cca7"},
+ {file = "pytest_xdist-3.6.1.tar.gz", hash = "sha256:ead156a4db231eec769737f57668ef58a2084a34b2e55c4a8fa20d861107300d"},
+]
+
+[package.dependencies]
+execnet = ">=2.1"
+pytest = ">=7.0.0"
+
+[package.extras]
+psutil = ["psutil (>=3.0)"]
+setproctitle = ["setproctitle"]
+testing = ["filelock"]
+
[[package]]
name = "python-dateutil"
version = "2.9.0.post0"
@@ -3104,4 +3138,4 @@ type = ["pytest-mypy"]
[metadata]
lock-version = "2.0"
python-versions = ">=3.9,<4.0"
-content-hash = "5accfdfd412486fbf7bb3ef18f00e75db40599034428651ef014b0bc3927ddfa"
+content-hash = "65d2f612fead6395befc285353347bf82d09044ce832c278f8b35e4f179caebb"
diff --git a/libs/core/pyproject.toml b/libs/core/pyproject.toml
index 2bba5c7ff609d4..79d46c3f677385 100644
--- a/libs/core/pyproject.toml
+++ b/libs/core/pyproject.toml
@@ -84,17 +84,20 @@ classmethod-decorators = [ "classmethod", "langchain_core.utils.pydantic.pre_ini
[tool.poetry.group.lint.dependencies]
ruff = "^0.5"
+
[tool.poetry.group.typing.dependencies]
mypy = ">=1.10,<1.11"
types-pyyaml = "^6.0.12.2"
types-requests = "^2.28.11.5"
types-jinja2 = "^2.11.9"
+
[tool.poetry.group.dev.dependencies]
jupyter = "^1.0.0"
setuptools = "^67.6.1"
grandalf = "^0.8"
+
[tool.poetry.group.test.dependencies]
pytest = "^8"
freezegun = "^1.2.2"
@@ -105,6 +108,7 @@ pytest-asyncio = "^0.21.1"
grandalf = "^0.8"
responses = "^0.25.0"
pytest-socket = "^0.7.0"
+pytest-xdist = "^3.6.1"
[[tool.poetry.group.test.dependencies.numpy]]
version = "^1.24.0"
python = "<3.12"
@@ -113,12 +117,15 @@ python = "<3.12"
version = ">=1.26.0,<3"
python = ">=3.12"
+
[tool.poetry.group.test_integration.dependencies]
+
[tool.poetry.group.typing.dependencies.langchain-text-splitters]
path = "../text-splitters"
develop = true
+
[tool.poetry.group.test.dependencies.langchain-tests]
path = "../standard-tests"
develop = true
diff --git a/libs/langchain/Makefile b/libs/langchain/Makefile
index e06cd2e65d1a94..4da2edd8115603 100644
--- a/libs/langchain/Makefile
+++ b/libs/langchain/Makefile
@@ -19,7 +19,7 @@ coverage:
$(TEST_FILE)
test tests:
- poetry run pytest --disable-socket --allow-unix-socket $(TEST_FILE)
+ poetry run pytest -n auto --disable-socket --allow-unix-socket $(TEST_FILE)
extended_tests:
poetry run pytest --disable-socket --allow-unix-socket --only-extended tests/unit_tests
diff --git a/libs/langchain/langchain/_api/deprecation.py b/libs/langchain/langchain/_api/deprecation.py
index ecd5a71b8964f3..6d5f2cf6895f17 100644
--- a/libs/langchain/langchain/_api/deprecation.py
+++ b/libs/langchain/langchain/_api/deprecation.py
@@ -11,11 +11,13 @@
"LangChain agents will continue to be supported, but it is recommended for new "
"use cases to be built with LangGraph. LangGraph offers a more flexible and "
"full-featured framework for building agents, including support for "
- "tool-calling, persistence of state, and human-in-the-loop workflows. See "
- "LangGraph documentation for more details: "
- "https://langchain-ai.github.io/langgraph/. Refer here for its pre-built "
- "ReAct agent: "
- "https://langchain-ai.github.io/langgraph/how-tos/create-react-agent/"
+ "tool-calling, persistence of state, and human-in-the-loop workflows. For "
+ "details, refer to the "
+ "`LangGraph documentation `_"
+ " as well as guides for "
+ "`Migrating from AgentExecutor `_" # noqa: E501
+ " and LangGraph's "
+ "`Pre-built ReAct agent `_." # noqa: E501
)
diff --git a/libs/langchain/poetry.lock b/libs/langchain/poetry.lock
index f892ddfcda4e02..592c99e27192ad 100644
--- a/libs/langchain/poetry.lock
+++ b/libs/langchain/poetry.lock
@@ -1,4 +1,4 @@
-# This file is automatically @generated by Poetry 1.8.3 and should not be changed by hand.
+# This file is automatically @generated by Poetry 1.8.2 and should not be changed by hand.
[[package]]
name = "aiohappyeyeballs"
@@ -1043,6 +1043,20 @@ files = [
[package.extras]
test = ["pytest (>=6)"]
+[[package]]
+name = "execnet"
+version = "2.1.1"
+description = "execnet: rapid multi-Python deployment"
+optional = false
+python-versions = ">=3.8"
+files = [
+ {file = "execnet-2.1.1-py3-none-any.whl", hash = "sha256:26dee51f1b80cebd6d0ca8e74dd8745419761d3bef34163928cbebbdc4749fdc"},
+ {file = "execnet-2.1.1.tar.gz", hash = "sha256:5189b52c6121c24feae288166ab41b32549c7e2348652736540b9e6e7d4e72e3"},
+]
+
+[package.extras]
+testing = ["hatch", "pre-commit", "pytest", "tox"]
+
[[package]]
name = "executing"
version = "2.1.0"
@@ -3443,6 +3457,26 @@ files = [
[package.dependencies]
watchdog = ">=2.0.0"
+[[package]]
+name = "pytest-xdist"
+version = "3.6.1"
+description = "pytest xdist plugin for distributed testing, most importantly across multiple CPUs"
+optional = false
+python-versions = ">=3.8"
+files = [
+ {file = "pytest_xdist-3.6.1-py3-none-any.whl", hash = "sha256:9ed4adfb68a016610848639bb7e02c9352d5d9f03d04809919e2dafc3be4cca7"},
+ {file = "pytest_xdist-3.6.1.tar.gz", hash = "sha256:ead156a4db231eec769737f57668ef58a2084a34b2e55c4a8fa20d861107300d"},
+]
+
+[package.dependencies]
+execnet = ">=2.1"
+pytest = ">=7.0.0"
+
+[package.extras]
+psutil = ["psutil (>=3.0)"]
+setproctitle = ["setproctitle"]
+testing = ["filelock"]
+
[[package]]
name = "python-dateutil"
version = "2.9.0.post0"
@@ -4150,14 +4184,61 @@ description = "Database Abstraction Library"
optional = false
python-versions = ">=3.7"
files = [
+ {file = "SQLAlchemy-2.0.36-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:59b8f3adb3971929a3e660337f5dacc5942c2cdb760afcabb2614ffbda9f9f72"},
+ {file = "SQLAlchemy-2.0.36-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:37350015056a553e442ff672c2d20e6f4b6d0b2495691fa239d8aa18bb3bc908"},
+ {file = "SQLAlchemy-2.0.36-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8318f4776c85abc3f40ab185e388bee7a6ea99e7fa3a30686580b209eaa35c08"},
{file = "SQLAlchemy-2.0.36-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c245b1fbade9c35e5bd3b64270ab49ce990369018289ecfde3f9c318411aaa07"},
+ {file = "SQLAlchemy-2.0.36-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:69f93723edbca7342624d09f6704e7126b152eaed3cdbb634cb657a54332a3c5"},
{file = "SQLAlchemy-2.0.36-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:f9511d8dd4a6e9271d07d150fb2f81874a3c8c95e11ff9af3a2dfc35fe42ee44"},
{file = "SQLAlchemy-2.0.36-cp310-cp310-win32.whl", hash = "sha256:c3f3631693003d8e585d4200730616b78fafd5a01ef8b698f6967da5c605b3fa"},
{file = "SQLAlchemy-2.0.36-cp310-cp310-win_amd64.whl", hash = "sha256:a86bfab2ef46d63300c0f06936bd6e6c0105faa11d509083ba8f2f9d237fb5b5"},
+ {file = "SQLAlchemy-2.0.36-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:fd3a55deef00f689ce931d4d1b23fa9f04c880a48ee97af488fd215cf24e2a6c"},
+ {file = "SQLAlchemy-2.0.36-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:4f5e9cd989b45b73bd359f693b935364f7e1f79486e29015813c338450aa5a71"},
+ {file = "SQLAlchemy-2.0.36-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d0ddd9db6e59c44875211bc4c7953a9f6638b937b0a88ae6d09eb46cced54eff"},
{file = "SQLAlchemy-2.0.36-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2519f3a5d0517fc159afab1015e54bb81b4406c278749779be57a569d8d1bb0d"},
+ {file = "SQLAlchemy-2.0.36-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:59b1ee96617135f6e1d6f275bbe988f419c5178016f3d41d3c0abb0c819f75bb"},
{file = "SQLAlchemy-2.0.36-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:39769a115f730d683b0eb7b694db9789267bcd027326cccc3125e862eb03bfd8"},
{file = "SQLAlchemy-2.0.36-cp311-cp311-win32.whl", hash = "sha256:66bffbad8d6271bb1cc2f9a4ea4f86f80fe5e2e3e501a5ae2a3dc6a76e604e6f"},
{file = "SQLAlchemy-2.0.36-cp311-cp311-win_amd64.whl", hash = "sha256:23623166bfefe1487d81b698c423f8678e80df8b54614c2bf4b4cfcd7c711959"},
+ {file = "SQLAlchemy-2.0.36-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:f7b64e6ec3f02c35647be6b4851008b26cff592a95ecb13b6788a54ef80bbdd4"},
+ {file = "SQLAlchemy-2.0.36-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:46331b00096a6db1fdc052d55b101dbbfc99155a548e20a0e4a8e5e4d1362855"},
+ {file = "SQLAlchemy-2.0.36-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fdf3386a801ea5aba17c6410dd1dc8d39cf454ca2565541b5ac42a84e1e28f53"},
+ {file = "SQLAlchemy-2.0.36-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ac9dfa18ff2a67b09b372d5db8743c27966abf0e5344c555d86cc7199f7ad83a"},
+ {file = "SQLAlchemy-2.0.36-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:90812a8933df713fdf748b355527e3af257a11e415b613dd794512461eb8a686"},
+ {file = "SQLAlchemy-2.0.36-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:1bc330d9d29c7f06f003ab10e1eaced295e87940405afe1b110f2eb93a233588"},
+ {file = "SQLAlchemy-2.0.36-cp312-cp312-win32.whl", hash = "sha256:79d2e78abc26d871875b419e1fd3c0bca31a1cb0043277d0d850014599626c2e"},
+ {file = "SQLAlchemy-2.0.36-cp312-cp312-win_amd64.whl", hash = "sha256:b544ad1935a8541d177cb402948b94e871067656b3a0b9e91dbec136b06a2ff5"},
+ {file = "SQLAlchemy-2.0.36-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:b5cc79df7f4bc3d11e4b542596c03826063092611e481fcf1c9dfee3c94355ef"},
+ {file = "SQLAlchemy-2.0.36-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:3c01117dd36800f2ecaa238c65365b7b16497adc1522bf84906e5710ee9ba0e8"},
+ {file = "SQLAlchemy-2.0.36-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9bc633f4ee4b4c46e7adcb3a9b5ec083bf1d9a97c1d3854b92749d935de40b9b"},
+ {file = "SQLAlchemy-2.0.36-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9e46ed38affdfc95d2c958de328d037d87801cfcbea6d421000859e9789e61c2"},
+ {file = "SQLAlchemy-2.0.36-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:b2985c0b06e989c043f1dc09d4fe89e1616aadd35392aea2844f0458a989eacf"},
+ {file = "SQLAlchemy-2.0.36-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:4a121d62ebe7d26fec9155f83f8be5189ef1405f5973ea4874a26fab9f1e262c"},
+ {file = "SQLAlchemy-2.0.36-cp313-cp313-win32.whl", hash = "sha256:0572f4bd6f94752167adfd7c1bed84f4b240ee6203a95e05d1e208d488d0d436"},
+ {file = "SQLAlchemy-2.0.36-cp313-cp313-win_amd64.whl", hash = "sha256:8c78ac40bde930c60e0f78b3cd184c580f89456dd87fc08f9e3ee3ce8765ce88"},
+ {file = "SQLAlchemy-2.0.36-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:be9812b766cad94a25bc63bec11f88c4ad3629a0cec1cd5d4ba48dc23860486b"},
+ {file = "SQLAlchemy-2.0.36-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:50aae840ebbd6cdd41af1c14590e5741665e5272d2fee999306673a1bb1fdb4d"},
+ {file = "SQLAlchemy-2.0.36-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4557e1f11c5f653ebfdd924f3f9d5ebfc718283b0b9beebaa5dd6b77ec290971"},
+ {file = "SQLAlchemy-2.0.36-cp37-cp37m-musllinux_1_2_aarch64.whl", hash = "sha256:07b441f7d03b9a66299ce7ccf3ef2900abc81c0db434f42a5694a37bd73870f2"},
+ {file = "SQLAlchemy-2.0.36-cp37-cp37m-musllinux_1_2_x86_64.whl", hash = "sha256:28120ef39c92c2dd60f2721af9328479516844c6b550b077ca450c7d7dc68575"},
+ {file = "SQLAlchemy-2.0.36-cp37-cp37m-win32.whl", hash = "sha256:b81ee3d84803fd42d0b154cb6892ae57ea6b7c55d8359a02379965706c7efe6c"},
+ {file = "SQLAlchemy-2.0.36-cp37-cp37m-win_amd64.whl", hash = "sha256:f942a799516184c855e1a32fbc7b29d7e571b52612647866d4ec1c3242578fcb"},
+ {file = "SQLAlchemy-2.0.36-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:3d6718667da04294d7df1670d70eeddd414f313738d20a6f1d1f379e3139a545"},
+ {file = "SQLAlchemy-2.0.36-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:72c28b84b174ce8af8504ca28ae9347d317f9dba3999e5981a3cd441f3712e24"},
+ {file = "SQLAlchemy-2.0.36-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b11d0cfdd2b095e7b0686cf5fabeb9c67fae5b06d265d8180715b8cfa86522e3"},
+ {file = "SQLAlchemy-2.0.36-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e32092c47011d113dc01ab3e1d3ce9f006a47223b18422c5c0d150af13a00687"},
+ {file = "SQLAlchemy-2.0.36-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:6a440293d802d3011028e14e4226da1434b373cbaf4a4bbb63f845761a708346"},
+ {file = "SQLAlchemy-2.0.36-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:c54a1e53a0c308a8e8a7dffb59097bff7facda27c70c286f005327f21b2bd6b1"},
+ {file = "SQLAlchemy-2.0.36-cp38-cp38-win32.whl", hash = "sha256:1e0d612a17581b6616ff03c8e3d5eff7452f34655c901f75d62bd86449d9750e"},
+ {file = "SQLAlchemy-2.0.36-cp38-cp38-win_amd64.whl", hash = "sha256:8958b10490125124463095bbdadda5aa22ec799f91958e410438ad6c97a7b793"},
+ {file = "SQLAlchemy-2.0.36-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:dc022184d3e5cacc9579e41805a681187650e170eb2fd70e28b86192a479dcaa"},
+ {file = "SQLAlchemy-2.0.36-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:b817d41d692bf286abc181f8af476c4fbef3fd05e798777492618378448ee689"},
+ {file = "SQLAlchemy-2.0.36-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a4e46a888b54be23d03a89be510f24a7652fe6ff660787b96cd0e57a4ebcb46d"},
+ {file = "SQLAlchemy-2.0.36-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c4ae3005ed83f5967f961fd091f2f8c5329161f69ce8480aa8168b2d7fe37f06"},
+ {file = "SQLAlchemy-2.0.36-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:03e08af7a5f9386a43919eda9de33ffda16b44eb11f3b313e6822243770e9763"},
+ {file = "SQLAlchemy-2.0.36-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:3dbb986bad3ed5ceaf090200eba750b5245150bd97d3e67343a3cfed06feecf7"},
+ {file = "SQLAlchemy-2.0.36-cp39-cp39-win32.whl", hash = "sha256:9fe53b404f24789b5ea9003fc25b9a3988feddebd7e7b369c8fac27ad6f52f28"},
+ {file = "SQLAlchemy-2.0.36-cp39-cp39-win_amd64.whl", hash = "sha256:af148a33ff0349f53512a049c6406923e4e02bf2f26c5fb285f143faf4f0e46a"},
{file = "SQLAlchemy-2.0.36-py3-none-any.whl", hash = "sha256:fddbe92b4760c6f5d48162aef14824add991aeda8ddadb3c31d56eb15ca69f8e"},
{file = "sqlalchemy-2.0.36.tar.gz", hash = "sha256:7f2767680b6d2398aea7082e45a774b2b0767b5c8d8ffb9c8b683088ea9b29c5"},
]
@@ -4532,20 +4613,6 @@ files = [
cryptography = ">=35.0.0"
types-pyOpenSSL = "*"
-[[package]]
-name = "types-requests"
-version = "2.31.0.6"
-description = "Typing stubs for requests"
-optional = false
-python-versions = ">=3.7"
-files = [
- {file = "types-requests-2.31.0.6.tar.gz", hash = "sha256:cd74ce3b53c461f1228a9b783929ac73a666658f223e28ed29753771477b3bd0"},
- {file = "types_requests-2.31.0.6-py3-none-any.whl", hash = "sha256:a2db9cb228a81da8348b49ad6db3f5519452dd20a9c1e1a868c83c5fe88fd1a9"},
-]
-
-[package.dependencies]
-types-urllib3 = "*"
-
[[package]]
name = "types-requests"
version = "2.32.0.20241016"
@@ -4582,17 +4649,6 @@ files = [
{file = "types_toml-0.10.8.20240310-py3-none-any.whl", hash = "sha256:627b47775d25fa29977d9c70dc0cbab3f314f32c8d8d0c012f2ef5de7aaec05d"},
]
-[[package]]
-name = "types-urllib3"
-version = "1.26.25.14"
-description = "Typing stubs for urllib3"
-optional = false
-python-versions = "*"
-files = [
- {file = "types-urllib3-1.26.25.14.tar.gz", hash = "sha256:229b7f577c951b8c1b92c1bc2b2fdb0b49847bd2af6d1cc2a2e3dd340f3bda8f"},
- {file = "types_urllib3-1.26.25.14-py3-none-any.whl", hash = "sha256:9683bbb7fb72e32bfe9d2be6e04875fbe1b3eeec3cbb4ea231435aa7fd6b4f0e"},
-]
-
[[package]]
name = "typing-extensions"
version = "4.12.2"
@@ -4629,22 +4685,6 @@ files = [
[package.extras]
dev = ["flake8", "flake8-annotations", "flake8-bandit", "flake8-bugbear", "flake8-commas", "flake8-comprehensions", "flake8-continuation", "flake8-datetimez", "flake8-docstrings", "flake8-import-order", "flake8-literal", "flake8-modern-annotations", "flake8-noqa", "flake8-pyproject", "flake8-requirements", "flake8-typechecking-import", "flake8-use-fstring", "mypy", "pep8-naming", "types-PyYAML"]
-[[package]]
-name = "urllib3"
-version = "1.26.20"
-description = "HTTP library with thread-safe connection pooling, file post, and more."
-optional = false
-python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,>=2.7"
-files = [
- {file = "urllib3-1.26.20-py2.py3-none-any.whl", hash = "sha256:0ed14ccfbf1c30a9072c7ca157e4319b70d65f623e91e7b32fadb2853431016e"},
- {file = "urllib3-1.26.20.tar.gz", hash = "sha256:40c2dc0c681e47eb8f90e7e27bf6ff7df2e677421fd46756da1161c39ca70d32"},
-]
-
-[package.extras]
-brotli = ["brotli (==1.0.9)", "brotli (>=1.0.9)", "brotlicffi (>=0.8.0)", "brotlipy (>=0.6.0)"]
-secure = ["certifi", "cryptography (>=1.3.4)", "idna (>=2.0.0)", "ipaddress", "pyOpenSSL (>=0.14)", "urllib3-secure-extra"]
-socks = ["PySocks (>=1.5.6,!=1.5.7,<2.0)"]
-
[[package]]
name = "urllib3"
version = "2.2.3"
@@ -4662,6 +4702,23 @@ h2 = ["h2 (>=4,<5)"]
socks = ["pysocks (>=1.5.6,!=1.5.7,<2.0)"]
zstd = ["zstandard (>=0.18.0)"]
+[[package]]
+name = "vcrpy"
+version = "4.3.0"
+description = "Automatically mock your HTTP interactions to simplify and speed up testing"
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "vcrpy-4.3.0-py2.py3-none-any.whl", hash = "sha256:8fbd4be412e8a7f35f623dd61034e6380a1c8dbd0edf6e87277a3289f6e98093"},
+ {file = "vcrpy-4.3.0.tar.gz", hash = "sha256:49c270ce67e826dba027d83e20d25b67a5885487697e97bca6dbdf53d750a0ac"},
+]
+
+[package.dependencies]
+PyYAML = "*"
+six = ">=1.5"
+wrapt = "*"
+yarl = "*"
+
[[package]]
name = "vcrpy"
version = "6.0.2"
@@ -4675,10 +4732,7 @@ files = [
[package.dependencies]
PyYAML = "*"
-urllib3 = [
- {version = "<2", markers = "platform_python_implementation == \"PyPy\" or python_version < \"3.10\""},
- {version = "*", markers = "platform_python_implementation != \"PyPy\" and python_version >= \"3.10\""},
-]
+urllib3 = {version = "*", markers = "platform_python_implementation != \"PyPy\" and python_version >= \"3.10\""}
wrapt = "*"
yarl = "*"
@@ -4979,4 +5033,4 @@ type = ["pytest-mypy"]
[metadata]
lock-version = "2.0"
python-versions = ">=3.9,<4.0"
-content-hash = "da2bb8e1aa0cdc1fd625160145acf9441b78e03b8e3298304f49dfcb3ad17161"
+content-hash = "1113adf90d5867bd2c173e9022b6eee5ebfa5f77176d0eba67326d38ea5ca1f9"
diff --git a/libs/langchain/pyproject.toml b/libs/langchain/pyproject.toml
index e05fd98080b36b..06a49b665197ff 100644
--- a/libs/langchain/pyproject.toml
+++ b/libs/langchain/pyproject.toml
@@ -99,6 +99,7 @@ pytest-mock = "^3.10.0"
pytest-socket = "^0.6.0"
syrupy = "^4.0.2"
requests-mock = "^1.11.0"
+pytest-xdist = "^3.6.1"
[[tool.poetry.group.test.dependencies.cffi]]
version = "<1.17.1"
python = "<3.10"
diff --git a/libs/langchain/tests/unit_tests/test_dependencies.py b/libs/langchain/tests/unit_tests/test_dependencies.py
index 664e313e0f5aa1..285daa620173f5 100644
--- a/libs/langchain/tests/unit_tests/test_dependencies.py
+++ b/libs/langchain/tests/unit_tests/test_dependencies.py
@@ -91,6 +91,7 @@ def test_test_group_dependencies(poetry_conf: Mapping[str, Any]) -> None:
"pytest-mock",
"pytest-socket",
"pytest-watcher",
+ "pytest-xdist",
"responses",
"syrupy",
"requests-mock",
diff --git a/libs/packages.yml b/libs/packages.yml
index 810e54c5a857b2..a9a9df2b7e727c 100644
--- a/libs/packages.yml
+++ b/libs/packages.yml
@@ -308,3 +308,12 @@ packages:
repo: crate/langchain-cratedb
downloads: 362
downloads_updated_at: '2024-12-23T20:53:27.001852+00:00'
+- name: langchain-modelscope
+ path: .
+ repo: modelscope/langchain-modelscope
+ downloads: 0
+- name: langchain-falkordb
+ path: .
+ repo: kingtroga/langchain-falkordb
+ downloads: 610
+ downloads_updated_at: '2025-01-02T20:23:02.544257+00:00'
diff --git a/libs/text-splitters/Makefile b/libs/text-splitters/Makefile
index f806ab5f97b1fd..c5fcfecc05709b 100644
--- a/libs/text-splitters/Makefile
+++ b/libs/text-splitters/Makefile
@@ -7,7 +7,7 @@ all: help
TEST_FILE ?= tests/unit_tests/
test tests:
- poetry run pytest --disable-socket --allow-unix-socket $(TEST_FILE)
+ poetry run pytest -n auto --disable-socket --allow-unix-socket $(TEST_FILE)
integration_test integration_tests:
poetry run pytest tests/integration_tests/
diff --git a/libs/text-splitters/poetry.lock b/libs/text-splitters/poetry.lock
index b85cb94d34b023..6a20fdb73eb257 100644
--- a/libs/text-splitters/poetry.lock
+++ b/libs/text-splitters/poetry.lock
@@ -1,4 +1,4 @@
-# This file is automatically @generated by Poetry 1.8.3 and should not be changed by hand.
+# This file is automatically @generated by Poetry 1.8.2 and should not be changed by hand.
[[package]]
name = "annotated-types"
@@ -666,6 +666,20 @@ files = [
[package.extras]
test = ["pytest (>=6)"]
+[[package]]
+name = "execnet"
+version = "2.1.1"
+description = "execnet: rapid multi-Python deployment"
+optional = false
+python-versions = ">=3.8"
+files = [
+ {file = "execnet-2.1.1-py3-none-any.whl", hash = "sha256:26dee51f1b80cebd6d0ca8e74dd8745419761d3bef34163928cbebbdc4749fdc"},
+ {file = "execnet-2.1.1.tar.gz", hash = "sha256:5189b52c6121c24feae288166ab41b32549c7e2348652736540b9e6e7d4e72e3"},
+]
+
+[package.extras]
+testing = ["hatch", "pre-commit", "pytest", "tox"]
+
[[package]]
name = "executing"
version = "2.1.0"
@@ -2213,6 +2227,7 @@ description = "Nvidia JIT LTO Library"
optional = false
python-versions = ">=3"
files = [
+ {file = "nvidia_nvjitlink_cu12-12.4.127-py3-none-manylinux2014_aarch64.whl", hash = "sha256:4abe7fef64914ccfa909bc2ba39739670ecc9e820c83ccc7a6ed414122599b83"},
{file = "nvidia_nvjitlink_cu12-12.4.127-py3-none-manylinux2014_x86_64.whl", hash = "sha256:06b3b9b25bf3f8af351d664978ca26a16d2c5127dbd53c0497e28d1fb9611d57"},
{file = "nvidia_nvjitlink_cu12-12.4.127-py3-none-win_amd64.whl", hash = "sha256:fd9020c501d27d135f983c6d3e244b197a7ccad769e34df53a42e276b0e25fa1"},
]
@@ -2870,6 +2885,26 @@ files = [
tomli = {version = ">=2.0.1,<3.0.0", markers = "python_version < \"3.11\""}
watchdog = ">=2.0.0"
+[[package]]
+name = "pytest-xdist"
+version = "3.6.1"
+description = "pytest xdist plugin for distributed testing, most importantly across multiple CPUs"
+optional = false
+python-versions = ">=3.8"
+files = [
+ {file = "pytest_xdist-3.6.1-py3-none-any.whl", hash = "sha256:9ed4adfb68a016610848639bb7e02c9352d5d9f03d04809919e2dafc3be4cca7"},
+ {file = "pytest_xdist-3.6.1.tar.gz", hash = "sha256:ead156a4db231eec769737f57668ef58a2084a34b2e55c4a8fa20d861107300d"},
+]
+
+[package.dependencies]
+execnet = ">=2.1"
+pytest = ">=7.0.0"
+
+[package.extras]
+psutil = ["psutil (>=3.0)"]
+setproctitle = ["setproctitle"]
+testing = ["filelock"]
+
[[package]]
name = "python-dateutil"
version = "2.9.0.post0"
@@ -4768,4 +4803,4 @@ type = ["pytest-mypy"]
[metadata]
lock-version = "2.0"
python-versions = ">=3.9,<4.0"
-content-hash = "a756bff9e65722e2109c6187b057060fdc601074bca0f22035a9f3c4d63b04e2"
+content-hash = "b7eb1002788ae30d0aaa8872266ee1fa12bd6f845ba3fbf76a8785f6425da25c"
diff --git a/libs/text-splitters/pyproject.toml b/libs/text-splitters/pyproject.toml
index 683d00b9ceb1e5..0fe92590277109 100644
--- a/libs/text-splitters/pyproject.toml
+++ b/libs/text-splitters/pyproject.toml
@@ -75,6 +75,7 @@ convention = "google"
ruff = "^0.5"
+
[tool.poetry.group.typing.dependencies]
mypy = "^1.10"
lxml-stubs = "^0.5.1"
@@ -82,11 +83,13 @@ types-requests = "^2.31.0.20240218"
tiktoken = "^0.8.0"
+
[tool.poetry.group.dev.dependencies]
jupyter = "^1.0.0"
+
[tool.poetry.group.test.dependencies]
pytest = "^8"
freezegun = "^1.2.2"
@@ -94,6 +97,8 @@ pytest-mock = "^3.10.0"
pytest-watcher = "^0.3.4"
pytest-asyncio = "^0.21.1"
pytest-socket = "^0.7.0"
+pytest-xdist = "^3.6.1"
+
[tool.poetry.group.test_integration]
@@ -106,16 +111,19 @@ transformers = "^4.47.0"
sentence-transformers = { version = ">=2.6.0", python = "<3.13" }
+
[tool.poetry.group.lint.dependencies.langchain-core]
path = "../core"
develop = true
+
[tool.poetry.group.dev.dependencies.langchain-core]
path = "../core"
develop = true
+
[tool.poetry.group.test.dependencies.langchain-core]
path = "../core"
develop = true