
Commit 170a8a8

Author: elizabethfuentes12 (committed)
Message: Video and Audio Content Analysis notebook
Parent: 3b05f1f

13 files changed: +1700, -7 lines

notebooks/01_build_pdf_vector_db.ipynb

Lines changed: 15 additions & 3 deletions
@@ -36,9 +36,21 @@
   },
   {
    "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
+   "execution_count": 1,
+   "metadata": {},
+   "outputs": [
+    {
+     "ename": "ModuleNotFoundError",
+     "evalue": "No module named 'langchain_aws'",
+     "output_type": "error",
+     "traceback": [
+      "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
+      "\u001b[0;31mModuleNotFoundError\u001b[0m Traceback (most recent call last)",
+      "Cell \u001b[0;32mIn[1], line 5\u001b[0m\n\u001b[1;32m 3\u001b[0m \u001b[38;5;28;01mfrom\u001b[39;00m\u001b[38;5;250m \u001b[39m\u001b[38;5;21;01mlangchain\u001b[39;00m\u001b[38;5;21;01m.\u001b[39;00m\u001b[38;5;21;01mtext_splitter\u001b[39;00m\u001b[38;5;250m \u001b[39m\u001b[38;5;28;01mimport\u001b[39;00m RecursiveCharacterTextSplitter \u001b[38;5;66;03m# to split documents into smaller chunks.\u001b[39;00m\n\u001b[1;32m 4\u001b[0m \u001b[38;5;28;01mfrom\u001b[39;00m\u001b[38;5;250m \u001b[39m\u001b[38;5;21;01mlangchain_community\u001b[39;00m\u001b[38;5;21;01m.\u001b[39;00m\u001b[38;5;21;01mvectorstores\u001b[39;00m\u001b[38;5;250m \u001b[39m\u001b[38;5;28;01mimport\u001b[39;00m FAISS \u001b[38;5;66;03m# to store the documents in a vector database.\u001b[39;00m\n\u001b[0;32m----> 5\u001b[0m \u001b[38;5;28;01mfrom\u001b[39;00m\u001b[38;5;250m \u001b[39m\u001b[38;5;21;01mlangchain_aws\u001b[39;00m\u001b[38;5;250m \u001b[39m\u001b[38;5;28;01mimport\u001b[39;00m BedrockEmbeddings \u001b[38;5;66;03m# to create embeddings for the documents.\u001b[39;00m\n\u001b[1;32m 6\u001b[0m \u001b[38;5;28;01mfrom\u001b[39;00m\u001b[38;5;250m \u001b[39m\u001b[38;5;21;01mlangchain_experimental\u001b[39;00m\u001b[38;5;21;01m.\u001b[39;00m\u001b[38;5;21;01mtext_splitter\u001b[39;00m\u001b[38;5;250m \u001b[39m\u001b[38;5;28;01mimport\u001b[39;00m SemanticChunker \u001b[38;5;66;03m# to split documents into smaller chunks.\u001b[39;00m\n\u001b[1;32m 7\u001b[0m \u001b[38;5;66;03m#https://api.python.langchain.com/en/latest/experimental_api_reference.html#module-langchain_experimental.text_splitter\u001b[39;00m\n",
+      "\u001b[0;31mModuleNotFoundError\u001b[0m: No module named 'langchain_aws'"
+     ]
+    }
+   ],
    "source": [
     "import boto3 # to interact with AWS services.\n",
     "from langchain_community.document_loaders import PyPDFLoader, PyPDFDirectoryLoader # to load documents from PDF files.\n",

notebooks/04_video_understanding.ipynb

Lines changed: 2 additions & 3 deletions
@@ -61,7 +61,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": null,
+   "execution_count": 1,
    "metadata": {},
    "outputs": [],
    "source": [
@@ -84,8 +84,7 @@
    "source": [
     "bedrock_client = boto3.client(\"bedrock-runtime\", region_name='us-east-1') \n",
     "boto3_bedrock = boto3.client('bedrock', region_name='us-east-1')\n",
-    "s3_client = boto3.client('s3')\n",
-    "s3_uri = \"s3://reinventagentstack-bucketagendabucket1c1c4a36-iiafx6tdvrak/video_demo/\""
+    "s3_client = boto3.client('s3')"
    ]
   },
   {
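
The second hunk removes the hardcoded s3_uri from the client-setup cell. A minimal sketch of that cell with the bucket and prefix supplied by the reader instead of a fixed URI (the bucket name below is a placeholder, not a value from the repository):

import boto3  # to interact with AWS services.

bedrock_client = boto3.client("bedrock-runtime", region_name="us-east-1")  # Bedrock runtime client for model invocation.
boto3_bedrock = boto3.client("bedrock", region_name="us-east-1")  # Bedrock control-plane client.
s3_client = boto3.client("s3")  # S3 client for reading/uploading media.

# Hypothetical replacement for the removed hardcoded URI: supply your own bucket.
S3_BUCKET = "your-bucket-name"  # placeholder, not from the repository
S3_PREFIX = "video_demo/"       # prefix used in the original URI
s3_uri = f"s3://{S3_BUCKET}/{S3_PREFIX}"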
