
Commit 22415c7

berkecanrizai authored and Manul from Pathway committed
use new rag api names/endpts for the showcases (#8376)
GitOrigin-RevId: 7441cf0cd5beee7674a7177370e63bb2940140c0
1 parent 7d30d9b commit 22415c7

6 files changed, +20 -20 lines changed


docs/2.developers/6.ai-pipelines/30.REST-API.md

+6 -6

@@ -21,9 +21,9 @@ The REST API provides the following endpoints:
 |----------------------------|----------|---------------------------|---------------------------------------------------------------------------------|
 | [Retrieve Closest Documents](#retrieve-closest-documents) | POST | `/v1/query` | Retrieve the closest documents from the vector store based on a query. |
 | [Retrieve Vector Store Statistics](#retrieve-vector-store-statistics) | GET | `/v1/statistics` | Retrieve statistics about the vector store. |
-| [Answer with RAG](#answer-with-rag) | POST | `/v1/pw_ai_answer` | Generate an answer to a query using a RAG pipeline. |
-| [Summarize Texts](#summarize-texts) | POST | `/v1/pw_ai_summary` | Summarize a list of texts. |
-| [List Indexed Documents](#list-indexed-documents) | POST | `/v1/pw_list_documents` | List documents indexed in the vector store. |
+| [Answer with RAG](#answer-with-rag) | POST | `/v2/answer` | Generate an answer to a query using a RAG pipeline. |
+| [Summarize Texts](#summarize-texts) | POST | `/v2/summarize` | Summarize a list of texts. |
+| [List Indexed Documents](#list-indexed-documents) | POST | `/v2/list_documents` | List documents indexed in the vector store. |


 ## Retrieve Closest Documents
@@ -75,7 +75,7 @@ Retrieve statistical information about the vector store, such as file counts and

 ## Answer with RAG

-**Endpoint**: `POST /v1/pw_ai_answer`
+**Endpoint**: `POST /v2/answer`

 Provide a response to a query using a RAG pipeline.

@@ -104,7 +104,7 @@ Provide a response to a query using a RAG pipeline.

 ## Summarize Texts

-**Endpoint**: `POST /v1/pw_ai_summary`
+**Endpoint**: `POST /v2/summarize`

 Summarize a list of texts.

@@ -131,7 +131,7 @@ Summarize a list of texts.

 ## List Indexed Documents

-**Endpoint**: `POST /v1/pw_list_documents`
+**Endpoint**: `POST /v2/list_documents`

 List documents currently indexed in the vector store, with optional filtering or metadata selection.
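For orientation, a raw HTTP call against the renamed answer route could look like the sketch below. The host, port, timeout, and exact payload shape are assumptions for illustration; only the `/v2/answer` path and the example question come from this commit.

import requests

PATHWAY_HOST = "localhost"  # assumed host of the running RAG pipeline
PATHWAY_PORT = 8000         # assumed port

# POST the question to the renamed answer endpoint and print the JSON reply.
response = requests.post(
    f"http://{PATHWAY_HOST}:{PATHWAY_PORT}/v2/answer",
    json={"prompt": "What is the Total Stockholders' equity as of December 31, 2022?"},
    timeout=60,
)
response.raise_for_status()
print(response.json())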

examples/notebooks/showcases/multimodal-rag-using-Gemini.ipynb

+2 -2

@@ -604,7 +604,7 @@
 "source": [
 "# Example usage\n",
 "\n",
-"response = client.pw_ai_answer(\"What is the Total Stockholders' equity as of December 31, 2022?\")\n",
+"response = client.answer(\"What is the Total Stockholders' equity as of December 31, 2022?\")\n",
 "print(response)\n"
 ]
 },
@@ -651,4 +651,4 @@
 },
 "nbformat": 4,
 "nbformat_minor": 5
-}
+}
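A minimal sketch of the updated notebook cell with the renamed client method; the host and port are assumed values, while `RAGClient` and the question string come from the diffs in this commit.

from pathway.xpacks.llm.question_answering import RAGClient

client = RAGClient("localhost", 8000)  # assumed host/port of the pipeline

# `answer` replaces the old `pw_ai_answer` call shown above.
response = client.answer("What is the Total Stockholders' equity as of December 31, 2022?")
print(response)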

examples/notebooks/tutorials/rag-evaluations.ipynb

+5 -5

@@ -672,7 +672,7 @@
 "from pathway.xpacks.llm.question_answering import RAGClient\n",
 "\n",
 "pathway_client = RAGClient(pathway_host, pathway_port)\n",
-"pathway_client.pw_list_documents()"
+"pathway_client.list_documents()"
 ]
 },
 {
@@ -715,7 +715,7 @@
 " if verbose:\n",
 " print(f\"Predicting question: {single_sample.user_input}\")\n",
 "\n",
-" pw_response: dict = pathway_client.pw_ai_answer(\n",
+" pw_response: dict = pathway_client.answer(\n",
 " prompt=single_sample.user_input, return_context_docs=True\n",
 " )\n",
 " resp: str = pw_response[\"response\"]\n",
@@ -1047,7 +1047,7 @@
 "outputs": [],
 "source": [
 "pathway_client = RAGClient(pathway_host, pathway_port)\n",
-"pathway_client.pw_list_documents()"
+"pathway_client.list_documents()"
 ]
 },
 {
@@ -1212,7 +1212,7 @@
 "outputs": [],
 "source": [
 "pathway_client = RAGClient(pathway_host, pathway_port)\n",
-"pathway_client.pw_list_documents()"
+"pathway_client.list_documents()"
 ]
 },
 {
@@ -1749,4 +1749,4 @@
 },
 "nbformat": 4,
 "nbformat_minor": 5
-}
+}
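A sketch of the renamed evaluation calls: list the indexed documents, then answer a question while keeping the retrieved context. The host, port, and question are assumed placeholders; the `prompt` and `return_context_docs` arguments and the `response` key come from the diff above.

from pathway.xpacks.llm.question_answering import RAGClient

pathway_host, pathway_port = "localhost", 8000  # assumed values

pathway_client = RAGClient(pathway_host, pathway_port)
print(pathway_client.list_documents())  # replaces pw_list_documents

# `answer` replaces pw_ai_answer; return_context_docs also returns the retrieved docs.
pw_response: dict = pathway_client.answer(
    prompt="What is the Total Stockholders' equity as of December 31, 2022?",
    return_context_docs=True,
)
resp: str = pw_response["response"]
print(resp)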

integration_tests/rag_evals/connector.py

+3 -3

@@ -137,7 +137,7 @@ def _get_request_headers(self):


 class RagConnector:
-    """Rag connector for evals. Returns context docs in `pw_ai_answer_question`."""
+    """Rag connector for evals. Returns context docs in `answer_question`."""

     def __init__(self, base_url: str):
         self.base_url = base_url
@@ -146,7 +146,7 @@ def __init__(self, base_url: str):
             url=base_url,
         )

-    def pw_ai_answer_question(
+    def answer_question(
         self,
         prompt,
         filter=None,
@@ -171,7 +171,7 @@ def pw_ai_answer_question(

         return response

-    def pw_list_documents(self, filter=None, keys=["path"]):
+    def list_documents(self, filter=None, keys=["path"]):
         api_url = f"{self.base_url}/v2/list_documents"
         payload = {}
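A rough usage sketch of the renamed `RagConnector` methods, which target the `/v2` routes; the import path and base URL are assumed placeholders.

from connector import RagConnector  # assumed local import path

conn = RagConnector("http://localhost:8000")  # assumed RAG server address

# `answer_question` replaces pw_ai_answer_question and returns context docs as well.
response = conn.answer_question("What is the Total Stockholders' equity as of December 31, 2022?")
print(response)

# `list_documents` replaces pw_list_documents; by default it keeps only document paths.
print(conn.list_documents())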

integration_tests/rag_evals/evaluator.py

+2 -2

@@ -210,7 +210,7 @@ def predicted_dataset_as_dict_list(self) -> list[dict]:

     def _predict_single(self, question: str, file: str) -> dict:
         filter = create_file_filter(file)
-        answer = self.connector.pw_ai_answer_question(
+        answer = self.connector.answer_question(
             question,
             filter,
         )
@@ -219,7 +219,7 @@ def _predict_single(self, question: str, file: str) -> dict:
     async def _apredict_single(self, question: str, file: str) -> dict:
         filter = create_file_filter(file)
         answer = await asyncio.to_thread(  # TODO: convert to await with async client
-            self.connector.pw_ai_answer_question,
+            self.connector.answer_question,
             question,
             filter,
         )
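A minimal sketch of the evaluator's thread-offloading pattern with the renamed connector method; the import path, base URL, and question are assumptions.

import asyncio

from connector import RagConnector  # assumed local import path


async def apredict_single(conn: RagConnector, question: str) -> dict:
    # Run the synchronous connector call in a worker thread so the event loop
    # stays free while the RAG pipeline produces the answer.
    return await asyncio.to_thread(conn.answer_question, question)


if __name__ == "__main__":
    conn = RagConnector("http://localhost:8000")  # assumed RAG server address
    print(asyncio.run(apredict_single(conn, "What changed in the v2 API?")))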

integration_tests/rag_evals/test_eval.py

+2 -2

@@ -85,7 +85,7 @@ def wait_for_start(retries: int = 10, interval: int | float = 45.0) -> bool:
         )
         logging.info(f"Indexed documents: {docs}")
         try:
-            docs = conn.pw_list_documents()
+            docs = conn.list_documents()
             if docs and len(docs) >= EXPECTED_DOCS_COUNT:
                 logging.info(
                     f"Fetched docs: ({len(docs)}) List: {docs}, \
@@ -115,7 +115,7 @@ def checker() -> bool:
         logging.error("Server was not started properly.")
         return False

-    docs = conn.pw_list_documents()
+    docs = conn.list_documents()

     logging.info(f"Indexed test documents: {docs}")
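A hedged sketch of the readiness check this test relies on: poll the renamed `list_documents` until enough documents are indexed. The import path, base URL, expected count, and retry settings are assumptions for illustration.

import logging
import time

from connector import RagConnector  # assumed local import path

EXPECTED_DOCS_COUNT = 2  # assumed number of test documents


def wait_for_start(conn: RagConnector, retries: int = 10, interval: float = 45.0) -> bool:
    # Poll the index until the expected number of documents shows up or we give up.
    for _ in range(retries):
        try:
            docs = conn.list_documents()
            if docs and len(docs) >= EXPECTED_DOCS_COUNT:
                logging.info(f"Fetched docs: ({len(docs)})")
                return True
        except Exception:
            logging.info("Server not ready yet, retrying.")
        time.sleep(interval)
    return False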
