
Commit 7a72928: feat(api): add new endpoints

Committed Jun 20, 2025 · 1 parent: b8729dd

40 files changed, +6876 / -2 lines
 

‎.stats.yml‎

2 additions, 2 deletions

@@ -1,3 +1,3 @@
-configured_endpoints: 44
+configured_endpoints: 65
 openapi_spec_hash: eeb8ebc5600523bdfad046381a929572
-config_hash: 659f65b6ccf5612986f920f7f9abbcb5
+config_hash: 63e520502003839482d0dbeb82132064

‎api.md‎

72 additions, 0 deletions

@@ -139,6 +139,7 @@ from codex.types import (
     ProjectReturnSchema,
     ProjectRetrieveResponse,
     ProjectListResponse,
+    ProjectInviteSmeResponse,
     ProjectRetrieveAnalyticsResponse,
     ProjectValidateResponse,
 )

@@ -153,6 +154,7 @@ Methods:
 - <code title="delete /api/projects/{project_id}">client.projects.<a href="./src/codex/resources/projects/projects.py">delete</a>(project_id) -> None</code>
 - <code title="get /api/projects/{project_id}/export">client.projects.<a href="./src/codex/resources/projects/projects.py">export</a>(project_id) -> object</code>
 - <code title="post /api/projects/{project_id}/increment_queries">client.projects.<a href="./src/codex/resources/projects/projects.py">increment_queries</a>(project_id, \*\*<a href="src/codex/types/project_increment_queries_params.py">params</a>) -> object</code>
+- <code title="post /api/projects/{project_id}/notifications">client.projects.<a href="./src/codex/resources/projects/projects.py">invite_sme</a>(project_id, \*\*<a href="src/codex/types/project_invite_sme_params.py">params</a>) -> <a href="./src/codex/types/project_invite_sme_response.py">ProjectInviteSmeResponse</a></code>
 - <code title="get /api/projects/{project_id}/analytics/">client.projects.<a href="./src/codex/resources/projects/projects.py">retrieve_analytics</a>(project_id, \*\*<a href="src/codex/types/project_retrieve_analytics_params.py">params</a>) -> <a href="./src/codex/types/project_retrieve_analytics_response.py">ProjectRetrieveAnalyticsResponse</a></code>
 - <code title="post /api/projects/{project_id}/validate">client.projects.<a href="./src/codex/resources/projects/projects.py">validate</a>(project_id, \*\*<a href="src/codex/types/project_validate_params.py">params</a>) -> <a href="./src/codex/types/project_validate_response.py">ProjectValidateResponse</a></code>

@@ -210,6 +212,76 @@ Methods:
 - <code title="get /api/projects/{project_id}/entries/clusters">client.projects.clusters.<a href="./src/codex/resources/projects/clusters.py">list</a>(project_id, \*\*<a href="src/codex/types/projects/cluster_list_params.py">params</a>) -> <a href="./src/codex/types/projects/cluster_list_response.py">SyncOffsetPageClusters[ClusterListResponse]</a></code>
 - <code title="get /api/projects/{project_id}/entries/clusters/{representative_entry_id}">client.projects.clusters.<a href="./src/codex/resources/projects/clusters.py">list_variants</a>(representative_entry_id, \*, project_id) -> <a href="./src/codex/types/projects/cluster_list_variants_response.py">ClusterListVariantsResponse</a></code>
 
+## Evals
+
+Types:
+
+```python
+from codex.types.projects import EvalListResponse
+```
+
+Methods:
+
+- <code title="post /api/projects/{project_id}/evals">client.projects.evals.<a href="./src/codex/resources/projects/evals.py">create</a>(project_id, \*\*<a href="src/codex/types/projects/eval_create_params.py">params</a>) -> <a href="./src/codex/types/project_return_schema.py">ProjectReturnSchema</a></code>
+- <code title="put /api/projects/{project_id}/evals/{eval_key}">client.projects.evals.<a href="./src/codex/resources/projects/evals.py">update</a>(path_eval_key, \*, project_id, \*\*<a href="src/codex/types/projects/eval_update_params.py">params</a>) -> <a href="./src/codex/types/project_return_schema.py">ProjectReturnSchema</a></code>
+- <code title="get /api/projects/{project_id}/evals">client.projects.evals.<a href="./src/codex/resources/projects/evals.py">list</a>(project_id) -> <a href="./src/codex/types/projects/eval_list_response.py">EvalListResponse</a></code>
+- <code title="delete /api/projects/{project_id}/evals/{eval_key}">client.projects.evals.<a href="./src/codex/resources/projects/evals.py">delete</a>(eval_key, \*, project_id) -> <a href="./src/codex/types/project_return_schema.py">ProjectReturnSchema</a></code>
+
+## QueryLogs
+
+Types:
+
+```python
+from codex.types.projects import (
+    QueryLogRetrieveResponse,
+    QueryLogListResponse,
+    QueryLogListByGroupResponse,
+    QueryLogListGroupsResponse,
+    QueryLogStartRemediationResponse,
+)
+```
+
+Methods:
+
+- <code title="get /api/projects/{project_id}/query_logs/{query_log_id}">client.projects.query_logs.<a href="./src/codex/resources/projects/query_logs.py">retrieve</a>(query_log_id, \*, project_id) -> <a href="./src/codex/types/projects/query_log_retrieve_response.py">QueryLogRetrieveResponse</a></code>
+- <code title="get /api/projects/{project_id}/query_logs/">client.projects.query_logs.<a href="./src/codex/resources/projects/query_logs.py">list</a>(project_id, \*\*<a href="src/codex/types/projects/query_log_list_params.py">params</a>) -> <a href="./src/codex/types/projects/query_log_list_response.py">QueryLogListResponse</a></code>
+- <code title="get /api/projects/{project_id}/query_logs/logs_by_group">client.projects.query_logs.<a href="./src/codex/resources/projects/query_logs.py">list_by_group</a>(project_id, \*\*<a href="src/codex/types/projects/query_log_list_by_group_params.py">params</a>) -> <a href="./src/codex/types/projects/query_log_list_by_group_response.py">QueryLogListByGroupResponse</a></code>
+- <code title="get /api/projects/{project_id}/query_logs/groups">client.projects.query_logs.<a href="./src/codex/resources/projects/query_logs.py">list_groups</a>(project_id, \*\*<a href="src/codex/types/projects/query_log_list_groups_params.py">params</a>) -> <a href="./src/codex/types/projects/query_log_list_groups_response.py">QueryLogListGroupsResponse</a></code>
+- <code title="post /api/projects/{project_id}/query_logs/{query_log_id}/start_remediation">client.projects.query_logs.<a href="./src/codex/resources/projects/query_logs.py">start_remediation</a>(query_log_id, \*, project_id) -> <a href="./src/codex/types/projects/query_log_start_remediation_response.py">QueryLogStartRemediationResponse</a></code>
+
+## Remediations
+
+Types:
+
+```python
+from codex.types.projects import (
+    RemediationCreateResponse,
+    RemediationRetrieveResponse,
+    RemediationListResponse,
+    RemediationEditAnswerResponse,
+    RemediationEditDraftAnswerResponse,
+    RemediationGetResolvedLogsCountResponse,
+    RemediationListResolvedLogsResponse,
+    RemediationPauseResponse,
+    RemediationPublishResponse,
+    RemediationUnpauseResponse,
+)
+```
+
+Methods:
+
+- <code title="post /api/projects/{project_id}/remediations/">client.projects.remediations.<a href="./src/codex/resources/projects/remediations.py">create</a>(project_id, \*\*<a href="src/codex/types/projects/remediation_create_params.py">params</a>) -> <a href="./src/codex/types/projects/remediation_create_response.py">RemediationCreateResponse</a></code>
+- <code title="get /api/projects/{project_id}/remediations/{remediation_id}">client.projects.remediations.<a href="./src/codex/resources/projects/remediations.py">retrieve</a>(remediation_id, \*, project_id) -> <a href="./src/codex/types/projects/remediation_retrieve_response.py">RemediationRetrieveResponse</a></code>
+- <code title="get /api/projects/{project_id}/remediations/">client.projects.remediations.<a href="./src/codex/resources/projects/remediations.py">list</a>(project_id, \*\*<a href="src/codex/types/projects/remediation_list_params.py">params</a>) -> <a href="./src/codex/types/projects/remediation_list_response.py">RemediationListResponse</a></code>
+- <code title="delete /api/projects/{project_id}/remediations/{remediation_id}">client.projects.remediations.<a href="./src/codex/resources/projects/remediations.py">delete</a>(remediation_id, \*, project_id) -> None</code>
+- <code title="patch /api/projects/{project_id}/remediations/{remediation_id}/edit_answer">client.projects.remediations.<a href="./src/codex/resources/projects/remediations.py">edit_answer</a>(remediation_id, \*, project_id, \*\*<a href="src/codex/types/projects/remediation_edit_answer_params.py">params</a>) -> <a href="./src/codex/types/projects/remediation_edit_answer_response.py">RemediationEditAnswerResponse</a></code>
+- <code title="patch /api/projects/{project_id}/remediations/{remediation_id}/edit_draft_answer">client.projects.remediations.<a href="./src/codex/resources/projects/remediations.py">edit_draft_answer</a>(remediation_id, \*, project_id, \*\*<a href="src/codex/types/projects/remediation_edit_draft_answer_params.py">params</a>) -> <a href="./src/codex/types/projects/remediation_edit_draft_answer_response.py">RemediationEditDraftAnswerResponse</a></code>
+- <code title="get /api/projects/{project_id}/remediations/{remediation_id}/resolved_logs_count">client.projects.remediations.<a href="./src/codex/resources/projects/remediations.py">get_resolved_logs_count</a>(remediation_id, \*, project_id) -> <a href="./src/codex/types/projects/remediation_get_resolved_logs_count_response.py">RemediationGetResolvedLogsCountResponse</a></code>
+- <code title="get /api/projects/{project_id}/remediations/{remediation_id}/resolved_logs">client.projects.remediations.<a href="./src/codex/resources/projects/remediations.py">list_resolved_logs</a>(remediation_id, \*, project_id) -> <a href="./src/codex/types/projects/remediation_list_resolved_logs_response.py">RemediationListResolvedLogsResponse</a></code>
+- <code title="patch /api/projects/{project_id}/remediations/{remediation_id}/pause">client.projects.remediations.<a href="./src/codex/resources/projects/remediations.py">pause</a>(remediation_id, \*, project_id) -> <a href="./src/codex/types/projects/remediation_pause_response.py">RemediationPauseResponse</a></code>
+- <code title="patch /api/projects/{project_id}/remediations/{remediation_id}/publish">client.projects.remediations.<a href="./src/codex/resources/projects/remediations.py">publish</a>(remediation_id, \*, project_id) -> <a href="./src/codex/types/projects/remediation_publish_response.py">RemediationPublishResponse</a></code>
+- <code title="patch /api/projects/{project_id}/remediations/{remediation_id}/unpause">client.projects.remediations.<a href="./src/codex/resources/projects/remediations.py">unpause</a>(remediation_id, \*, project_id) -> <a href="./src/codex/types/projects/remediation_unpause_response.py">RemediationUnpauseResponse</a></code>
+
 # Tlm
 
 Types:
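
The api.md additions above map to three new sub-resources on client.projects. A minimal usage sketch of the new surface; the client construction, API key, and IDs below are illustrative assumptions, not part of this commit:

```python
# Sketch only: Codex client construction and the placeholder IDs are assumptions.
from codex import Codex

client = Codex(api_key="sk-...")  # hypothetical credentials
project_id = "proj_123"           # placeholder project ID

# Evals: list the evaluation metrics configured for the project.
evals = client.projects.evals.list(project_id)

# QueryLogs: fetch recent logged queries, newest first.
logs = client.projects.query_logs.list(project_id, limit=20, order="desc")

# Remediations: open a remediation for a recurring bad question.
remediation = client.projects.remediations.create(
    project_id,
    question="How do I rotate my access key?",
)
```
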

‎src/codex/resources/projects/__init__.py‎

42 additions, 0 deletions

@@ -1,5 +1,13 @@
 # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
 
+from .evals import (
+    EvalsResource,
+    AsyncEvalsResource,
+    EvalsResourceWithRawResponse,
+    AsyncEvalsResourceWithRawResponse,
+    EvalsResourceWithStreamingResponse,
+    AsyncEvalsResourceWithStreamingResponse,
+)
 from .entries import (
     EntriesResource,
     AsyncEntriesResource,

@@ -24,6 +32,14 @@
     ProjectsResourceWithStreamingResponse,
     AsyncProjectsResourceWithStreamingResponse,
 )
+from .query_logs import (
+    QueryLogsResource,
+    AsyncQueryLogsResource,
+    QueryLogsResourceWithRawResponse,
+    AsyncQueryLogsResourceWithRawResponse,
+    QueryLogsResourceWithStreamingResponse,
+    AsyncQueryLogsResourceWithStreamingResponse,
+)
 from .access_keys import (
     AccessKeysResource,
     AsyncAccessKeysResource,

@@ -32,6 +48,14 @@
     AccessKeysResourceWithStreamingResponse,
     AsyncAccessKeysResourceWithStreamingResponse,
 )
+from .remediations import (
+    RemediationsResource,
+    AsyncRemediationsResource,
+    RemediationsResourceWithRawResponse,
+    AsyncRemediationsResourceWithRawResponse,
+    RemediationsResourceWithStreamingResponse,
+    AsyncRemediationsResourceWithStreamingResponse,
+)
 
 __all__ = [
     "AccessKeysResource",

@@ -52,6 +76,24 @@
     "AsyncClustersResourceWithRawResponse",
     "ClustersResourceWithStreamingResponse",
     "AsyncClustersResourceWithStreamingResponse",
+    "EvalsResource",
+    "AsyncEvalsResource",
+    "EvalsResourceWithRawResponse",
+    "AsyncEvalsResourceWithRawResponse",
+    "EvalsResourceWithStreamingResponse",
+    "AsyncEvalsResourceWithStreamingResponse",
+    "QueryLogsResource",
+    "AsyncQueryLogsResource",
+    "QueryLogsResourceWithRawResponse",
+    "AsyncQueryLogsResourceWithRawResponse",
+    "QueryLogsResourceWithStreamingResponse",
+    "AsyncQueryLogsResourceWithStreamingResponse",
+    "RemediationsResource",
+    "AsyncRemediationsResource",
+    "RemediationsResourceWithRawResponse",
+    "AsyncRemediationsResourceWithRawResponse",
+    "RemediationsResourceWithStreamingResponse",
+    "AsyncRemediationsResourceWithStreamingResponse",
     "ProjectsResource",
     "AsyncProjectsResource",
     "ProjectsResourceWithRawResponse",

‎src/codex/resources/projects/evals.py‎

803 additions, 0 deletions (large diff not rendered by default)

‎src/codex/resources/projects/projects.py‎

202 additions, 0 deletions

@@ -8,11 +8,20 @@
 
 import httpx
 
+from .evals import (
+    EvalsResource,
+    AsyncEvalsResource,
+    EvalsResourceWithRawResponse,
+    AsyncEvalsResourceWithRawResponse,
+    EvalsResourceWithStreamingResponse,
+    AsyncEvalsResourceWithStreamingResponse,
+)
 from ...types import (
     project_list_params,
     project_create_params,
     project_update_params,
     project_validate_params,
+    project_invite_sme_params,
     project_increment_queries_params,
     project_retrieve_analytics_params,
 )

@@ -35,6 +44,14 @@
     AsyncClustersResourceWithStreamingResponse,
 )
 from ..._compat import cached_property
+from .query_logs import (
+    QueryLogsResource,
+    AsyncQueryLogsResource,
+    QueryLogsResourceWithRawResponse,
+    AsyncQueryLogsResourceWithRawResponse,
+    QueryLogsResourceWithStreamingResponse,
+    AsyncQueryLogsResourceWithStreamingResponse,
+)
 from ..._resource import SyncAPIResource, AsyncAPIResource
 from ..._response import (
     to_raw_response_wrapper,

@@ -50,11 +67,20 @@
     AccessKeysResourceWithStreamingResponse,
     AsyncAccessKeysResourceWithStreamingResponse,
 )
+from .remediations import (
+    RemediationsResource,
+    AsyncRemediationsResource,
+    RemediationsResourceWithRawResponse,
+    AsyncRemediationsResourceWithRawResponse,
+    RemediationsResourceWithStreamingResponse,
+    AsyncRemediationsResourceWithStreamingResponse,
+)
 from ..._base_client import make_request_options
 from ...types.project_list_response import ProjectListResponse
 from ...types.project_return_schema import ProjectReturnSchema
 from ...types.project_retrieve_response import ProjectRetrieveResponse
 from ...types.project_validate_response import ProjectValidateResponse
+from ...types.project_invite_sme_response import ProjectInviteSmeResponse
 from ...types.project_retrieve_analytics_response import ProjectRetrieveAnalyticsResponse
 
 __all__ = ["ProjectsResource", "AsyncProjectsResource"]

@@ -73,6 +99,18 @@ def entries(self) -> EntriesResource:
     def clusters(self) -> ClustersResource:
         return ClustersResource(self._client)
 
+    @cached_property
+    def evals(self) -> EvalsResource:
+        return EvalsResource(self._client)
+
+    @cached_property
+    def query_logs(self) -> QueryLogsResource:
+        return QueryLogsResource(self._client)
+
+    @cached_property
+    def remediations(self) -> RemediationsResource:
+        return RemediationsResource(self._client)
+
     @cached_property
     def with_raw_response(self) -> ProjectsResourceWithRawResponse:
         """

@@ -374,6 +412,52 @@ def increment_queries(
             cast_to=object,
         )
 
+    def invite_sme(
+        self,
+        project_id: str,
+        *,
+        email: str,
+        page_type: Literal["query_log", "remediation"],
+        url_query_string: str,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> ProjectInviteSmeResponse:
+        """
+        Invite a subject matter expert to view a specific query log or remediation.
+
+        Returns: SMERemediationNotificationResponse with status and notification details
+
+        Args:
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        if not project_id:
+            raise ValueError(f"Expected a non-empty value for `project_id` but received {project_id!r}")
+        return self._post(
+            f"/api/projects/{project_id}/notifications",
+            body=maybe_transform(
+                {
+                    "email": email,
+                    "page_type": page_type,
+                    "url_query_string": url_query_string,
+                },
+                project_invite_sme_params.ProjectInviteSmeParams,
+            ),
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            cast_to=ProjectInviteSmeResponse,
+        )
+
     def retrieve_analytics(
         self,
         project_id: str,

@@ -623,6 +707,18 @@ def entries(self) -> AsyncEntriesResource:
     def clusters(self) -> AsyncClustersResource:
         return AsyncClustersResource(self._client)
 
+    @cached_property
+    def evals(self) -> AsyncEvalsResource:
+        return AsyncEvalsResource(self._client)
+
+    @cached_property
+    def query_logs(self) -> AsyncQueryLogsResource:
+        return AsyncQueryLogsResource(self._client)
+
+    @cached_property
+    def remediations(self) -> AsyncRemediationsResource:
+        return AsyncRemediationsResource(self._client)
+
     @cached_property
     def with_raw_response(self) -> AsyncProjectsResourceWithRawResponse:
         """

@@ -926,6 +1022,52 @@ async def increment_queries(
             cast_to=object,
         )
 
+    async def invite_sme(
+        self,
+        project_id: str,
+        *,
+        email: str,
+        page_type: Literal["query_log", "remediation"],
+        url_query_string: str,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> ProjectInviteSmeResponse:
+        """
+        Invite a subject matter expert to view a specific query log or remediation.
+
+        Returns: SMERemediationNotificationResponse with status and notification details
+
+        Args:
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        if not project_id:
+            raise ValueError(f"Expected a non-empty value for `project_id` but received {project_id!r}")
+        return await self._post(
+            f"/api/projects/{project_id}/notifications",
+            body=await async_maybe_transform(
+                {
+                    "email": email,
+                    "page_type": page_type,
+                    "url_query_string": url_query_string,
+                },
+                project_invite_sme_params.ProjectInviteSmeParams,
+            ),
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            cast_to=ProjectInviteSmeResponse,
+        )
+
     async def retrieve_analytics(
         self,
         project_id: str,

@@ -1189,6 +1331,9 @@ def __init__(self, projects: ProjectsResource) -> None:
                 projects.increment_queries # pyright: ignore[reportDeprecated],
             )
         )
+        self.invite_sme = to_raw_response_wrapper(
+            projects.invite_sme,
+        )
         self.retrieve_analytics = to_raw_response_wrapper(
             projects.retrieve_analytics,
         )

@@ -1208,6 +1353,18 @@ def entries(self) -> EntriesResourceWithRawResponse:
     def clusters(self) -> ClustersResourceWithRawResponse:
         return ClustersResourceWithRawResponse(self._projects.clusters)
 
+    @cached_property
+    def evals(self) -> EvalsResourceWithRawResponse:
+        return EvalsResourceWithRawResponse(self._projects.evals)
+
+    @cached_property
+    def query_logs(self) -> QueryLogsResourceWithRawResponse:
+        return QueryLogsResourceWithRawResponse(self._projects.query_logs)
+
+    @cached_property
+    def remediations(self) -> RemediationsResourceWithRawResponse:
+        return RemediationsResourceWithRawResponse(self._projects.remediations)
+
 
 class AsyncProjectsResourceWithRawResponse:
     def __init__(self, projects: AsyncProjectsResource) -> None:

@@ -1236,6 +1393,9 @@ def __init__(self, projects: AsyncProjectsResource) -> None:
                 projects.increment_queries # pyright: ignore[reportDeprecated],
             )
         )
+        self.invite_sme = async_to_raw_response_wrapper(
+            projects.invite_sme,
+        )
         self.retrieve_analytics = async_to_raw_response_wrapper(
             projects.retrieve_analytics,
         )

@@ -1255,6 +1415,18 @@ def entries(self) -> AsyncEntriesResourceWithRawResponse:
     def clusters(self) -> AsyncClustersResourceWithRawResponse:
         return AsyncClustersResourceWithRawResponse(self._projects.clusters)
 
+    @cached_property
+    def evals(self) -> AsyncEvalsResourceWithRawResponse:
+        return AsyncEvalsResourceWithRawResponse(self._projects.evals)
+
+    @cached_property
+    def query_logs(self) -> AsyncQueryLogsResourceWithRawResponse:
+        return AsyncQueryLogsResourceWithRawResponse(self._projects.query_logs)
+
+    @cached_property
+    def remediations(self) -> AsyncRemediationsResourceWithRawResponse:
+        return AsyncRemediationsResourceWithRawResponse(self._projects.remediations)
+
 
 class ProjectsResourceWithStreamingResponse:
     def __init__(self, projects: ProjectsResource) -> None:

@@ -1283,6 +1455,9 @@ def __init__(self, projects: ProjectsResource) -> None:
                 projects.increment_queries # pyright: ignore[reportDeprecated],
             )
         )
+        self.invite_sme = to_streamed_response_wrapper(
+            projects.invite_sme,
+        )
         self.retrieve_analytics = to_streamed_response_wrapper(
             projects.retrieve_analytics,
         )

@@ -1302,6 +1477,18 @@ def entries(self) -> EntriesResourceWithStreamingResponse:
     def clusters(self) -> ClustersResourceWithStreamingResponse:
         return ClustersResourceWithStreamingResponse(self._projects.clusters)
 
+    @cached_property
+    def evals(self) -> EvalsResourceWithStreamingResponse:
+        return EvalsResourceWithStreamingResponse(self._projects.evals)
+
+    @cached_property
+    def query_logs(self) -> QueryLogsResourceWithStreamingResponse:
+        return QueryLogsResourceWithStreamingResponse(self._projects.query_logs)
+
+    @cached_property
+    def remediations(self) -> RemediationsResourceWithStreamingResponse:
+        return RemediationsResourceWithStreamingResponse(self._projects.remediations)
+
 
 class AsyncProjectsResourceWithStreamingResponse:
     def __init__(self, projects: AsyncProjectsResource) -> None:

@@ -1330,6 +1517,9 @@ def __init__(self, projects: AsyncProjectsResource) -> None:
                 projects.increment_queries # pyright: ignore[reportDeprecated],
             )
         )
+        self.invite_sme = async_to_streamed_response_wrapper(
+            projects.invite_sme,
+        )
         self.retrieve_analytics = async_to_streamed_response_wrapper(
             projects.retrieve_analytics,
         )

@@ -1348,3 +1538,15 @@ def entries(self) -> AsyncEntriesResourceWithStreamingResponse:
     @cached_property
     def clusters(self) -> AsyncClustersResourceWithStreamingResponse:
         return AsyncClustersResourceWithStreamingResponse(self._projects.clusters)
+
+    @cached_property
+    def evals(self) -> AsyncEvalsResourceWithStreamingResponse:
+        return AsyncEvalsResourceWithStreamingResponse(self._projects.evals)
+
+    @cached_property
+    def query_logs(self) -> AsyncQueryLogsResourceWithStreamingResponse:
+        return AsyncQueryLogsResourceWithStreamingResponse(self._projects.query_logs)
+
+    @cached_property
+    def remediations(self) -> AsyncRemediationsResourceWithStreamingResponse:
+        return AsyncRemediationsResourceWithStreamingResponse(self._projects.remediations)
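
The new invite_sme method above posts to /api/projects/{project_id}/notifications. A short sketch of calling it through the client from the earlier example; the email address and query-string value are made-up placeholders:

```python
# Sketch only: reuses the client/project_id assumed earlier; values are placeholders.
invite = client.projects.invite_sme(
    project_id,
    email="sme@example.com",
    page_type="query_log",                    # or "remediation"
    url_query_string="query_log_id=abc123",   # hypothetical deep-link query string
)
print(invite.status, invite.recipient_email)  # fields from ProjectInviteSmeResponse
```
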

‎src/codex/resources/projects/query_logs.py‎

741 additions, 0 deletions (large diff not rendered by default)

‎src/codex/resources/projects/remediations.py‎

1135 additions, 0 deletions (large diff not rendered by default)

‎src/codex/types/__init__.py‎

2 additions, 0 deletions

@@ -13,9 +13,11 @@
 from .project_return_schema import ProjectReturnSchema as ProjectReturnSchema
 from .project_update_params import ProjectUpdateParams as ProjectUpdateParams
 from .project_validate_params import ProjectValidateParams as ProjectValidateParams
+from .project_invite_sme_params import ProjectInviteSmeParams as ProjectInviteSmeParams
 from .project_retrieve_response import ProjectRetrieveResponse as ProjectRetrieveResponse
 from .project_validate_response import ProjectValidateResponse as ProjectValidateResponse
 from .organization_schema_public import OrganizationSchemaPublic as OrganizationSchemaPublic
+from .project_invite_sme_response import ProjectInviteSmeResponse as ProjectInviteSmeResponse
 from .user_activate_account_params import UserActivateAccountParams as UserActivateAccountParams
 from .project_increment_queries_params import ProjectIncrementQueriesParams as ProjectIncrementQueriesParams
 from .project_retrieve_analytics_params import ProjectRetrieveAnalyticsParams as ProjectRetrieveAnalyticsParams

src/codex/types/project_invite_sme_params.py (new file, 15 additions)

# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.

from __future__ import annotations

from typing_extensions import Literal, Required, TypedDict

__all__ = ["ProjectInviteSmeParams"]


class ProjectInviteSmeParams(TypedDict, total=False):
    email: Required[str]

    page_type: Required[Literal["query_log", "remediation"]]

    url_query_string: Required[str]

src/codex/types/project_invite_sme_response.py (new file, 11 additions)

# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.

from .._models import BaseModel

__all__ = ["ProjectInviteSmeResponse"]


class ProjectInviteSmeResponse(BaseModel):
    recipient_email: str

    status: str

‎src/codex/types/projects/__init__.py‎

31 additions, 0 deletions

@@ -5,17 +5,48 @@
 from .entry import Entry as Entry
 from .access_key_schema import AccessKeySchema as AccessKeySchema
 from .entry_query_params import EntryQueryParams as EntryQueryParams
+from .eval_create_params import EvalCreateParams as EvalCreateParams
+from .eval_list_response import EvalListResponse as EvalListResponse
+from .eval_update_params import EvalUpdateParams as EvalUpdateParams
 from .cluster_list_params import ClusterListParams as ClusterListParams
 from .entry_create_params import EntryCreateParams as EntryCreateParams
 from .entry_update_params import EntryUpdateParams as EntryUpdateParams
 from .entry_query_response import EntryQueryResponse as EntryQueryResponse
 from .cluster_list_response import ClusterListResponse as ClusterListResponse
+from .query_log_list_params import QueryLogListParams as QueryLogListParams
 from .entry_notify_sme_params import EntryNotifySmeParams as EntryNotifySmeParams
+from .query_log_list_response import QueryLogListResponse as QueryLogListResponse
+from .remediation_list_params import RemediationListParams as RemediationListParams
 from .access_key_create_params import AccessKeyCreateParams as AccessKeyCreateParams
 from .access_key_list_response import AccessKeyListResponse as AccessKeyListResponse
 from .access_key_update_params import AccessKeyUpdateParams as AccessKeyUpdateParams
 from .entry_notify_sme_response import EntryNotifySmeResponse as EntryNotifySmeResponse
+from .remediation_create_params import RemediationCreateParams as RemediationCreateParams
+from .remediation_list_response import RemediationListResponse as RemediationListResponse
+from .remediation_pause_response import RemediationPauseResponse as RemediationPauseResponse
+from .query_log_retrieve_response import QueryLogRetrieveResponse as QueryLogRetrieveResponse
+from .remediation_create_response import RemediationCreateResponse as RemediationCreateResponse
+from .query_log_list_groups_params import QueryLogListGroupsParams as QueryLogListGroupsParams
+from .remediation_publish_response import RemediationPublishResponse as RemediationPublishResponse
+from .remediation_unpause_response import RemediationUnpauseResponse as RemediationUnpauseResponse
+from .remediation_retrieve_response import RemediationRetrieveResponse as RemediationRetrieveResponse
 from .cluster_list_variants_response import ClusterListVariantsResponse as ClusterListVariantsResponse
+from .query_log_list_by_group_params import QueryLogListByGroupParams as QueryLogListByGroupParams
+from .query_log_list_groups_response import QueryLogListGroupsResponse as QueryLogListGroupsResponse
+from .remediation_edit_answer_params import RemediationEditAnswerParams as RemediationEditAnswerParams
+from .query_log_list_by_group_response import QueryLogListByGroupResponse as QueryLogListByGroupResponse
+from .remediation_edit_answer_response import RemediationEditAnswerResponse as RemediationEditAnswerResponse
+from .query_log_start_remediation_response import QueryLogStartRemediationResponse as QueryLogStartRemediationResponse
+from .remediation_edit_draft_answer_params import RemediationEditDraftAnswerParams as RemediationEditDraftAnswerParams
+from .remediation_edit_draft_answer_response import (
+    RemediationEditDraftAnswerResponse as RemediationEditDraftAnswerResponse,
+)
 from .access_key_retrieve_project_id_response import (
     AccessKeyRetrieveProjectIDResponse as AccessKeyRetrieveProjectIDResponse,
 )
+from .remediation_list_resolved_logs_response import (
+    RemediationListResolvedLogsResponse as RemediationListResolvedLogsResponse,
+)
+from .remediation_get_resolved_logs_count_response import (
+    RemediationGetResolvedLogsCountResponse as RemediationGetResolvedLogsCountResponse,
+)

src/codex/types/projects/eval_create_params.py (new file, 66 additions)

# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.

from __future__ import annotations

from typing import Optional
from typing_extensions import Literal, Required, TypedDict

__all__ = ["EvalCreateParams"]


class EvalCreateParams(TypedDict, total=False):
    criteria: Required[str]
    """The evaluation criteria text that describes what aspect is being evaluated and how"""

    eval_key: Required[str]
    """Unique key for eval metric - currently maps to the TrustworthyRAG name property and eval_scores dictionary key to check against threshold"""

    name: Required[str]
    """Display name/label for the evaluation metric"""

    context_identifier: Optional[str]
    """The exact string used in your evaluation criteria to reference the retrieved context."""

    enabled: bool
    """Allows the evaluation to be disabled without removing it"""

    is_default: bool
    """Whether the eval is a default, built-in eval or a custom eval"""

    priority: Optional[int]
    """Priority order for evals (lower number = higher priority) to determine primary eval issue to surface"""

    query_identifier: Optional[str]
    """The exact string used in your evaluation criteria to reference the user's query."""

    response_identifier: Optional[str]
    """The exact string used in your evaluation criteria to reference the RAG/LLM response."""

    should_escalate: bool
    """If true, failing this eval means the response is considered bad and can trigger escalation to Codex/SME"""

    threshold: float
    """Threshold value that determines if the evaluation fails"""

    threshold_direction: Literal["above", "below"]
    """Whether the evaluation fails when score is above or below the threshold"""

src/codex/types/projects/eval_list_response.py (new file, 69 additions)

# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.

from typing import List, Optional
from typing_extensions import Literal, TypeAlias

from ..._models import BaseModel

__all__ = ["EvalListResponse", "EvalListResponseItem"]


class EvalListResponseItem(BaseModel):
    criteria: str
    """The evaluation criteria text that describes what aspect is being evaluated and how"""

    eval_key: str
    """Unique key for eval metric - currently maps to the TrustworthyRAG name property and eval_scores dictionary key to check against threshold"""

    name: str
    """Display name/label for the evaluation metric"""

    context_identifier: Optional[str] = None
    """The exact string used in your evaluation criteria to reference the retrieved context."""

    enabled: Optional[bool] = None
    """Allows the evaluation to be disabled without removing it"""

    is_default: Optional[bool] = None
    """Whether the eval is a default, built-in eval or a custom eval"""

    priority: Optional[int] = None
    """Priority order for evals (lower number = higher priority) to determine primary eval issue to surface"""

    query_identifier: Optional[str] = None
    """The exact string used in your evaluation criteria to reference the user's query."""

    response_identifier: Optional[str] = None
    """The exact string used in your evaluation criteria to reference the RAG/LLM response."""

    should_escalate: Optional[bool] = None
    """If true, failing this eval means the response is considered bad and can trigger escalation to Codex/SME"""

    threshold: Optional[float] = None
    """Threshold value that determines if the evaluation fails"""

    threshold_direction: Optional[Literal["above", "below"]] = None
    """Whether the evaluation fails when score is above or below the threshold"""


EvalListResponse: TypeAlias = List[EvalListResponseItem]
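
Since EvalListResponse is simply a list of items, iterating it is straightforward. A small sketch that surfaces the enabled evals in priority order, again reusing the client and project_id assumed earlier:

```python
# Sketch only: priority handling below is an illustrative choice, not SDK behavior.
evals = client.projects.evals.list(project_id)
enabled = [e for e in evals if e.enabled is not False]
for item in sorted(enabled, key=lambda e: e.priority if e.priority is not None else 999):
    print(item.eval_key, item.threshold, item.threshold_direction)
```
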

src/codex/types/projects/eval_update_params.py (new file, 104 additions)

# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.

from __future__ import annotations

from typing import Union, Optional
from typing_extensions import Literal, Required, Annotated, TypeAlias, TypedDict

from ..._utils import PropertyInfo

__all__ = ["EvalUpdateParams", "CustomEvalCreateOrUpdateSchema", "DefaultEvalUpdateSchema"]


class CustomEvalCreateOrUpdateSchema(TypedDict, total=False):
    project_id: Required[str]

    criteria: Required[str]
    """The evaluation criteria text that describes what aspect is being evaluated and how"""

    body_eval_key: Required[Annotated[str, PropertyInfo(alias="eval_key")]]
    """Unique key for eval metric - currently maps to the TrustworthyRAG name property and eval_scores dictionary key to check against threshold"""

    name: Required[str]
    """Display name/label for the evaluation metric"""

    context_identifier: Optional[str]
    """The exact string used in your evaluation criteria to reference the retrieved context."""

    enabled: bool
    """Allows the evaluation to be disabled without removing it"""

    is_default: bool
    """Whether the eval is a default, built-in eval or a custom eval"""

    priority: Optional[int]
    """Priority order for evals (lower number = higher priority) to determine primary eval issue to surface"""

    query_identifier: Optional[str]
    """The exact string used in your evaluation criteria to reference the user's query."""

    response_identifier: Optional[str]
    """The exact string used in your evaluation criteria to reference the RAG/LLM response."""

    should_escalate: bool
    """If true, failing this eval means the response is considered bad and can trigger escalation to Codex/SME"""

    threshold: float
    """Threshold value that determines if the evaluation fails"""

    threshold_direction: Literal["above", "below"]
    """Whether the evaluation fails when score is above or below the threshold"""


class DefaultEvalUpdateSchema(TypedDict, total=False):
    project_id: Required[str]

    body_eval_key: Required[Annotated[str, PropertyInfo(alias="eval_key")]]
    """Unique key for eval metric - currently maps to the TrustworthyRAG name property and eval_scores dictionary key to check against threshold"""

    enabled: bool
    """Allows the evaluation to be disabled without removing it"""

    priority: Optional[int]
    """Priority order for evals (lower number = higher priority) to determine primary eval issue to surface"""

    should_escalate: bool
    """If true, failing this eval means the response is considered bad and can trigger escalation to Codex/SME"""

    threshold: float
    """Threshold value that determines if the evaluation fails"""

    threshold_direction: Literal["above", "below"]
    """Whether the evaluation fails when score is above or below the threshold"""


EvalUpdateParams: TypeAlias = Union[CustomEvalCreateOrUpdateSchema, DefaultEvalUpdateSchema]

src/codex/types/projects/query_log_list_by_group_params.py (new file, 41 additions)

# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.

from __future__ import annotations

from typing import List, Union, Optional
from datetime import datetime
from typing_extensions import Literal, Annotated, TypedDict

from ..._utils import PropertyInfo

__all__ = ["QueryLogListByGroupParams"]


class QueryLogListByGroupParams(TypedDict, total=False):
    created_at_end: Annotated[Union[str, datetime, None], PropertyInfo(format="iso8601")]
    """Filter logs created at or before this timestamp"""

    created_at_start: Annotated[Union[str, datetime, None], PropertyInfo(format="iso8601")]
    """Filter logs created at or after this timestamp"""

    custom_metadata: Optional[str]
    """Filter by custom metadata as JSON string: {"key1": "value1", "key2": "value2"}"""

    limit: int

    offset: int

    order: Literal["asc", "desc"]

    primary_eval_issue: Optional[
        List[Literal["hallucination", "search_failure", "unhelpful", "difficult_query", "unsupported"]]
    ]
    """Filter logs that have ANY of these primary evaluation issues (OR operation)"""

    remediation_ids: List[str]
    """List of groups to list child logs for"""

    sort: Optional[Literal["created_at", "primary_eval_issue_score"]]

    was_cache_hit: Optional[bool]
    """Filter by cache hit status"""

src/codex/types/projects/query_log_list_by_group_response.py (new file, 96 additions)

# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.

from typing import Dict, List, Union, Optional
from datetime import datetime
from typing_extensions import Literal

from ..._models import BaseModel

__all__ = [
    "QueryLogListByGroupResponse",
    "QueryLogsByGroup",
    "QueryLogsByGroupQueryLog",
    "QueryLogsByGroupQueryLogContext",
]


class QueryLogsByGroupQueryLogContext(BaseModel):
    content: str
    """The actual content/text of the document."""

    id: Optional[str] = None
    """Unique identifier for the document. Useful for tracking documents"""

    source: Optional[str] = None
    """Source or origin of the document. Useful for citations."""

    tags: Optional[List[str]] = None
    """Tags or categories for the document. Useful for filtering"""

    title: Optional[str] = None
    """Title or heading of the document. Useful for display and context."""


class QueryLogsByGroupQueryLog(BaseModel):
    id: str

    created_at: datetime

    formatted_eval_scores: Optional[Dict[str, Dict[str, Union[float, Literal["pass", "fail"]]]]] = None
    """Format evaluation scores for frontend display with pass/fail status.

    Returns: Dictionary mapping eval keys to their formatted representation:
    { "eval_key": { "score": float, "status": "pass" | "fail" } }
    Returns None if eval_scores is None.
    """

    is_bad_response: bool
    """If an eval with should_escalate=True failed"""

    project_id: str

    question: str

    remediation_id: str

    was_cache_hit: Optional[bool] = None
    """If similar query already answered, or None if cache was not checked"""

    context: Optional[List[QueryLogsByGroupQueryLogContext]] = None
    """RAG context used for the query"""

    custom_metadata: Optional[object] = None
    """Arbitrary metadata supplied by the user/system"""

    custom_metadata_keys: Optional[List[str]] = None
    """Keys of the custom metadata"""

    eval_issue_labels: Optional[List[str]] = None
    """Labels derived from evaluation scores"""

    eval_scores: Optional[Dict[str, float]] = None
    """Evaluation scores for the original response"""

    eval_thresholds: Optional[Dict[str, Dict[str, Union[float, str]]]] = None
    """Evaluation thresholds and directions at time of creation"""

    evaluated_response: Optional[str] = None
    """The response being evaluated from the RAG system (before any remediation)"""

    primary_eval_issue: Optional[str] = None
    """Primary issue identified in evaluation"""

    primary_eval_issue_score: Optional[float] = None
    """Score of the primary eval issue"""


class QueryLogsByGroup(BaseModel):
    query_logs: List[QueryLogsByGroupQueryLog]

    total_count: int


class QueryLogListByGroupResponse(BaseModel):
    custom_metadata_columns: List[str]

    query_logs_by_group: Dict[str, QueryLogsByGroup]

src/codex/types/projects/query_log_list_groups_params.py (new file, 38 additions)

# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.

from __future__ import annotations

from typing import List, Union, Optional
from datetime import datetime
from typing_extensions import Literal, Annotated, TypedDict

from ..._utils import PropertyInfo

__all__ = ["QueryLogListGroupsParams"]


class QueryLogListGroupsParams(TypedDict, total=False):
    created_at_end: Annotated[Union[str, datetime, None], PropertyInfo(format="iso8601")]
    """Filter logs created at or before this timestamp"""

    created_at_start: Annotated[Union[str, datetime, None], PropertyInfo(format="iso8601")]
    """Filter logs created at or after this timestamp"""

    custom_metadata: Optional[str]
    """Filter by custom metadata as JSON string: {"key1": "value1", "key2": "value2"}"""

    limit: int

    offset: int

    order: Literal["asc", "desc"]

    primary_eval_issue: Optional[
        List[Literal["hallucination", "search_failure", "unhelpful", "difficult_query", "unsupported"]]
    ]
    """Filter logs that have ANY of these primary evaluation issues (OR operation)"""

    sort: Optional[Literal["created_at", "primary_eval_issue_score", "total_count", "custom_rank"]]

    was_cache_hit: Optional[bool]
    """Filter by cache hit status"""

src/codex/types/projects/query_log_list_groups_response.py (new file, 91 additions)

# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.

from typing import Dict, List, Union, Optional
from datetime import datetime
from typing_extensions import Literal

from ..._models import BaseModel

__all__ = ["QueryLogListGroupsResponse", "QueryLogGroup", "QueryLogGroupContext"]


class QueryLogGroupContext(BaseModel):
    content: str
    """The actual content/text of the document."""

    id: Optional[str] = None
    """Unique identifier for the document. Useful for tracking documents"""

    source: Optional[str] = None
    """Source or origin of the document. Useful for citations."""

    tags: Optional[List[str]] = None
    """Tags or categories for the document. Useful for filtering"""

    title: Optional[str] = None
    """Title or heading of the document. Useful for display and context."""


class QueryLogGroup(BaseModel):
    id: str

    created_at: datetime

    formatted_eval_scores: Optional[Dict[str, Dict[str, Union[float, Literal["pass", "fail"]]]]] = None
    """Format evaluation scores for frontend display with pass/fail status.

    Returns: Dictionary mapping eval keys to their formatted representation:
    { "eval_key": { "score": float, "status": "pass" | "fail" } }
    Returns None if eval_scores is None.
    """

    is_bad_response: bool
    """If an eval with should_escalate=True failed"""

    project_id: str

    question: str

    remediation_id: str

    status: Literal["ACTIVE", "DRAFT", "ACTIVE_WITH_DRAFT", "NOT_STARTED", "PAUSED"]

    total_count: int

    was_cache_hit: Optional[bool] = None
    """If similar query already answered, or None if cache was not checked"""

    context: Optional[List[QueryLogGroupContext]] = None
    """RAG context used for the query"""

    custom_metadata: Optional[object] = None
    """Arbitrary metadata supplied by the user/system"""

    custom_metadata_keys: Optional[List[str]] = None
    """Keys of the custom metadata"""

    eval_issue_labels: Optional[List[str]] = None
    """Labels derived from evaluation scores"""

    eval_scores: Optional[Dict[str, float]] = None
    """Evaluation scores for the original response"""

    eval_thresholds: Optional[Dict[str, Dict[str, Union[float, str]]]] = None
    """Evaluation thresholds and directions at time of creation"""

    evaluated_response: Optional[str] = None
    """The response being evaluated from the RAG system (before any remediation)"""

    primary_eval_issue: Optional[str] = None
    """Primary issue identified in evaluation"""

    primary_eval_issue_score: Optional[float] = None
    """Score of the primary eval issue"""


class QueryLogListGroupsResponse(BaseModel):
    custom_metadata_columns: List[str]

    query_log_groups: List[QueryLogGroup]

    total_count: int

src/codex/types/projects/query_log_list_params.py (new file, 38 additions)

# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.

from __future__ import annotations

from typing import List, Union, Optional
from datetime import datetime
from typing_extensions import Literal, Annotated, TypedDict

from ..._utils import PropertyInfo

__all__ = ["QueryLogListParams"]


class QueryLogListParams(TypedDict, total=False):
    created_at_end: Annotated[Union[str, datetime, None], PropertyInfo(format="iso8601")]
    """Filter logs created at or before this timestamp"""

    created_at_start: Annotated[Union[str, datetime, None], PropertyInfo(format="iso8601")]
    """Filter logs created at or after this timestamp"""

    custom_metadata: Optional[str]
    """Filter by custom metadata as JSON string: {"key1": "value1", "key2": "value2"}"""

    limit: int

    offset: int

    order: Literal["asc", "desc"]

    primary_eval_issue: Optional[
        List[Literal["hallucination", "search_failure", "unhelpful", "difficult_query", "unsupported"]]
    ]
    """Filter logs that have ANY of these primary evaluation issues (OR operation)"""

    sort: Optional[Literal["created_at", "primary_eval_issue_score"]]

    was_cache_hit: Optional[bool]
    """Filter by cache hit status"""

src/codex/types/projects/query_log_list_response.py (new file, 87 additions)

# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.

from typing import Dict, List, Union, Optional
from datetime import datetime
from typing_extensions import Literal

from ..._models import BaseModel

__all__ = ["QueryLogListResponse", "QueryLog", "QueryLogContext"]


class QueryLogContext(BaseModel):
    content: str
    """The actual content/text of the document."""

    id: Optional[str] = None
    """Unique identifier for the document. Useful for tracking documents"""

    source: Optional[str] = None
    """Source or origin of the document. Useful for citations."""

    tags: Optional[List[str]] = None
    """Tags or categories for the document. Useful for filtering"""

    title: Optional[str] = None
    """Title or heading of the document. Useful for display and context."""


class QueryLog(BaseModel):
    id: str

    created_at: datetime

    formatted_eval_scores: Optional[Dict[str, Dict[str, Union[float, Literal["pass", "fail"]]]]] = None
    """Format evaluation scores for frontend display with pass/fail status.

    Returns: Dictionary mapping eval keys to their formatted representation:
    { "eval_key": { "score": float, "status": "pass" | "fail" } }
    Returns None if eval_scores is None.
    """

    is_bad_response: bool
    """If an eval with should_escalate=True failed"""

    project_id: str

    question: str

    remediation_id: str

    was_cache_hit: Optional[bool] = None
    """If similar query already answered, or None if cache was not checked"""

    context: Optional[List[QueryLogContext]] = None
    """RAG context used for the query"""

    custom_metadata: Optional[object] = None
    """Arbitrary metadata supplied by the user/system"""

    custom_metadata_keys: Optional[List[str]] = None
    """Keys of the custom metadata"""

    eval_issue_labels: Optional[List[str]] = None
    """Labels derived from evaluation scores"""

    eval_scores: Optional[Dict[str, float]] = None
    """Evaluation scores for the original response"""

    eval_thresholds: Optional[Dict[str, Dict[str, Union[float, str]]]] = None
    """Evaluation thresholds and directions at time of creation"""

    evaluated_response: Optional[str] = None
    """The response being evaluated from the RAG system (before any remediation)"""

    primary_eval_issue: Optional[str] = None
    """Primary issue identified in evaluation"""

    primary_eval_issue_score: Optional[float] = None
    """Score of the primary eval issue"""


class QueryLogListResponse(BaseModel):
    custom_metadata_columns: List[str]

    query_logs: List[QueryLog]

    total_count: int

src/codex/types/projects/query_log_retrieve_response.py (new file, 79 additions)

# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.

from typing import Dict, List, Union, Optional
from datetime import datetime
from typing_extensions import Literal

from ..._models import BaseModel

__all__ = ["QueryLogRetrieveResponse", "Context"]


class Context(BaseModel):
    content: str
    """The actual content/text of the document."""

    id: Optional[str] = None
    """Unique identifier for the document. Useful for tracking documents"""

    source: Optional[str] = None
    """Source or origin of the document. Useful for citations."""

    tags: Optional[List[str]] = None
    """Tags or categories for the document. Useful for filtering"""

    title: Optional[str] = None
    """Title or heading of the document. Useful for display and context."""


class QueryLogRetrieveResponse(BaseModel):
    id: str

    created_at: datetime

    formatted_eval_scores: Optional[Dict[str, Dict[str, Union[float, Literal["pass", "fail"]]]]] = None
    """Format evaluation scores for frontend display with pass/fail status.

    Returns: Dictionary mapping eval keys to their formatted representation:
    { "eval_key": { "score": float, "status": "pass" | "fail" } }
    Returns None if eval_scores is None.
    """

    is_bad_response: bool
    """If an eval with should_escalate=True failed"""

    project_id: str

    question: str

    remediation_id: str

    was_cache_hit: Optional[bool] = None
    """If similar query already answered, or None if cache was not checked"""

    context: Optional[List[Context]] = None
    """RAG context used for the query"""

    custom_metadata: Optional[object] = None
    """Arbitrary metadata supplied by the user/system"""

    custom_metadata_keys: Optional[List[str]] = None
    """Keys of the custom metadata"""

    eval_issue_labels: Optional[List[str]] = None
    """Labels derived from evaluation scores"""

    eval_scores: Optional[Dict[str, float]] = None
    """Evaluation scores for the original response"""

    eval_thresholds: Optional[Dict[str, Dict[str, Union[float, str]]]] = None
    """Evaluation thresholds and directions at time of creation"""

    evaluated_response: Optional[str] = None
    """The response being evaluated from the RAG system (before any remediation)"""

    primary_eval_issue: Optional[str] = None
    """Primary issue identified in evaluation"""

    primary_eval_issue_score: Optional[float] = None
    """Score of the primary eval issue"""

src/codex/types/projects/query_log_start_remediation_response.py (new file, 33 additions)

# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.

from typing import Optional
from datetime import datetime
from typing_extensions import Literal

from ..._models import BaseModel

__all__ = ["QueryLogStartRemediationResponse"]


class QueryLogStartRemediationResponse(BaseModel):
    id: str

    answered_at: Optional[datetime] = None

    answered_by: Optional[str] = None

    created_at: datetime

    last_edited_at: Optional[datetime] = None

    last_edited_by: Optional[str] = None

    project_id: str

    question: str

    status: Literal["ACTIVE", "DRAFT", "ACTIVE_WITH_DRAFT", "NOT_STARTED", "PAUSED"]

    answer: Optional[str] = None

    draft_answer: Optional[str] = None
Lines changed: 16 additions & 0 deletions
@@ -0,0 +1,16 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.

from __future__ import annotations

from typing import Optional
from typing_extensions import Required, TypedDict

__all__ = ["RemediationCreateParams"]


class RemediationCreateParams(TypedDict, total=False):
    question: Required[str]

    answer: Optional[str]

    draft_answer: Optional[str]
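
For orientation, a hedged sketch of how these params might be passed. The client.projects.remediations.create method name, its return type, and the example values are assumptions based on the generated files; only question is required:

# Hypothetical sketch: the create() method and return type are inferred, not confirmed here.
from codex import Codex

client = Codex()

remediation = client.projects.remediations.create(
    project_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
    question="How do I rotate my API key?",                           # required
    draft_answer="Go to Settings > API keys and rotate it there.",    # optional
)

# status is one of the Literal values declared in the response models below.
print(remediation.id, remediation.status)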
Lines changed: 33 additions & 0 deletions
@@ -0,0 +1,33 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.

from typing import Optional
from datetime import datetime
from typing_extensions import Literal

from ..._models import BaseModel

__all__ = ["RemediationCreateResponse"]


class RemediationCreateResponse(BaseModel):
    id: str

    answered_at: Optional[datetime] = None

    answered_by: Optional[str] = None

    created_at: datetime

    last_edited_at: Optional[datetime] = None

    last_edited_by: Optional[str] = None

    project_id: str

    question: str

    status: Literal["ACTIVE", "DRAFT", "ACTIVE_WITH_DRAFT", "NOT_STARTED", "PAUSED"]

    answer: Optional[str] = None

    draft_answer: Optional[str] = None
Lines changed: 13 additions & 0 deletions
@@ -0,0 +1,13 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.

from __future__ import annotations

from typing_extensions import Required, TypedDict

__all__ = ["RemediationEditAnswerParams"]


class RemediationEditAnswerParams(TypedDict, total=False):
    project_id: Required[str]

    answer: Required[str]
Lines changed: 33 additions & 0 deletions
@@ -0,0 +1,33 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.

from typing import Optional
from datetime import datetime
from typing_extensions import Literal

from ..._models import BaseModel

__all__ = ["RemediationEditAnswerResponse"]


class RemediationEditAnswerResponse(BaseModel):
    id: str

    answered_at: Optional[datetime] = None

    answered_by: Optional[str] = None

    created_at: datetime

    last_edited_at: Optional[datetime] = None

    last_edited_by: Optional[str] = None

    project_id: str

    question: str

    status: Literal["ACTIVE", "DRAFT", "ACTIVE_WITH_DRAFT", "NOT_STARTED", "PAUSED"]

    answer: Optional[str] = None

    draft_answer: Optional[str] = None
Lines changed: 13 additions & 0 deletions
@@ -0,0 +1,13 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.

from __future__ import annotations

from typing_extensions import Required, TypedDict

__all__ = ["RemediationEditDraftAnswerParams"]


class RemediationEditDraftAnswerParams(TypedDict, total=False):
    project_id: Required[str]

    draft_answer: Required[str]
Lines changed: 33 additions & 0 deletions
@@ -0,0 +1,33 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.

from typing import Optional
from datetime import datetime
from typing_extensions import Literal

from ..._models import BaseModel

__all__ = ["RemediationEditDraftAnswerResponse"]


class RemediationEditDraftAnswerResponse(BaseModel):
    id: str

    answered_at: Optional[datetime] = None

    answered_by: Optional[str] = None

    created_at: datetime

    last_edited_at: Optional[datetime] = None

    last_edited_by: Optional[str] = None

    project_id: str

    question: str

    status: Literal["ACTIVE", "DRAFT", "ACTIVE_WITH_DRAFT", "NOT_STARTED", "PAUSED"]

    answer: Optional[str] = None

    draft_answer: Optional[str] = None
Lines changed: 35 additions & 0 deletions
@@ -0,0 +1,35 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.

from typing import Optional
from datetime import datetime
from typing_extensions import Literal

from ..._models import BaseModel

__all__ = ["RemediationGetResolvedLogsCountResponse"]


class RemediationGetResolvedLogsCountResponse(BaseModel):
    id: str

    answered_at: Optional[datetime] = None

    answered_by: Optional[str] = None

    created_at: datetime

    last_edited_at: Optional[datetime] = None

    last_edited_by: Optional[str] = None

    project_id: str

    question: str

    resolved_logs_count: int

    status: Literal["ACTIVE", "DRAFT", "ACTIVE_WITH_DRAFT", "NOT_STARTED", "PAUSED"]

    answer: Optional[str] = None

    draft_answer: Optional[str] = None
Lines changed: 39 additions & 0 deletions
@@ -0,0 +1,39 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.

from __future__ import annotations

from typing import List, Union, Optional
from datetime import datetime
from typing_extensions import Literal, Annotated, TypedDict

from ..._utils import PropertyInfo

__all__ = ["RemediationListParams"]


class RemediationListParams(TypedDict, total=False):
    created_at_end: Annotated[Union[str, datetime, None], PropertyInfo(format="iso8601")]
    """Filter remediations created at or before this timestamp"""

    created_at_start: Annotated[Union[str, datetime, None], PropertyInfo(format="iso8601")]
    """Filter remediations created at or after this timestamp"""

    last_edited_at_end: Annotated[Union[str, datetime, None], PropertyInfo(format="iso8601")]
    """Filter remediations last edited at or before this timestamp"""

    last_edited_at_start: Annotated[Union[str, datetime, None], PropertyInfo(format="iso8601")]
    """Filter remediations last edited at or after this timestamp"""

    last_edited_by: Optional[str]
    """Filter by last edited by user ID"""

    limit: int

    offset: int

    order: Literal["asc", "desc"]

    sort: Optional[Literal["created_at", "last_edited_at", "resolved_logs_count"]]

    status: Optional[List[Literal["ACTIVE", "DRAFT", "ACTIVE_WITH_DRAFT", "PAUSED"]]]
    """Filter remediations that have ANY of these statuses (OR operation)"""
Lines changed: 85 additions & 0 deletions
@@ -0,0 +1,85 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.

from typing import Dict, List, Union, Optional
from datetime import datetime
from typing_extensions import Literal

from ..._models import BaseModel

__all__ = ["RemediationListResolvedLogsResponse", "QueryLog", "QueryLogContext"]


class QueryLogContext(BaseModel):
    content: str
    """The actual content/text of the document."""

    id: Optional[str] = None
    """Unique identifier for the document. Useful for tracking documents"""

    source: Optional[str] = None
    """Source or origin of the document. Useful for citations."""

    tags: Optional[List[str]] = None
    """Tags or categories for the document. Useful for filtering"""

    title: Optional[str] = None
    """Title or heading of the document. Useful for display and context."""


class QueryLog(BaseModel):
    id: str

    created_at: datetime

    formatted_eval_scores: Optional[Dict[str, Dict[str, Union[float, Literal["pass", "fail"]]]]] = None
    """Format evaluation scores for frontend display with pass/fail status.

    Returns: Dictionary mapping eval keys to their formatted representation: {
    "eval_key": { "score": float, "status": "pass" | "fail" } } Returns None if
    eval_scores is None.
    """

    is_bad_response: bool
    """If an eval with should_escalate=True failed"""

    project_id: str

    question: str

    remediation_id: str

    was_cache_hit: Optional[bool] = None
    """If similar query already answered, or None if cache was not checked"""

    context: Optional[List[QueryLogContext]] = None
    """RAG context used for the query"""

    custom_metadata: Optional[object] = None
    """Arbitrary metadata supplied by the user/system"""

    custom_metadata_keys: Optional[List[str]] = None
    """Keys of the custom metadata"""

    eval_issue_labels: Optional[List[str]] = None
    """Labels derived from evaluation scores"""

    eval_scores: Optional[Dict[str, float]] = None
    """Evaluation scores for the original response"""

    eval_thresholds: Optional[Dict[str, Dict[str, Union[float, str]]]] = None
    """Evaluation thresholds and directions at time of creation"""

    evaluated_response: Optional[str] = None
    """The response being evaluated from the RAG system (before any remediation)"""

    primary_eval_issue: Optional[str] = None
    """Primary issue identified in evaluation"""

    primary_eval_issue_score: Optional[float] = None
    """Score of the primary eval issue"""


class RemediationListResolvedLogsResponse(BaseModel):
    query_logs: List[QueryLog]

    total_count: int
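
A hedged sketch of fetching the query logs that a single remediation has resolved. The client.projects.remediations.list_resolved_logs method name and signature are assumptions inferred from this response model:

# Hypothetical sketch: list_resolved_logs() and its signature are assumptions.
from codex import Codex

client = Codex()

resolved = client.projects.remediations.list_resolved_logs(
    "remediation_id",
    project_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
)

for log in resolved.query_logs:
    print(log.question, log.was_cache_hit, log.eval_issue_labels)
print("resolved logs:", resolved.total_count)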
Lines changed: 41 additions & 0 deletions
@@ -0,0 +1,41 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.

from typing import List, Optional
from datetime import datetime
from typing_extensions import Literal

from ..._models import BaseModel

__all__ = ["RemediationListResponse", "Remediation"]


class Remediation(BaseModel):
    id: str

    answered_at: Optional[datetime] = None

    answered_by: Optional[str] = None

    created_at: datetime

    last_edited_at: Optional[datetime] = None

    last_edited_by: Optional[str] = None

    project_id: str

    question: str

    resolved_logs_count: int

    status: Literal["ACTIVE", "DRAFT", "ACTIVE_WITH_DRAFT", "NOT_STARTED", "PAUSED"]

    answer: Optional[str] = None

    draft_answer: Optional[str] = None


class RemediationListResponse(BaseModel):
    remediations: List[Remediation]

    total_count: int
Lines changed: 33 additions & 0 deletions
@@ -0,0 +1,33 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.

from typing import Optional
from datetime import datetime
from typing_extensions import Literal

from ..._models import BaseModel

__all__ = ["RemediationPauseResponse"]


class RemediationPauseResponse(BaseModel):
    id: str

    answered_at: Optional[datetime] = None

    answered_by: Optional[str] = None

    created_at: datetime

    last_edited_at: Optional[datetime] = None

    last_edited_by: Optional[str] = None

    project_id: str

    question: str

    status: Literal["ACTIVE", "DRAFT", "ACTIVE_WITH_DRAFT", "NOT_STARTED", "PAUSED"]

    answer: Optional[str] = None

    draft_answer: Optional[str] = None
Lines changed: 33 additions & 0 deletions
@@ -0,0 +1,33 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.

from typing import Optional
from datetime import datetime
from typing_extensions import Literal

from ..._models import BaseModel

__all__ = ["RemediationPublishResponse"]


class RemediationPublishResponse(BaseModel):
    id: str

    answered_at: Optional[datetime] = None

    answered_by: Optional[str] = None

    created_at: datetime

    last_edited_at: Optional[datetime] = None

    last_edited_by: Optional[str] = None

    project_id: str

    question: str

    status: Literal["ACTIVE", "DRAFT", "ACTIVE_WITH_DRAFT", "NOT_STARTED", "PAUSED"]

    answer: Optional[str] = None

    draft_answer: Optional[str] = None
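
A hedged sketch of the publish/pause/unpause lifecycle suggested by the pause, publish, and unpause response models in this commit. The method names, their signatures, and the resulting status transitions are assumptions, not confirmed by this diff:

# Hypothetical sketch: publish(), pause(), and unpause() are assumed method names.
from codex import Codex

client = Codex()
project_id = "182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e"

published = client.projects.remediations.publish("remediation_id", project_id=project_id)
print(published.status)   # expected to leave DRAFT / ACTIVE_WITH_DRAFT once published

paused = client.projects.remediations.pause("remediation_id", project_id=project_id)
print(paused.status)      # expected to report PAUSED

resumed = client.projects.remediations.unpause("remediation_id", project_id=project_id)
print(resumed.status)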
Lines changed: 33 additions & 0 deletions
@@ -0,0 +1,33 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.

from typing import Optional
from datetime import datetime
from typing_extensions import Literal

from ..._models import BaseModel

__all__ = ["RemediationRetrieveResponse"]


class RemediationRetrieveResponse(BaseModel):
    id: str

    answered_at: Optional[datetime] = None

    answered_by: Optional[str] = None

    created_at: datetime

    last_edited_at: Optional[datetime] = None

    last_edited_by: Optional[str] = None

    project_id: str

    question: str

    status: Literal["ACTIVE", "DRAFT", "ACTIVE_WITH_DRAFT", "NOT_STARTED", "PAUSED"]

    answer: Optional[str] = None

    draft_answer: Optional[str] = None
Lines changed: 33 additions & 0 deletions
@@ -0,0 +1,33 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.

from typing import Optional
from datetime import datetime
from typing_extensions import Literal

from ..._models import BaseModel

__all__ = ["RemediationUnpauseResponse"]


class RemediationUnpauseResponse(BaseModel):
    id: str

    answered_at: Optional[datetime] = None

    answered_by: Optional[str] = None

    created_at: datetime

    last_edited_at: Optional[datetime] = None

    last_edited_by: Optional[str] = None

    project_id: str

    question: str

    status: Literal["ACTIVE", "DRAFT", "ACTIVE_WITH_DRAFT", "NOT_STARTED", "PAUSED"]

    answer: Optional[str] = None

    draft_answer: Optional[str] = None

‎tests/api_resources/projects/test_evals.py‎

Lines changed: 679 additions & 0 deletions
Large diffs are not rendered by default.

‎tests/api_resources/projects/test_query_logs.py‎

Lines changed: 593 additions & 0 deletions
Large diffs are not rendered by default.

‎tests/api_resources/projects/test_remediations.py‎

Lines changed: 1224 additions & 0 deletions
Large diffs are not rendered by default.

‎tests/api_resources/test_projects.py‎

Lines changed: 109 additions & 0 deletions
@@ -13,6 +13,7 @@
     ProjectReturnSchema,
     ProjectRetrieveResponse,
     ProjectValidateResponse,
+    ProjectInviteSmeResponse,
     ProjectRetrieveAnalyticsResponse,
 )
 from tests.utils import assert_matches_type
@@ -515,6 +516,60 @@ def test_path_params_increment_queries(self, client: Codex) -> None:
                 project_id="",
             )
 
+    @pytest.mark.skip()
+    @parametrize
+    def test_method_invite_sme(self, client: Codex) -> None:
+        project = client.projects.invite_sme(
+            project_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
+            email="email",
+            page_type="query_log",
+            url_query_string="url_query_string",
+        )
+        assert_matches_type(ProjectInviteSmeResponse, project, path=["response"])
+
+    @pytest.mark.skip()
+    @parametrize
+    def test_raw_response_invite_sme(self, client: Codex) -> None:
+        response = client.projects.with_raw_response.invite_sme(
+            project_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
+            email="email",
+            page_type="query_log",
+            url_query_string="url_query_string",
+        )
+
+        assert response.is_closed is True
+        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+        project = response.parse()
+        assert_matches_type(ProjectInviteSmeResponse, project, path=["response"])
+
+    @pytest.mark.skip()
+    @parametrize
+    def test_streaming_response_invite_sme(self, client: Codex) -> None:
+        with client.projects.with_streaming_response.invite_sme(
+            project_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
+            email="email",
+            page_type="query_log",
+            url_query_string="url_query_string",
+        ) as response:
+            assert not response.is_closed
+            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+            project = response.parse()
+            assert_matches_type(ProjectInviteSmeResponse, project, path=["response"])
+
+        assert cast(Any, response.is_closed) is True
+
+    @pytest.mark.skip()
+    @parametrize
+    def test_path_params_invite_sme(self, client: Codex) -> None:
+        with pytest.raises(ValueError, match=r"Expected a non-empty value for `project_id` but received ''"):
+            client.projects.with_raw_response.invite_sme(
+                project_id="",
+                email="email",
+                page_type="query_log",
+                url_query_string="url_query_string",
+            )
+
     @pytest.mark.skip()
     @parametrize
     def test_method_retrieve_analytics(self, client: Codex) -> None:
@@ -1160,6 +1215,60 @@ async def test_path_params_increment_queries(self, async_client: AsyncCodex) ->
                 project_id="",
             )
 
+    @pytest.mark.skip()
+    @parametrize
+    async def test_method_invite_sme(self, async_client: AsyncCodex) -> None:
+        project = await async_client.projects.invite_sme(
+            project_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
+            email="email",
+            page_type="query_log",
+            url_query_string="url_query_string",
+        )
+        assert_matches_type(ProjectInviteSmeResponse, project, path=["response"])
+
+    @pytest.mark.skip()
+    @parametrize
+    async def test_raw_response_invite_sme(self, async_client: AsyncCodex) -> None:
+        response = await async_client.projects.with_raw_response.invite_sme(
+            project_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
+            email="email",
+            page_type="query_log",
+            url_query_string="url_query_string",
+        )
+
+        assert response.is_closed is True
+        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+        project = await response.parse()
+        assert_matches_type(ProjectInviteSmeResponse, project, path=["response"])
+
+    @pytest.mark.skip()
+    @parametrize
+    async def test_streaming_response_invite_sme(self, async_client: AsyncCodex) -> None:
+        async with async_client.projects.with_streaming_response.invite_sme(
+            project_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
+            email="email",
+            page_type="query_log",
+            url_query_string="url_query_string",
+        ) as response:
+            assert not response.is_closed
+            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+            project = await response.parse()
+            assert_matches_type(ProjectInviteSmeResponse, project, path=["response"])
+
+        assert cast(Any, response.is_closed) is True
+
+    @pytest.mark.skip()
+    @parametrize
+    async def test_path_params_invite_sme(self, async_client: AsyncCodex) -> None:
+        with pytest.raises(ValueError, match=r"Expected a non-empty value for `project_id` but received ''"):
+            await async_client.projects.with_raw_response.invite_sme(
+                project_id="",
+                email="email",
+                page_type="query_log",
+                url_query_string="url_query_string",
+            )
+
     @pytest.mark.skip()
     @parametrize
     async def test_method_retrieve_analytics(self, async_client: AsyncCodex) -> None:
