Skip to content

Commit 32906a5

Browse files
feat(api): api update
1 parent e6a7b74 commit 32906a5

File tree

8 files changed

+172
-4
lines changed

8 files changed

+172
-4
lines changed

.stats.yml

+1-1
Original file line numberDiff line numberDiff line change
@@ -1,3 +1,3 @@
11
configured_endpoints: 43
2-
openapi_spec_hash: 1d2bab7a30685ae6ba2f0a98abf2dd54
2+
openapi_spec_hash: b7beefbd38b4fcdd191cdb81a18a023b
33
config_hash: 5e459b33c53ffa6e554087a779bdb790

src/codex/resources/projects/clusters.py

+26-2
Original file line numberDiff line numberDiff line change
@@ -52,10 +52,21 @@ def list(
5252
*,
5353
eval_issue_types: List[Literal["hallucination", "search_failure", "unhelpful", "difficult_query"]]
5454
| NotGiven = NOT_GIVEN,
55+
instruction_adherence_failure: Optional[Literal["html_format", "content_structure"]] | NotGiven = NOT_GIVEN,
5556
limit: int | NotGiven = NOT_GIVEN,
5657
offset: int | NotGiven = NOT_GIVEN,
5758
order: Literal["asc", "desc"] | NotGiven = NOT_GIVEN,
58-
sort: Optional[Literal["created_at", "answered_at", "cluster_frequency_count", "custom_rank", "eval_score"]]
59+
sort: Optional[
60+
Literal[
61+
"created_at",
62+
"answered_at",
63+
"cluster_frequency_count",
64+
"custom_rank",
65+
"eval_score",
66+
"html_format_score",
67+
"content_structure_score",
68+
]
69+
]
5970
| NotGiven = NOT_GIVEN,
6071
states: List[Literal["unanswered", "draft", "published", "published_with_draft"]] | NotGiven = NOT_GIVEN,
6172
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
@@ -90,6 +101,7 @@ def list(
90101
query=maybe_transform(
91102
{
92103
"eval_issue_types": eval_issue_types,
104+
"instruction_adherence_failure": instruction_adherence_failure,
93105
"limit": limit,
94106
"offset": offset,
95107
"order": order,
@@ -167,10 +179,21 @@ def list(
167179
*,
168180
eval_issue_types: List[Literal["hallucination", "search_failure", "unhelpful", "difficult_query"]]
169181
| NotGiven = NOT_GIVEN,
182+
instruction_adherence_failure: Optional[Literal["html_format", "content_structure"]] | NotGiven = NOT_GIVEN,
170183
limit: int | NotGiven = NOT_GIVEN,
171184
offset: int | NotGiven = NOT_GIVEN,
172185
order: Literal["asc", "desc"] | NotGiven = NOT_GIVEN,
173-
sort: Optional[Literal["created_at", "answered_at", "cluster_frequency_count", "custom_rank", "eval_score"]]
186+
sort: Optional[
187+
Literal[
188+
"created_at",
189+
"answered_at",
190+
"cluster_frequency_count",
191+
"custom_rank",
192+
"eval_score",
193+
"html_format_score",
194+
"content_structure_score",
195+
]
196+
]
174197
| NotGiven = NOT_GIVEN,
175198
states: List[Literal["unanswered", "draft", "published", "published_with_draft"]] | NotGiven = NOT_GIVEN,
176199
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
@@ -205,6 +228,7 @@ def list(
205228
query=maybe_transform(
206229
{
207230
"eval_issue_types": eval_issue_types,
231+
"instruction_adherence_failure": instruction_adherence_failure,
208232
"limit": limit,
209233
"offset": offset,
210234
"order": order,

src/codex/types/project_retrieve_analytics_response.py

+2
Original file line numberDiff line numberDiff line change
@@ -17,6 +17,8 @@
1717
class AnswersPublishedAnswersByAuthor(BaseModel):
1818
answers_published: int
1919

20+
email: str
21+
2022
name: str
2123

2224
user_id: str

src/codex/types/projects/cluster_list_params.py

+13-1
Original file line numberDiff line numberDiff line change
@@ -11,12 +11,24 @@
1111
class ClusterListParams(TypedDict, total=False):
1212
eval_issue_types: List[Literal["hallucination", "search_failure", "unhelpful", "difficult_query"]]
1313

14+
instruction_adherence_failure: Optional[Literal["html_format", "content_structure"]]
15+
1416
limit: int
1517

1618
offset: int
1719

1820
order: Literal["asc", "desc"]
1921

20-
sort: Optional[Literal["created_at", "answered_at", "cluster_frequency_count", "custom_rank", "eval_score"]]
22+
sort: Optional[
23+
Literal[
24+
"created_at",
25+
"answered_at",
26+
"cluster_frequency_count",
27+
"custom_rank",
28+
"eval_score",
29+
"html_format_score",
30+
"content_structure_score",
31+
]
32+
]
2133

2234
states: List[Literal["unanswered", "draft", "published", "published_with_draft"]]

src/codex/types/projects/cluster_list_response.py

+44
Original file line numberDiff line numberDiff line change
@@ -9,13 +9,31 @@
99
__all__ = [
1010
"ClusterListResponse",
1111
"ManagedMetadata",
12+
"ManagedMetadataContentStructureScores",
1213
"ManagedMetadataContextSufficiency",
14+
"ManagedMetadataHTMLFormatScores",
1315
"ManagedMetadataQueryEaseCustomized",
1416
"ManagedMetadataResponseHelpfulness",
1517
"ManagedMetadataTrustworthiness",
1618
]
1719

1820

21+
class ManagedMetadataContentStructureScores(BaseModel):
22+
average: Optional[float] = None
23+
"""The average of all scores."""
24+
25+
latest: Optional[float] = None
26+
"""The most recent score."""
27+
28+
max: Optional[float] = None
29+
"""The maximum score."""
30+
31+
min: Optional[float] = None
32+
"""The minimum score."""
33+
34+
scores: Optional[List[float]] = None
35+
36+
1937
class ManagedMetadataContextSufficiency(BaseModel):
2038
average: Optional[float] = None
2139
"""The average of all scores."""
@@ -32,6 +50,22 @@ class ManagedMetadataContextSufficiency(BaseModel):
3250
scores: Optional[List[float]] = None
3351

3452

53+
class ManagedMetadataHTMLFormatScores(BaseModel):
54+
average: Optional[float] = None
55+
"""The average of all scores."""
56+
57+
latest: Optional[float] = None
58+
"""The most recent score."""
59+
60+
max: Optional[float] = None
61+
"""The maximum score."""
62+
63+
min: Optional[float] = None
64+
"""The minimum score."""
65+
66+
scores: Optional[List[float]] = None
67+
68+
3569
class ManagedMetadataQueryEaseCustomized(BaseModel):
3670
average: Optional[float] = None
3771
"""The average of all scores."""
@@ -93,13 +127,19 @@ class ManagedMetadata(BaseModel):
93127
latest_location: Optional[str] = None
94128
"""The most recent location string."""
95129

130+
content_structure_scores: Optional[ManagedMetadataContentStructureScores] = None
131+
"""Holds a list of scores and computes aggregate statistics."""
132+
96133
context_sufficiency: Optional[ManagedMetadataContextSufficiency] = None
97134
"""Holds a list of scores and computes aggregate statistics."""
98135

99136
contexts: Optional[List[str]] = None
100137

101138
entry_points: Optional[List[str]] = None
102139

140+
html_format_scores: Optional[ManagedMetadataHTMLFormatScores] = None
141+
"""Holds a list of scores and computes aggregate statistics."""
142+
103143
llm_responses: Optional[List[str]] = None
104144

105145
locations: Optional[List[str]] = None
@@ -136,6 +176,8 @@ class ClusterListResponse(BaseModel):
136176

137177
client_query_metadata: Optional[List[object]] = None
138178

179+
content_structure_score: Optional[float] = None
180+
139181
draft_answer: Optional[str] = None
140182

141183
draft_answer_last_edited: Optional[datetime] = None
@@ -147,4 +189,6 @@ class ClusterListResponse(BaseModel):
147189
frequency_count: Optional[int] = None
148190
"""number of times the entry matched for a /query request"""
149191

192+
html_format_score: Optional[float] = None
193+
150194
representative_entry_id: Optional[str] = None

src/codex/types/projects/entry.py

+44
Original file line numberDiff line numberDiff line change
@@ -9,13 +9,31 @@
99
__all__ = [
1010
"Entry",
1111
"ManagedMetadata",
12+
"ManagedMetadataContentStructureScores",
1213
"ManagedMetadataContextSufficiency",
14+
"ManagedMetadataHTMLFormatScores",
1315
"ManagedMetadataQueryEaseCustomized",
1416
"ManagedMetadataResponseHelpfulness",
1517
"ManagedMetadataTrustworthiness",
1618
]
1719

1820

21+
class ManagedMetadataContentStructureScores(BaseModel):
22+
average: Optional[float] = None
23+
"""The average of all scores."""
24+
25+
latest: Optional[float] = None
26+
"""The most recent score."""
27+
28+
max: Optional[float] = None
29+
"""The maximum score."""
30+
31+
min: Optional[float] = None
32+
"""The minimum score."""
33+
34+
scores: Optional[List[float]] = None
35+
36+
1937
class ManagedMetadataContextSufficiency(BaseModel):
2038
average: Optional[float] = None
2139
"""The average of all scores."""
@@ -32,6 +50,22 @@ class ManagedMetadataContextSufficiency(BaseModel):
3250
scores: Optional[List[float]] = None
3351

3452

53+
class ManagedMetadataHTMLFormatScores(BaseModel):
54+
average: Optional[float] = None
55+
"""The average of all scores."""
56+
57+
latest: Optional[float] = None
58+
"""The most recent score."""
59+
60+
max: Optional[float] = None
61+
"""The maximum score."""
62+
63+
min: Optional[float] = None
64+
"""The minimum score."""
65+
66+
scores: Optional[List[float]] = None
67+
68+
3569
class ManagedMetadataQueryEaseCustomized(BaseModel):
3670
average: Optional[float] = None
3771
"""The average of all scores."""
@@ -93,13 +127,19 @@ class ManagedMetadata(BaseModel):
93127
latest_location: Optional[str] = None
94128
"""The most recent location string."""
95129

130+
content_structure_scores: Optional[ManagedMetadataContentStructureScores] = None
131+
"""Holds a list of scores and computes aggregate statistics."""
132+
96133
context_sufficiency: Optional[ManagedMetadataContextSufficiency] = None
97134
"""Holds a list of scores and computes aggregate statistics."""
98135

99136
contexts: Optional[List[str]] = None
100137

101138
entry_points: Optional[List[str]] = None
102139

140+
html_format_scores: Optional[ManagedMetadataHTMLFormatScores] = None
141+
"""Holds a list of scores and computes aggregate statistics."""
142+
103143
llm_responses: Optional[List[str]] = None
104144

105145
locations: Optional[List[str]] = None
@@ -134,6 +174,8 @@ class Entry(BaseModel):
134174

135175
client_query_metadata: Optional[List[object]] = None
136176

177+
content_structure_score: Optional[float] = None
178+
137179
draft_answer: Optional[str] = None
138180

139181
draft_answer_last_edited: Optional[datetime] = None
@@ -144,3 +186,5 @@ class Entry(BaseModel):
144186

145187
frequency_count: Optional[int] = None
146188
"""number of times the entry matched for a /query request"""
189+
190+
html_format_score: Optional[float] = None

src/codex/types/projects/entry_query_response.py

+40
Original file line numberDiff line numberDiff line change
@@ -8,13 +8,31 @@
88
"EntryQueryResponse",
99
"Entry",
1010
"EntryManagedMetadata",
11+
"EntryManagedMetadataContentStructureScores",
1112
"EntryManagedMetadataContextSufficiency",
13+
"EntryManagedMetadataHTMLFormatScores",
1214
"EntryManagedMetadataQueryEaseCustomized",
1315
"EntryManagedMetadataResponseHelpfulness",
1416
"EntryManagedMetadataTrustworthiness",
1517
]
1618

1719

20+
class EntryManagedMetadataContentStructureScores(BaseModel):
21+
average: Optional[float] = None
22+
"""The average of all scores."""
23+
24+
latest: Optional[float] = None
25+
"""The most recent score."""
26+
27+
max: Optional[float] = None
28+
"""The maximum score."""
29+
30+
min: Optional[float] = None
31+
"""The minimum score."""
32+
33+
scores: Optional[List[float]] = None
34+
35+
1836
class EntryManagedMetadataContextSufficiency(BaseModel):
1937
average: Optional[float] = None
2038
"""The average of all scores."""
@@ -31,6 +49,22 @@ class EntryManagedMetadataContextSufficiency(BaseModel):
3149
scores: Optional[List[float]] = None
3250

3351

52+
class EntryManagedMetadataHTMLFormatScores(BaseModel):
53+
average: Optional[float] = None
54+
"""The average of all scores."""
55+
56+
latest: Optional[float] = None
57+
"""The most recent score."""
58+
59+
max: Optional[float] = None
60+
"""The maximum score."""
61+
62+
min: Optional[float] = None
63+
"""The minimum score."""
64+
65+
scores: Optional[List[float]] = None
66+
67+
3468
class EntryManagedMetadataQueryEaseCustomized(BaseModel):
3569
average: Optional[float] = None
3670
"""The average of all scores."""
@@ -92,13 +126,19 @@ class EntryManagedMetadata(BaseModel):
92126
latest_location: Optional[str] = None
93127
"""The most recent location string."""
94128

129+
content_structure_scores: Optional[EntryManagedMetadataContentStructureScores] = None
130+
"""Holds a list of scores and computes aggregate statistics."""
131+
95132
context_sufficiency: Optional[EntryManagedMetadataContextSufficiency] = None
96133
"""Holds a list of scores and computes aggregate statistics."""
97134

98135
contexts: Optional[List[str]] = None
99136

100137
entry_points: Optional[List[str]] = None
101138

139+
html_format_scores: Optional[EntryManagedMetadataHTMLFormatScores] = None
140+
"""Holds a list of scores and computes aggregate statistics."""
141+
102142
llm_responses: Optional[List[str]] = None
103143

104144
locations: Optional[List[str]] = None

tests/api_resources/projects/test_clusters.py

+2
Original file line numberDiff line numberDiff line change
@@ -32,6 +32,7 @@ def test_method_list_with_all_params(self, client: Codex) -> None:
3232
cluster = client.projects.clusters.list(
3333
project_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
3434
eval_issue_types=["hallucination"],
35+
instruction_adherence_failure="html_format",
3536
limit=1,
3637
offset=0,
3738
order="asc",
@@ -146,6 +147,7 @@ async def test_method_list_with_all_params(self, async_client: AsyncCodex) -> No
146147
cluster = await async_client.projects.clusters.list(
147148
project_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
148149
eval_issue_types=["hallucination"],
150+
instruction_adherence_failure="html_format",
149151
limit=1,
150152
offset=0,
151153
order="asc",

0 commit comments

Comments (0)