Skip to content

Commit 8701aa6

Browse files
committed
feat(api): add incomplete state (openai#846)
1 parent 3f4b743 commit 8701aa6

File tree

7 files changed

+61
-39
lines changed

7 files changed

+61
-39
lines changed

.stats.yml

+1-1
Original file line numberDiff line numberDiff line change
@@ -1,2 +1,2 @@
11
configured_endpoints: 64
2-
openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-47007cc1aa5bc7b74107a99b377925978a0bd376ed67bdae724e80d5d0b63d57.yml
2+
openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-363dd904e5d6e65b3a323fc88e6b502fb23a6aa319be219273e3ee47c7530993.yml

src/resources/batches.ts

+6-3
Original file line numberDiff line numberDiff line change
@@ -215,9 +215,11 @@ export interface BatchCreateParams {
215215

216216
/**
217217
* The endpoint to be used for all requests in the batch. Currently
218-
* `/v1/chat/completions` and `/v1/embeddings` are supported.
218+
* `/v1/chat/completions`, `/v1/embeddings`, and `/v1/completions` are supported.
219+
* Note that `/v1/embeddings` batches are also restricted to a maximum of 50,000
220+
* embedding inputs across all requests in the batch.
219221
*/
220-
endpoint: '/v1/chat/completions' | '/v1/embeddings';
222+
endpoint: '/v1/chat/completions' | '/v1/embeddings' | '/v1/completions';
221223

222224
/**
223225
* The ID of an uploaded file that contains requests for the new batch.
@@ -227,7 +229,8 @@ export interface BatchCreateParams {
227229
*
228230
* Your input file must be formatted as a
229231
* [JSONL file](https://platform.openai.com/docs/api-reference/batch/requestInput),
230-
* and must be uploaded with the purpose `batch`.
232+
* and must be uploaded with the purpose `batch`. The file can contain up to 50,000
233+
* requests, and can be up to 100 MB in size.
231234
*/
232235
input_file_id: string;
233236

src/resources/beta/assistants.ts

+9-6
Original file line numberDiff line numberDiff line change
@@ -144,8 +144,9 @@ export interface Assistant {
144144

145145
/**
146146
* Specifies the format that the model must output. Compatible with
147-
* [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo) and
148-
* all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
147+
* [GPT-4o](https://platform.openai.com/docs/models/gpt-4o),
148+
* [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4),
149+
* and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
149150
*
150151
* Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the
151152
* message the model generates is valid JSON.
@@ -1047,8 +1048,9 @@ export interface AssistantCreateParams {
10471048

10481049
/**
10491050
* Specifies the format that the model must output. Compatible with
1050-
* [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo) and
1051-
* all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
1051+
* [GPT-4o](https://platform.openai.com/docs/models/gpt-4o),
1052+
* [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4),
1053+
* and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
10521054
*
10531055
* Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the
10541056
* message the model generates is valid JSON.
@@ -1193,8 +1195,9 @@ export interface AssistantUpdateParams {
11931195

11941196
/**
11951197
* Specifies the format that the model must output. Compatible with
1196-
* [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo) and
1197-
* all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
1198+
* [GPT-4o](https://platform.openai.com/docs/models/gpt-4o),
1199+
* [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4),
1200+
* and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
11981201
*
11991202
* Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the
12001203
* message the model generates is valid JSON.

src/resources/beta/threads/runs/runs.ts

+21-14
Original file line numberDiff line numberDiff line change
@@ -176,6 +176,7 @@ export class Runs extends APIResource {
176176
break;
177177
//We return the run in any terminal state.
178178
case 'requires_action':
179+
case 'incomplete':
179180
case 'cancelled':
180181
case 'completed':
181182
case 'failed':
@@ -409,8 +410,9 @@ export interface Run {
409410

410411
/**
411412
* Specifies the format that the model must output. Compatible with
412-
* [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo) and
413-
* all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
413+
* [GPT-4o](https://platform.openai.com/docs/models/gpt-4o),
414+
* [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4),
415+
* and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
414416
*
415417
* Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the
416418
* message the model generates is valid JSON.
@@ -432,8 +434,8 @@ export interface Run {
432434

433435
/**
434436
* The status of the run, which can be either `queued`, `in_progress`,
435-
* `requires_action`, `cancelling`, `cancelled`, `failed`, `completed`, or
436-
* `expired`.
437+
* `requires_action`, `cancelling`, `cancelled`, `failed`, `completed`,
438+
* `incomplete`, or `expired`.
437439
*/
438440
status: RunStatus;
439441

@@ -584,8 +586,8 @@ export namespace Run {
584586

585587
/**
586588
* The status of the run, which can be either `queued`, `in_progress`,
587-
* `requires_action`, `cancelling`, `cancelled`, `failed`, `completed`, or
588-
* `expired`.
589+
* `requires_action`, `cancelling`, `cancelled`, `failed`, `completed`,
590+
* `incomplete`, or `expired`.
589591
*/
590592
export type RunStatus =
591593
| 'queued'
@@ -595,6 +597,7 @@ export type RunStatus =
595597
| 'cancelled'
596598
| 'failed'
597599
| 'completed'
600+
| 'incomplete'
598601
| 'expired';
599602

600603
export type RunCreateParams = RunCreateParamsNonStreaming | RunCreateParamsStreaming;
@@ -684,8 +687,9 @@ export interface RunCreateParamsBase {
684687

685688
/**
686689
* Specifies the format that the model must output. Compatible with
687-
* [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo) and
688-
* all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
690+
* [GPT-4o](https://platform.openai.com/docs/models/gpt-4o),
691+
* [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4),
692+
* and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
689693
*
690694
* Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the
691695
* message the model generates is valid JSON.
@@ -945,8 +949,9 @@ export interface RunCreateAndPollParams {
945949

946950
/**
947951
* Specifies the format that the model must output. Compatible with
948-
* [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo) and
949-
* all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
952+
* [GPT-4o](https://platform.openai.com/docs/models/gpt-4o),
953+
* [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4),
954+
* and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
950955
*
951956
* Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the
952957
* message the model generates is valid JSON.
@@ -1152,8 +1157,9 @@ export interface RunCreateAndStreamParams {
11521157

11531158
/**
11541159
* Specifies the format that the model must output. Compatible with
1155-
* [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo) and
1156-
* all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
1160+
* [GPT-4o](https://platform.openai.com/docs/models/gpt-4o),
1161+
* [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4),
1162+
* and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
11571163
*
11581164
* Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the
11591165
* message the model generates is valid JSON.
@@ -1359,8 +1365,9 @@ export interface RunStreamParams {
13591365

13601366
/**
13611367
* Specifies the format that the model must output. Compatible with
1362-
* [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo) and
1363-
* all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
1368+
* [GPT-4o](https://platform.openai.com/docs/models/gpt-4o),
1369+
* [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4),
1370+
* and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
13641371
*
13651372
* Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the
13661373
* message the model generates is valid JSON.

src/resources/beta/threads/threads.ts

+12-8
Original file line numberDiff line numberDiff line change
@@ -130,8 +130,9 @@ export interface AssistantResponseFormat {
130130

131131
/**
132132
* Specifies the format that the model must output. Compatible with
133-
* [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo) and
134-
* all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
133+
* [GPT-4o](https://platform.openai.com/docs/models/gpt-4o),
134+
* [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4),
135+
* and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
135136
*
136137
* Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the
137138
* message the model generates is valid JSON.
@@ -516,8 +517,9 @@ export interface ThreadCreateAndRunParamsBase {
516517

517518
/**
518519
* Specifies the format that the model must output. Compatible with
519-
* [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo) and
520-
* all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
520+
* [GPT-4o](https://platform.openai.com/docs/models/gpt-4o),
521+
* [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4),
522+
* and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
521523
*
522524
* Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the
523525
* message the model generates is valid JSON.
@@ -875,8 +877,9 @@ export interface ThreadCreateAndRunPollParams {
875877

876878
/**
877879
* Specifies the format that the model must output. Compatible with
878-
* [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo) and
879-
* all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
880+
* [GPT-4o](https://platform.openai.com/docs/models/gpt-4o),
881+
* [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4),
882+
* and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
880883
*
881884
* Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the
882885
* message the model generates is valid JSON.
@@ -1206,8 +1209,9 @@ export interface ThreadCreateAndRunStreamParams {
12061209

12071210
/**
12081211
* Specifies the format that the model must output. Compatible with
1209-
* [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo) and
1210-
* all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
1212+
* [GPT-4o](https://platform.openai.com/docs/models/gpt-4o),
1213+
* [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4),
1214+
* and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
12111215
*
12121216
* Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the
12131217
* message the model generates is valid JSON.

src/resources/beta/vector-stores/file-batches.ts

+1
Original file line numberDiff line numberDiff line change
@@ -138,6 +138,7 @@ export class FileBatches extends APIResource {
138138
await sleep(sleepInterval);
139139
break;
140140
case 'failed':
141+
case 'cancelled':
141142
case 'completed':
142143
return batch;
143144
}

src/resources/files.ts

+11-7
Original file line numberDiff line numberDiff line change
@@ -12,14 +12,18 @@ import { Page } from '../pagination';
1212

1313
export class Files extends APIResource {
1414
/**
15-
* Upload a file that can be used across various endpoints. The size of all the
16-
* files uploaded by one organization can be up to 100 GB.
15+
* Upload a file that can be used across various endpoints. Individual files can be
16+
* up to 512 MB, and the size of all files uploaded by one organization can be up
17+
* to 100 GB.
1718
*
18-
* The size of individual files can be a maximum of 512 MB or 2 million tokens for
19-
* Assistants. See the
20-
* [Assistants Tools guide](https://platform.openai.com/docs/assistants/tools) to
21-
* learn more about the types of files supported. The Fine-tuning API only supports
22-
* `.jsonl` files.
19+
* The Assistants API supports files up to 2 million tokens and of specific file
20+
* types. See the
21+
* [Assistants Tools guide](https://platform.openai.com/docs/assistants/tools) for
22+
* details.
23+
*
24+
* The Fine-tuning API only supports `.jsonl` files.
25+
*
26+
* The Batch API only supports `.jsonl` files up to 100 MB in size.
2327
*
2428
* Please [contact us](https://help.openai.com/) if you need to increase these
2529
* storage limits.

0 commit comments

Comments (0)