
Commit 698a0c9

stainless-app[bot] (Stainless Bot) authored and committed

feat(api): add o1 models (#49)

See https://platform.openai.com/docs/guides/reasoning for details.

1 parent 082e6ae commit 698a0c9

9 files changed: +98 −53 lines

.stats.yml (+1 −1)

@@ -1,2 +1,2 @@
 configured_endpoints: 68
-openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-85a85e0c08de456441431c0ae4e9c078cc8f9748c29430b9a9058340db6389ee.yml
+openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-501122aa32adaa2abb3d4487880ab9cdf2141addce2e6c3d1bd9bb6b44c318a8.yml

betaassistant.go (+16 −12)

@@ -1860,7 +1860,8 @@ type FileSearchToolFileSearch struct {
 	// [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search/customizing-file-search-settings)
 	// for more information.
 	MaxNumResults int64 `json:"max_num_results"`
-	// The ranking options for the file search.
+	// The ranking options for the file search. If not specified, the file search tool
+	// will use the `auto` ranker and a score_threshold of 0.
 	//
 	// See the
 	// [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search/customizing-file-search-settings)
@@ -1886,26 +1887,27 @@ func (r fileSearchToolFileSearchJSON) RawJSON() string {
 	return r.raw
 }

-// The ranking options for the file search.
+// The ranking options for the file search. If not specified, the file search tool
+// will use the `auto` ranker and a score_threshold of 0.
 //
 // See the
 // [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search/customizing-file-search-settings)
 // for more information.
 type FileSearchToolFileSearchRankingOptions struct {
+	// The score threshold for the file search. All values must be a floating point
+	// number between 0 and 1.
+	ScoreThreshold float64 `json:"score_threshold,required"`
 	// The ranker to use for the file search. If not specified will use the `auto`
 	// ranker.
 	Ranker FileSearchToolFileSearchRankingOptionsRanker `json:"ranker"`
-	// The score threshold for the file search. All values must be a floating point
-	// number between 0 and 1.
-	ScoreThreshold float64                                    `json:"score_threshold"`
-	JSON           fileSearchToolFileSearchRankingOptionsJSON `json:"-"`
+	JSON   fileSearchToolFileSearchRankingOptionsJSON `json:"-"`
 }

 // fileSearchToolFileSearchRankingOptionsJSON contains the JSON metadata for the
 // struct [FileSearchToolFileSearchRankingOptions]
 type fileSearchToolFileSearchRankingOptionsJSON struct {
-	Ranker         apijson.Field
 	ScoreThreshold apijson.Field
+	Ranker         apijson.Field
 	raw            string
 	ExtraFields    map[string]apijson.Field
 }
@@ -1961,7 +1963,8 @@ type FileSearchToolFileSearchParam struct {
 	// [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search/customizing-file-search-settings)
 	// for more information.
 	MaxNumResults param.Field[int64] `json:"max_num_results"`
-	// The ranking options for the file search.
+	// The ranking options for the file search. If not specified, the file search tool
+	// will use the `auto` ranker and a score_threshold of 0.
 	//
 	// See the
 	// [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search/customizing-file-search-settings)
@@ -1973,18 +1976,19 @@ func (r FileSearchToolFileSearchParam) MarshalJSON() (data []byte, err error) {
 	return apijson.MarshalRoot(r)
 }

-// The ranking options for the file search.
+// The ranking options for the file search. If not specified, the file search tool
+// will use the `auto` ranker and a score_threshold of 0.
 //
 // See the
 // [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search/customizing-file-search-settings)
 // for more information.
 type FileSearchToolFileSearchRankingOptionsParam struct {
+	// The score threshold for the file search. All values must be a floating point
+	// number between 0 and 1.
+	ScoreThreshold param.Field[float64] `json:"score_threshold,required"`
 	// The ranker to use for the file search. If not specified will use the `auto`
 	// ranker.
 	Ranker param.Field[FileSearchToolFileSearchRankingOptionsRanker] `json:"ranker"`
-	// The score threshold for the file search. All values must be a floating point
-	// number between 0 and 1.
-	ScoreThreshold param.Field[float64] `json:"score_threshold"`
 }

 func (r FileSearchToolFileSearchRankingOptionsParam) MarshalJSON() (data []byte, err error) {
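Note: ScoreThreshold is now tagged `required` in the ranking-options structs above, so any caller that sets ranking options must supply it. A minimal sketch of populating the reordered param type — the enclosing FileSearchToolParam, its Type constant, the RankingOptions field name, and the RankerAuto constant do not appear in this diff and are assumed here from the SDK's naming conventions:

	// Sketch (assumed identifiers): file search with explicit ranking options.
	fileSearch := openai.FileSearchToolParam{
		Type: openai.F(openai.FileSearchToolTypeFileSearch), // assumed constant
		FileSearch: openai.F(openai.FileSearchToolFileSearchParam{
			MaxNumResults: openai.F(int64(20)),
			RankingOptions: openai.F(openai.FileSearchToolFileSearchRankingOptionsParam{
				// score_threshold is required as of this commit; values are 0–1.
				ScoreThreshold: openai.F(0.5),
				// ranker stays optional and defaults to `auto` when omitted.
				Ranker: openai.F(openai.FileSearchToolFileSearchRankingOptionsRankerAuto), // assumed constant
			}),
		}),
	}
	_ = fileSearch // attach to an assistant's Tools as usual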

chat.go (+5 −1)

@@ -30,9 +30,13 @@ func NewChatService(opts ...option.RequestOption) (r *ChatService) {
 type ChatModel = string

 const (
+	ChatModelO1Preview           ChatModel = "o1-preview"
+	ChatModelO1Preview2024_09_12 ChatModel = "o1-preview-2024-09-12"
+	ChatModelO1Mini              ChatModel = "o1-mini"
+	ChatModelO1Mini2024_09_12    ChatModel = "o1-mini-2024-09-12"
 	ChatModelGPT4o               ChatModel = "gpt-4o"
-	ChatModelGPT4o2024_05_13     ChatModel = "gpt-4o-2024-05-13"
 	ChatModelGPT4o2024_08_06     ChatModel = "gpt-4o-2024-08-06"
+	ChatModelGPT4o2024_05_13     ChatModel = "gpt-4o-2024-05-13"
 	ChatModelChatgpt4oLatest     ChatModel = "chatgpt-4o-latest"
 	ChatModelGPT4oMini           ChatModel = "gpt-4o-mini"
 	ChatModelGPT4oMini2024_07_18 ChatModel = "gpt-4o-mini-2024-07-18"
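Note: the new constants drop into existing call sites unchanged. A small end-to-end sketch, assuming the client construction and UserMessage helper shown in the SDK's README (neither appears in this diff):

	package main

	import (
		"context"
		"fmt"

		"github.com/openai/openai-go"
	)

	func main() {
		client := openai.NewClient() // reads OPENAI_API_KEY from the environment
		completion, err := client.Chat.Completions.New(context.TODO(), openai.ChatCompletionNewParams{
			Messages: openai.F([]openai.ChatCompletionMessageParamUnion{
				openai.UserMessage("Say this is a test"),
			}),
			Model: openai.F(openai.ChatModelO1Preview), // new constant from this commit
		})
		if err != nil {
			panic(err)
		}
		fmt.Println(completion.Choices[0].Message.Content)
	}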

chatcompletion.go (+28 −18)

@@ -1483,13 +1483,17 @@ type ChatCompletionNewParams struct {
 	// returns the log probabilities of each output token returned in the `content` of
 	// `message`.
 	Logprobs param.Field[bool] `json:"logprobs"`
+	// An upper bound for the number of tokens that can be generated for a completion,
+	// including visible output tokens and
+	// [reasoning tokens](https://platform.openai.com/docs/guides/reasoning).
+	MaxCompletionTokens param.Field[int64] `json:"max_completion_tokens"`
 	// The maximum number of [tokens](/tokenizer) that can be generated in the chat
-	// completion.
+	// completion. This value can be used to control
+	// [costs](https://openai.com/api/pricing/) for text generated via API.
 	//
-	// The total length of input tokens and generated tokens is limited by the model's
-	// context length.
-	// [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken)
-	// for counting tokens.
+	// This value is now deprecated in favor of `max_completion_tokens`, and is not
+	// compatible with
+	// [o1 series models](https://platform.openai.com/docs/guides/reasoning).
 	MaxTokens param.Field[int64] `json:"max_tokens"`
 	// How many chat completion choices to generate for each input message. Note that
 	// you will be charged based on the number of generated tokens across all of the
@@ -1512,11 +1516,11 @@ type ChatCompletionNewParams struct {
 	// all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`.
 	//
 	// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
-	// Outputs which guarantees the model will match your supplied JSON schema. Learn
-	// more in the
+	// Outputs which ensures the model will match your supplied JSON schema. Learn more
+	// in the
 	// [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
 	//
-	// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the
+	// Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the
 	// message the model generates is valid JSON.
 	//
 	// **Important:** when using JSON mode, you **must** also instruct the model to
@@ -1536,8 +1540,11 @@ type ChatCompletionNewParams struct {
 	// Specifies the latency tier to use for processing the request. This parameter is
 	// relevant for customers subscribed to the scale tier service:
 	//
-	// - If set to 'auto', the system will utilize scale tier credits until they are
-	//   exhausted.
+	// - If set to 'auto', and the Project is Scale tier enabled, the system will
+	//   utilize scale tier credits until they are exhausted.
+	// - If set to 'auto', and the Project is not Scale tier enabled, the request will
+	//   be processed using the default service tier with a lower uptime SLA and no
+	//   latency guarentee.
 	// - If set to 'default', the request will be processed using the default service
 	//   tier with a lower uptime SLA and no latency guarentee.
 	// - When not set, the default behavior is 'auto'.
@@ -1655,11 +1662,11 @@ func (r ChatCompletionNewParamsFunction) MarshalJSON() (data []byte, err error)
 	// all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`.
 	//
 	// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
-	// Outputs which guarantees the model will match your supplied JSON schema. Learn
-	// more in the
+	// Outputs which ensures the model will match your supplied JSON schema. Learn more
+	// in the
 	// [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
 	//
-	// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the
+	// Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the
 	// message the model generates is valid JSON.
 	//
 	// **Important:** when using JSON mode, you **must** also instruct the model to
@@ -1689,11 +1696,11 @@ func (r ChatCompletionNewParamsResponseFormat) ImplementsChatCompletionNewParams
 	// all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`.
 	//
 	// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
-	// Outputs which guarantees the model will match your supplied JSON schema. Learn
-	// more in the
+	// Outputs which ensures the model will match your supplied JSON schema. Learn more
+	// in the
 	// [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
 	//
-	// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the
+	// Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the
 	// message the model generates is valid JSON.
 	//
 	// **Important:** when using JSON mode, you **must** also instruct the model to
@@ -1731,8 +1738,11 @@ func (r ChatCompletionNewParamsResponseFormatType) IsKnown() bool {
 	// Specifies the latency tier to use for processing the request. This parameter is
 	// relevant for customers subscribed to the scale tier service:
 	//
-	// - If set to 'auto', the system will utilize scale tier credits until they are
-	//   exhausted.
+	// - If set to 'auto', and the Project is Scale tier enabled, the system will
+	//   utilize scale tier credits until they are exhausted.
+	// - If set to 'auto', and the Project is not Scale tier enabled, the request will
+	//   be processed using the default service tier with a lower uptime SLA and no
+	//   latency guarentee.
 	// - If set to 'default', the request will be processed using the default service
 	//   tier with a lower uptime SLA and no latency guarentee.
 	// - When not set, the default behavior is 'auto'.
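Note: per the deprecation comment above, o1-series requests should set MaxCompletionTokens and leave MaxTokens unset, since the cap now has to cover hidden reasoning tokens as well as visible output. A sketch under the same assumptions as the previous example, reusing its client:

	// Sketch: capping o1 output with the new parameter.
	resp, err := client.Chat.Completions.New(context.TODO(), openai.ChatCompletionNewParams{
		Messages: openai.F([]openai.ChatCompletionMessageParamUnion{
			openai.UserMessage("Outline a short proof sketch."),
		}),
		Model: openai.F(openai.ChatModelO1Mini),
		// Bounds visible output plus reasoning tokens; MaxTokens is deprecated
		// and not compatible with o1 series models.
		MaxCompletionTokens: openai.F(int64(2048)),
	})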

chatcompletion_test.go (+7 −6)

@@ -32,7 +32,7 @@ func TestChatCompletionNewWithOptionalParams(t *testing.T) {
 			Role: openai.F(openai.ChatCompletionSystemMessageParamRoleSystem),
 			Name: openai.F("name"),
 		}}),
-		Model:            openai.F(openai.ChatModelGPT4o),
+		Model:            openai.F(openai.ChatModelO1Preview),
 		FrequencyPenalty: openai.F(-2.000000),
 		FunctionCall:     openai.F[openai.ChatCompletionNewParamsFunctionCallUnion](openai.ChatCompletionNewParamsFunctionCallString(openai.ChatCompletionNewParamsFunctionCallStringNone)),
 		Functions: openai.F([]openai.ChatCompletionNewParamsFunction{{
@@ -45,11 +45,12 @@ func TestChatCompletionNewWithOptionalParams(t *testing.T) {
 		LogitBias: openai.F(map[string]int64{
 			"foo": int64(0),
 		}),
-		Logprobs:          openai.F(true),
-		MaxTokens:         openai.F(int64(0)),
-		N:                 openai.F(int64(1)),
-		ParallelToolCalls: openai.F(true),
-		PresencePenalty:   openai.F(-2.000000),
+		Logprobs:            openai.F(true),
+		MaxCompletionTokens: openai.F(int64(0)),
+		MaxTokens:           openai.F(int64(0)),
+		N:                   openai.F(int64(1)),
+		ParallelToolCalls:   openai.F(true),
+		PresencePenalty:     openai.F(-2.000000),
 		ResponseFormat: openai.F[openai.ChatCompletionNewParamsResponseFormatUnion](shared.ResponseFormatTextParam{
 			Type: openai.F(shared.ResponseFormatTextTypeText),
 		}),

client_test.go (+6 −6)

@@ -41,7 +41,7 @@ func TestUserAgentHeader(t *testing.T) {
 			Role:    openai.F(openai.ChatCompletionUserMessageParamRoleUser),
 			Content: openai.F([]openai.ChatCompletionContentPartUnionParam{openai.ChatCompletionContentPartTextParam{Text: openai.F("text"), Type: openai.F(openai.ChatCompletionContentPartTextTypeText)}}),
 		}}),
-		Model: openai.F(openai.ChatModelGPT4o),
+		Model: openai.F(openai.ChatModelO1Preview),
 	})
 	if userAgent != fmt.Sprintf("OpenAI/Go %s", internal.PackageVersion) {
 		t.Errorf("Expected User-Agent to be correct, but got: %#v", userAgent)
@@ -70,7 +70,7 @@ func TestRetryAfter(t *testing.T) {
 			Role:    openai.F(openai.ChatCompletionUserMessageParamRoleUser),
 			Content: openai.F([]openai.ChatCompletionContentPartUnionParam{openai.ChatCompletionContentPartTextParam{Text: openai.F("text"), Type: openai.F(openai.ChatCompletionContentPartTextTypeText)}}),
 		}}),
-		Model: openai.F(openai.ChatModelGPT4o),
+		Model: openai.F(openai.ChatModelO1Preview),
 	})
 	if err == nil || res != nil {
 		t.Error("Expected there to be a cancel error and for the response to be nil")
@@ -102,7 +102,7 @@ func TestRetryAfterMs(t *testing.T) {
 			Role:    openai.F(openai.ChatCompletionUserMessageParamRoleUser),
 			Content: openai.F([]openai.ChatCompletionContentPartUnionParam{openai.ChatCompletionContentPartTextParam{Text: openai.F("text"), Type: openai.F(openai.ChatCompletionContentPartTextTypeText)}}),
 		}}),
-		Model: openai.F(openai.ChatModelGPT4o),
+		Model: openai.F(openai.ChatModelO1Preview),
 	})
 	if err == nil || res != nil {
 		t.Error("Expected there to be a cancel error and for the response to be nil")
@@ -130,7 +130,7 @@ func TestContextCancel(t *testing.T) {
 			Role:    openai.F(openai.ChatCompletionUserMessageParamRoleUser),
 			Content: openai.F([]openai.ChatCompletionContentPartUnionParam{openai.ChatCompletionContentPartTextParam{Text: openai.F("text"), Type: openai.F(openai.ChatCompletionContentPartTextTypeText)}}),
 		}}),
-		Model: openai.F(openai.ChatModelGPT4o),
+		Model: openai.F(openai.ChatModelO1Preview),
 	})
 	if err == nil || res != nil {
 		t.Error("Expected there to be a cancel error and for the response to be nil")
@@ -155,7 +155,7 @@ func TestContextCancelDelay(t *testing.T) {
 			Role:    openai.F(openai.ChatCompletionUserMessageParamRoleUser),
 			Content: openai.F([]openai.ChatCompletionContentPartUnionParam{openai.ChatCompletionContentPartTextParam{Text: openai.F("text"), Type: openai.F(openai.ChatCompletionContentPartTextTypeText)}}),
 		}}),
-		Model: openai.F(openai.ChatModelGPT4o),
+		Model: openai.F(openai.ChatModelO1Preview),
 	})
 	if err == nil || res != nil {
 		t.Error("expected there to be a cancel error and for the response to be nil")
@@ -186,7 +186,7 @@ func TestContextDeadline(t *testing.T) {
 			Role:    openai.F(openai.ChatCompletionUserMessageParamRoleUser),
 			Content: openai.F([]openai.ChatCompletionContentPartUnionParam{openai.ChatCompletionContentPartTextParam{Text: openai.F("text"), Type: openai.F(openai.ChatCompletionContentPartTextTypeText)}}),
 		}}),
-		Model: openai.F(openai.ChatModelGPT4o),
+		Model: openai.F(openai.ChatModelO1Preview),
 	})
 	if err == nil || res != nil {
 		t.Error("expected there to be a deadline error and for the response to be nil")

completion.go (+33 −7)

@@ -197,17 +197,20 @@ type CompletionUsage struct {
 	// Number of tokens in the prompt.
 	PromptTokens int64 `json:"prompt_tokens,required"`
 	// Total number of tokens used in the request (prompt + completion).
-	TotalTokens int64               `json:"total_tokens,required"`
-	JSON        completionUsageJSON `json:"-"`
+	TotalTokens int64 `json:"total_tokens,required"`
+	// Breakdown of tokens used in a completion.
+	CompletionTokensDetails CompletionUsageCompletionTokensDetails `json:"completion_tokens_details"`
+	JSON                    completionUsageJSON                    `json:"-"`
 }

 // completionUsageJSON contains the JSON metadata for the struct [CompletionUsage]
 type completionUsageJSON struct {
-	CompletionTokens apijson.Field
-	PromptTokens     apijson.Field
-	TotalTokens      apijson.Field
-	raw              string
-	ExtraFields      map[string]apijson.Field
+	CompletionTokens        apijson.Field
+	PromptTokens            apijson.Field
+	TotalTokens             apijson.Field
+	CompletionTokensDetails apijson.Field
+	raw                     string
+	ExtraFields             map[string]apijson.Field
 }

 func (r *CompletionUsage) UnmarshalJSON(data []byte) (err error) {
@@ -218,6 +221,29 @@ func (r completionUsageJSON) RawJSON() string {
 	return r.raw
 }

+// Breakdown of tokens used in a completion.
+type CompletionUsageCompletionTokensDetails struct {
+	// Tokens generated by the model for reasoning.
+	ReasoningTokens int64                                      `json:"reasoning_tokens"`
+	JSON            completionUsageCompletionTokensDetailsJSON `json:"-"`
+}
+
+// completionUsageCompletionTokensDetailsJSON contains the JSON metadata for the
+// struct [CompletionUsageCompletionTokensDetails]
+type completionUsageCompletionTokensDetailsJSON struct {
+	ReasoningTokens apijson.Field
+	raw             string
+	ExtraFields     map[string]apijson.Field
+}
+
+func (r *CompletionUsageCompletionTokensDetails) UnmarshalJSON(data []byte) (err error) {
+	return apijson.UnmarshalRoot(data, r)
+}
+
+func (r completionUsageCompletionTokensDetailsJSON) RawJSON() string {
+	return r.raw
+}
+
 type CompletionNewParams struct {
 	// ID of the model to use. You can use the
 	// [List models](https://platform.openai.com/docs/api-reference/models/list) API to
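Note: the new CompletionUsageCompletionTokensDetails struct exposes how much of a completion was spent on reasoning. A sketch of reading it, continuing from the earlier chat-completion sketches and assuming the response type carries CompletionUsage in its Usage field (the SDK's existing shape, not shown in this diff):

	// Sketch: inspecting the reasoning-token breakdown after a chat request.
	if err == nil {
		fmt.Printf("completion tokens: %d (reasoning: %d)\n",
			resp.Usage.CompletionTokens,
			resp.Usage.CompletionTokensDetails.ReasoningTokens)
	}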

finetuningjob.go (+1 −1)

@@ -524,7 +524,7 @@ type FineTuningJobNewParams struct {
 	// job parameters should produce the same results, but may differ in rare cases. If
 	// a seed is not specified, one will be generated for you.
 	Seed param.Field[int64] `json:"seed"`
-	// A string of up to 18 characters that will be added to your fine-tuned model
+	// A string of up to 64 characters that will be added to your fine-tuned model
 	// name.
 	//
 	// For example, a `suffix` of "custom-model-name" would produce a model name like
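Note: the documented suffix limit rises from 18 to 64 characters. A sketch of a fine-tuning request exercising it — the Jobs.New service path and the model constant are assumed from the SDK's layout, and the training file ID is a placeholder:

	// Sketch: suffixes up to 64 characters are now documented as valid.
	job, err := client.FineTuning.Jobs.New(context.TODO(), openai.FineTuningJobNewParams{
		Model:        openai.F(openai.FineTuningJobNewParamsModelGPT3_5Turbo), // assumed constant
		TrainingFile: openai.F("file-abc123"),                                 // placeholder ID
		Suffix:       openai.F("custom-model-name"),
	})
	_ = job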

usage_test.go (+1 −1)

@@ -29,7 +29,7 @@ func TestUsage(t *testing.T) {
 			Role:    openai.F(openai.ChatCompletionUserMessageParamRoleUser),
 			Content: openai.F([]openai.ChatCompletionContentPartUnionParam{openai.ChatCompletionContentPartTextParam{Text: openai.F("text"), Type: openai.F(openai.ChatCompletionContentPartTextTypeText)}}),
 		}}),
-		Model: openai.F(openai.ChatModelGPT4o),
+		Model: openai.F(openai.ChatModelO1Preview),
 	})
 	if err != nil {
 		t.Error(err)
