Skip to content

Commit b0eae50

Browse files
feat(api): support storing chat completions, enabling evals and model distillation in the dashboard (#72)
Learn more at https://openai.com/devday2024
1 parent 29dfb56 commit b0eae50

5 files changed

+83
-36
lines changed

.stats.yml

+1-1
Original file line numberDiff line numberDiff line change
@@ -1,2 +1,2 @@
11
configured_endpoints: 68
2-
openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-8ad878332083dd506a478a293db78dc9e7b1b2124f2682e1d991225bc5bbcc3b.yml
2+
openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-52b934aee6468039ec7f4ce046a282b5fbce114afc708e70f17121df654f71da.yml

chat.go

+30-29
Original file line numberDiff line numberDiff line change
@@ -30,33 +30,34 @@ func NewChatService(opts ...option.RequestOption) (r *ChatService) {
3030
type ChatModel = string
3131

3232
const (
33-
ChatModelO1Preview ChatModel = "o1-preview"
34-
ChatModelO1Preview2024_09_12 ChatModel = "o1-preview-2024-09-12"
35-
ChatModelO1Mini ChatModel = "o1-mini"
36-
ChatModelO1Mini2024_09_12 ChatModel = "o1-mini-2024-09-12"
37-
ChatModelGPT4o ChatModel = "gpt-4o"
38-
ChatModelGPT4o2024_08_06 ChatModel = "gpt-4o-2024-08-06"
39-
ChatModelGPT4o2024_05_13 ChatModel = "gpt-4o-2024-05-13"
40-
ChatModelChatgpt4oLatest ChatModel = "chatgpt-4o-latest"
41-
ChatModelGPT4oMini ChatModel = "gpt-4o-mini"
42-
ChatModelGPT4oMini2024_07_18 ChatModel = "gpt-4o-mini-2024-07-18"
43-
ChatModelGPT4Turbo ChatModel = "gpt-4-turbo"
44-
ChatModelGPT4Turbo2024_04_09 ChatModel = "gpt-4-turbo-2024-04-09"
45-
ChatModelGPT4_0125Preview ChatModel = "gpt-4-0125-preview"
46-
ChatModelGPT4TurboPreview ChatModel = "gpt-4-turbo-preview"
47-
ChatModelGPT4_1106Preview ChatModel = "gpt-4-1106-preview"
48-
ChatModelGPT4VisionPreview ChatModel = "gpt-4-vision-preview"
49-
ChatModelGPT4 ChatModel = "gpt-4"
50-
ChatModelGPT4_0314 ChatModel = "gpt-4-0314"
51-
ChatModelGPT4_0613 ChatModel = "gpt-4-0613"
52-
ChatModelGPT4_32k ChatModel = "gpt-4-32k"
53-
ChatModelGPT4_32k0314 ChatModel = "gpt-4-32k-0314"
54-
ChatModelGPT4_32k0613 ChatModel = "gpt-4-32k-0613"
55-
ChatModelGPT3_5Turbo ChatModel = "gpt-3.5-turbo"
56-
ChatModelGPT3_5Turbo16k ChatModel = "gpt-3.5-turbo-16k"
57-
ChatModelGPT3_5Turbo0301 ChatModel = "gpt-3.5-turbo-0301"
58-
ChatModelGPT3_5Turbo0613 ChatModel = "gpt-3.5-turbo-0613"
59-
ChatModelGPT3_5Turbo1106 ChatModel = "gpt-3.5-turbo-1106"
60-
ChatModelGPT3_5Turbo0125 ChatModel = "gpt-3.5-turbo-0125"
61-
ChatModelGPT3_5Turbo16k0613 ChatModel = "gpt-3.5-turbo-16k-0613"
33+
ChatModelO1Preview ChatModel = "o1-preview"
34+
ChatModelO1Preview2024_09_12 ChatModel = "o1-preview-2024-09-12"
35+
ChatModelO1Mini ChatModel = "o1-mini"
36+
ChatModelO1Mini2024_09_12 ChatModel = "o1-mini-2024-09-12"
37+
ChatModelGPT4o ChatModel = "gpt-4o"
38+
ChatModelGPT4o2024_08_06 ChatModel = "gpt-4o-2024-08-06"
39+
ChatModelGPT4o2024_05_13 ChatModel = "gpt-4o-2024-05-13"
40+
ChatModelGPT4oRealtimePreview2024_10_01 ChatModel = "gpt-4o-realtime-preview-2024-10-01"
41+
ChatModelChatgpt4oLatest ChatModel = "chatgpt-4o-latest"
42+
ChatModelGPT4oMini ChatModel = "gpt-4o-mini"
43+
ChatModelGPT4oMini2024_07_18 ChatModel = "gpt-4o-mini-2024-07-18"
44+
ChatModelGPT4Turbo ChatModel = "gpt-4-turbo"
45+
ChatModelGPT4Turbo2024_04_09 ChatModel = "gpt-4-turbo-2024-04-09"
46+
ChatModelGPT4_0125Preview ChatModel = "gpt-4-0125-preview"
47+
ChatModelGPT4TurboPreview ChatModel = "gpt-4-turbo-preview"
48+
ChatModelGPT4_1106Preview ChatModel = "gpt-4-1106-preview"
49+
ChatModelGPT4VisionPreview ChatModel = "gpt-4-vision-preview"
50+
ChatModelGPT4 ChatModel = "gpt-4"
51+
ChatModelGPT4_0314 ChatModel = "gpt-4-0314"
52+
ChatModelGPT4_0613 ChatModel = "gpt-4-0613"
53+
ChatModelGPT4_32k ChatModel = "gpt-4-32k"
54+
ChatModelGPT4_32k0314 ChatModel = "gpt-4-32k-0314"
55+
ChatModelGPT4_32k0613 ChatModel = "gpt-4-32k-0613"
56+
ChatModelGPT3_5Turbo ChatModel = "gpt-3.5-turbo"
57+
ChatModelGPT3_5Turbo16k ChatModel = "gpt-3.5-turbo-16k"
58+
ChatModelGPT3_5Turbo0301 ChatModel = "gpt-3.5-turbo-0301"
59+
ChatModelGPT3_5Turbo0613 ChatModel = "gpt-3.5-turbo-0613"
60+
ChatModelGPT3_5Turbo1106 ChatModel = "gpt-3.5-turbo-1106"
61+
ChatModelGPT3_5Turbo0125 ChatModel = "gpt-3.5-turbo-0125"
62+
ChatModelGPT3_5Turbo16k0613 ChatModel = "gpt-3.5-turbo-16k-0613"
6263
)

chatcompletion.go

+12-2
Original file line numberDiff line numberDiff line change
@@ -1442,8 +1442,12 @@ func (r ChatCompletionUserMessageParamRole) IsKnown() bool {
14421442
}
14431443

14441444
type ChatCompletionNewParams struct {
1445-
// A list of messages comprising the conversation so far.
1446-
// [Example Python code](https://cookbook.openai.com/examples/how_to_format_inputs_to_chatgpt_models).
1445+
// A list of messages comprising the conversation so far. Depending on the
1446+
// [model](https://platform.openai.com/docs/models) you use, different message
1447+
// types (modalities) are supported, like
1448+
// [text](https://platform.openai.com/docs/guides/text-generation),
1449+
// [images](https://platform.openai.com/docs/guides/vision), and
1450+
// [audio](https://platform.openai.com/docs/guides/audio).
14471451
Messages param.Field[[]ChatCompletionMessageParamUnion] `json:"messages,required"`
14481452
// ID of the model to use. See the
14491453
// [model endpoint compatibility](https://platform.openai.com/docs/models/model-endpoint-compatibility)
@@ -1495,6 +1499,9 @@ type ChatCompletionNewParams struct {
14951499
// compatible with
14961500
// [o1 series models](https://platform.openai.com/docs/guides/reasoning).
14971501
MaxTokens param.Field[int64] `json:"max_tokens"`
1502+
// Developer-defined tags and values used for filtering completions in the
1503+
// [dashboard](https://platform.openai.com/completions).
1504+
Metadata param.Field[map[string]string] `json:"metadata"`
14981505
// How many chat completion choices to generate for each input message. Note that
14991506
// you will be charged based on the number of generated tokens across all of the
15001507
// choices. Keep `n` as `1` to minimize costs.
@@ -1554,6 +1561,9 @@ type ChatCompletionNewParams struct {
15541561
ServiceTier param.Field[ChatCompletionNewParamsServiceTier] `json:"service_tier"`
15551562
// Up to 4 sequences where the API will stop generating further tokens.
15561563
Stop param.Field[ChatCompletionNewParamsStopUnion] `json:"stop"`
1564+
// Whether or not to store the output of this completion request for traffic
1565+
// logging in the [dashboard](https://platform.openai.com/completions).
1566+
Store param.Field[bool] `json:"store"`
15571567
// Options for streaming response. Only set this when you set `stream: true`.
15581568
StreamOptions param.Field[ChatCompletionStreamOptionsParam] `json:"stream_options"`
15591569
// What sampling temperature to use, between 0 and 2. Higher values like 0.8 will

chatcompletion_test.go

+7-3
Original file line numberDiff line numberDiff line change
@@ -48,15 +48,19 @@ func TestChatCompletionNewWithOptionalParams(t *testing.T) {
4848
Logprobs: openai.F(true),
4949
MaxCompletionTokens: openai.F(int64(0)),
5050
MaxTokens: openai.F(int64(0)),
51-
N: openai.F(int64(1)),
52-
ParallelToolCalls: openai.F(true),
53-
PresencePenalty: openai.F(-2.000000),
51+
Metadata: openai.F(map[string]string{
52+
"foo": "string",
53+
}),
54+
N: openai.F(int64(1)),
55+
ParallelToolCalls: openai.F(true),
56+
PresencePenalty: openai.F(-2.000000),
5457
ResponseFormat: openai.F[openai.ChatCompletionNewParamsResponseFormatUnion](shared.ResponseFormatTextParam{
5558
Type: openai.F(shared.ResponseFormatTextTypeText),
5659
}),
5760
Seed: openai.F(int64(-9007199254740991)),
5861
ServiceTier: openai.F(openai.ChatCompletionNewParamsServiceTierAuto),
5962
Stop: openai.F[openai.ChatCompletionNewParamsStopUnion](shared.UnionString("string")),
63+
Store: openai.F(true),
6064
StreamOptions: openai.F(openai.ChatCompletionStreamOptionsParam{
6165
IncludeUsage: openai.F(true),
6266
}),

completion.go

+33-1
Original file line numberDiff line numberDiff line change
@@ -200,7 +200,9 @@ type CompletionUsage struct {
200200
TotalTokens int64 `json:"total_tokens,required"`
201201
// Breakdown of tokens used in a completion.
202202
CompletionTokensDetails CompletionUsageCompletionTokensDetails `json:"completion_tokens_details"`
203-
JSON completionUsageJSON `json:"-"`
203+
// Breakdown of tokens used in the prompt.
204+
PromptTokensDetails CompletionUsagePromptTokensDetails `json:"prompt_tokens_details"`
205+
JSON completionUsageJSON `json:"-"`
204206
}
205207

206208
// completionUsageJSON contains the JSON metadata for the struct [CompletionUsage]
@@ -209,6 +211,7 @@ type completionUsageJSON struct {
209211
PromptTokens apijson.Field
210212
TotalTokens apijson.Field
211213
CompletionTokensDetails apijson.Field
214+
PromptTokensDetails apijson.Field
212215
raw string
213216
ExtraFields map[string]apijson.Field
214217
}
@@ -223,6 +226,8 @@ func (r completionUsageJSON) RawJSON() string {
223226

224227
// Breakdown of tokens used in a completion.
225228
type CompletionUsageCompletionTokensDetails struct {
229+
// Audio output tokens generated by the model.
230+
AudioTokens int64 `json:"audio_tokens"`
226231
// Tokens generated by the model for reasoning.
227232
ReasoningTokens int64 `json:"reasoning_tokens"`
228233
JSON completionUsageCompletionTokensDetailsJSON `json:"-"`
@@ -231,6 +236,7 @@ type CompletionUsageCompletionTokensDetails struct {
231236
// completionUsageCompletionTokensDetailsJSON contains the JSON metadata for the
232237
// struct [CompletionUsageCompletionTokensDetails]
233238
type completionUsageCompletionTokensDetailsJSON struct {
239+
AudioTokens apijson.Field
234240
ReasoningTokens apijson.Field
235241
raw string
236242
ExtraFields map[string]apijson.Field
@@ -244,6 +250,32 @@ func (r completionUsageCompletionTokensDetailsJSON) RawJSON() string {
244250
return r.raw
245251
}
246252

253+
// Breakdown of tokens used in the prompt.
254+
type CompletionUsagePromptTokensDetails struct {
255+
// Audio input tokens present in the prompt.
256+
AudioTokens int64 `json:"audio_tokens"`
257+
// Cached tokens present in the prompt.
258+
CachedTokens int64 `json:"cached_tokens"`
259+
JSON completionUsagePromptTokensDetailsJSON `json:"-"`
260+
}
261+
262+
// completionUsagePromptTokensDetailsJSON contains the JSON metadata for the struct
263+
// [CompletionUsagePromptTokensDetails]
264+
type completionUsagePromptTokensDetailsJSON struct {
265+
AudioTokens apijson.Field
266+
CachedTokens apijson.Field
267+
raw string
268+
ExtraFields map[string]apijson.Field
269+
}
270+
271+
func (r *CompletionUsagePromptTokensDetails) UnmarshalJSON(data []byte) (err error) {
272+
return apijson.UnmarshalRoot(data, r)
273+
}
274+
275+
func (r completionUsagePromptTokensDetailsJSON) RawJSON() string {
276+
return r.raw
277+
}
278+
247279
type CompletionNewParams struct {
248280
// ID of the model to use. You can use the
249281
// [List models](https://platform.openai.com/docs/api-reference/models/list) API to

0 commit comments

Comments
 (0)