Skip to content

Commit 1ecf8f6

Browse files
feat(api): add timestamp_granularities, add gpt-3.5-turbo-0125 model (#1125)
1 parent d231d1f commit 1ecf8f6

File tree

5 files changed

+44
-10
lines changed

5 files changed

+44
-10
lines changed

src/openai/resources/audio/transcriptions.py

+13-1
Original file line numberDiff line numberDiff line change
@@ -2,7 +2,7 @@
22

33
from __future__ import annotations
44

5-
from typing import Union, Mapping, cast
5+
from typing import List, Union, Mapping, cast
66
from typing_extensions import Literal
77

88
import httpx
@@ -39,6 +39,7 @@ def create(
3939
prompt: str | NotGiven = NOT_GIVEN,
4040
response_format: Literal["json", "text", "srt", "verbose_json", "vtt"] | NotGiven = NOT_GIVEN,
4141
temperature: float | NotGiven = NOT_GIVEN,
42+
timestamp_granularities: List[Literal["word", "segment"]] | NotGiven = NOT_GIVEN,
4243
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
4344
# The extra values given here take precedence over values defined on the client or passed to this method.
4445
extra_headers: Headers | None = None,
@@ -74,6 +75,10 @@ def create(
7475
[log probability](https://en.wikipedia.org/wiki/Log_probability) to
7576
automatically increase the temperature until certain thresholds are hit.
7677
78+
timestamp_granularities: The timestamp granularities to populate for this transcription. Any of these
79+
options: `word`, or `segment`. Note: There is no additional latency for segment
80+
timestamps, but generating word timestamps incurs additional latency.
81+
7782
extra_headers: Send extra headers
7883
7984
extra_query: Add additional query parameters to the request
@@ -90,6 +95,7 @@ def create(
9095
"prompt": prompt,
9196
"response_format": response_format,
9297
"temperature": temperature,
98+
"timestamp_granularities": timestamp_granularities,
9399
}
94100
)
95101
files = extract_files(cast(Mapping[str, object], body), paths=[["file"]])
@@ -127,6 +133,7 @@ async def create(
127133
prompt: str | NotGiven = NOT_GIVEN,
128134
response_format: Literal["json", "text", "srt", "verbose_json", "vtt"] | NotGiven = NOT_GIVEN,
129135
temperature: float | NotGiven = NOT_GIVEN,
136+
timestamp_granularities: List[Literal["word", "segment"]] | NotGiven = NOT_GIVEN,
130137
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
131138
# The extra values given here take precedence over values defined on the client or passed to this method.
132139
extra_headers: Headers | None = None,
@@ -162,6 +169,10 @@ async def create(
162169
[log probability](https://en.wikipedia.org/wiki/Log_probability) to
163170
automatically increase the temperature until certain thresholds are hit.
164171
172+
timestamp_granularities: The timestamp granularities to populate for this transcription. Any of these
173+
options: `word`, or `segment`. Note: There is no additional latency for segment
174+
timestamps, but generating word timestamps incurs additional latency.
175+
165176
extra_headers: Send extra headers
166177
167178
extra_query: Add additional query parameters to the request
@@ -178,6 +189,7 @@ async def create(
178189
"prompt": prompt,
179190
"response_format": response_format,
180191
"temperature": temperature,
192+
"timestamp_granularities": timestamp_granularities,
181193
}
182194
)
183195
files = extract_files(cast(Mapping[str, object], body), paths=[["file"]])

src/openai/resources/chat/completions.py

+14-6
Original file line numberDiff line numberDiff line change
@@ -61,6 +61,7 @@ def create(
6161
"gpt-3.5-turbo-0301",
6262
"gpt-3.5-turbo-0613",
6363
"gpt-3.5-turbo-1106",
64+
"gpt-3.5-turbo-0125",
6465
"gpt-3.5-turbo-16k-0613",
6566
],
6667
],
@@ -155,7 +156,7 @@ def create(
155156
156157
response_format: An object specifying the format that the model must output. Compatible with
157158
[GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo) and
158-
`gpt-3.5-turbo-1106`.
159+
all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`.
159160
160161
Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the
161162
message the model generates is valid JSON.
@@ -250,6 +251,7 @@ def create(
250251
"gpt-3.5-turbo-0301",
251252
"gpt-3.5-turbo-0613",
252253
"gpt-3.5-turbo-1106",
254+
"gpt-3.5-turbo-0125",
253255
"gpt-3.5-turbo-16k-0613",
254256
],
255257
],
@@ -351,7 +353,7 @@ def create(
351353
352354
response_format: An object specifying the format that the model must output. Compatible with
353355
[GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo) and
354-
`gpt-3.5-turbo-1106`.
356+
all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`.
355357
356358
Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the
357359
message the model generates is valid JSON.
@@ -439,6 +441,7 @@ def create(
439441
"gpt-3.5-turbo-0301",
440442
"gpt-3.5-turbo-0613",
441443
"gpt-3.5-turbo-1106",
444+
"gpt-3.5-turbo-0125",
442445
"gpt-3.5-turbo-16k-0613",
443446
],
444447
],
@@ -540,7 +543,7 @@ def create(
540543
541544
response_format: An object specifying the format that the model must output. Compatible with
542545
[GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo) and
543-
`gpt-3.5-turbo-1106`.
546+
all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`.
544547
545548
Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the
546549
message the model generates is valid JSON.
@@ -628,6 +631,7 @@ def create(
628631
"gpt-3.5-turbo-0301",
629632
"gpt-3.5-turbo-0613",
630633
"gpt-3.5-turbo-1106",
634+
"gpt-3.5-turbo-0125",
631635
"gpt-3.5-turbo-16k-0613",
632636
],
633637
],
@@ -724,6 +728,7 @@ async def create(
724728
"gpt-3.5-turbo-0301",
725729
"gpt-3.5-turbo-0613",
726730
"gpt-3.5-turbo-1106",
731+
"gpt-3.5-turbo-0125",
727732
"gpt-3.5-turbo-16k-0613",
728733
],
729734
],
@@ -818,7 +823,7 @@ async def create(
818823
819824
response_format: An object specifying the format that the model must output. Compatible with
820825
[GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo) and
821-
`gpt-3.5-turbo-1106`.
826+
all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`.
822827
823828
Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the
824829
message the model generates is valid JSON.
@@ -913,6 +918,7 @@ async def create(
913918
"gpt-3.5-turbo-0301",
914919
"gpt-3.5-turbo-0613",
915920
"gpt-3.5-turbo-1106",
921+
"gpt-3.5-turbo-0125",
916922
"gpt-3.5-turbo-16k-0613",
917923
],
918924
],
@@ -1014,7 +1020,7 @@ async def create(
10141020
10151021
response_format: An object specifying the format that the model must output. Compatible with
10161022
[GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo) and
1017-
`gpt-3.5-turbo-1106`.
1023+
all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`.
10181024
10191025
Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the
10201026
message the model generates is valid JSON.
@@ -1102,6 +1108,7 @@ async def create(
11021108
"gpt-3.5-turbo-0301",
11031109
"gpt-3.5-turbo-0613",
11041110
"gpt-3.5-turbo-1106",
1111+
"gpt-3.5-turbo-0125",
11051112
"gpt-3.5-turbo-16k-0613",
11061113
],
11071114
],
@@ -1203,7 +1210,7 @@ async def create(
12031210
12041211
response_format: An object specifying the format that the model must output. Compatible with
12051212
[GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo) and
1206-
`gpt-3.5-turbo-1106`.
1213+
all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`.
12071214
12081215
Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the
12091216
message the model generates is valid JSON.
@@ -1291,6 +1298,7 @@ async def create(
12911298
"gpt-3.5-turbo-0301",
12921299
"gpt-3.5-turbo-0613",
12931300
"gpt-3.5-turbo-1106",
1301+
"gpt-3.5-turbo-0125",
12941302
"gpt-3.5-turbo-16k-0613",
12951303
],
12961304
],

src/openai/types/audio/transcription_create_params.py

+13-2
Original file line numberDiff line numberDiff line change
@@ -2,10 +2,11 @@
22

33
from __future__ import annotations
44

5-
from typing import Union
6-
from typing_extensions import Literal, Required, TypedDict
5+
from typing import List, Union
6+
from typing_extensions import Literal, Required, Annotated, TypedDict
77

88
from ..._types import FileTypes
9+
from ..._utils import PropertyInfo
910

1011
__all__ = ["TranscriptionCreateParams"]
1112

@@ -50,3 +51,13 @@ class TranscriptionCreateParams(TypedDict, total=False):
5051
[log probability](https://en.wikipedia.org/wiki/Log_probability) to
5152
automatically increase the temperature until certain thresholds are hit.
5253
"""
54+
55+
timestamp_granularities: Annotated[
56+
List[Literal["word", "segment"]], PropertyInfo(alias="timestamp_granularities[]")
57+
]
58+
"""The timestamp granularities to populate for this transcription.
59+
60+
Any of these options: `word`, or `segment`. Note: There is no additional latency
61+
for segment timestamps, but generating word timestamps incurs additional
62+
latency.
63+
"""

src/openai/types/chat/completion_create_params.py

+2-1
Original file line numberDiff line numberDiff line change
@@ -47,6 +47,7 @@ class CompletionCreateParamsBase(TypedDict, total=False):
4747
"gpt-3.5-turbo-0301",
4848
"gpt-3.5-turbo-0613",
4949
"gpt-3.5-turbo-1106",
50+
"gpt-3.5-turbo-0125",
5051
"gpt-3.5-turbo-16k-0613",
5152
],
5253
]
@@ -137,7 +138,7 @@ class CompletionCreateParamsBase(TypedDict, total=False):
137138
138139
Compatible with
139140
[GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo) and
140-
`gpt-3.5-turbo-1106`.
141+
all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`.
141142
142143
Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the
143144
message the model generates is valid JSON.

tests/api_resources/audio/test_transcriptions.py

+2
Original file line numberDiff line numberDiff line change
@@ -34,6 +34,7 @@ def test_method_create_with_all_params(self, client: OpenAI) -> None:
3434
prompt="string",
3535
response_format="json",
3636
temperature=0,
37+
timestamp_granularities=["word", "segment"],
3738
)
3839
assert_matches_type(Transcription, transcription, path=["response"])
3940

@@ -84,6 +85,7 @@ async def test_method_create_with_all_params(self, async_client: AsyncOpenAI) ->
8485
prompt="string",
8586
response_format="json",
8687
temperature=0,
88+
timestamp_granularities=["word", "segment"],
8789
)
8890
assert_matches_type(Transcription, transcription, path=["response"])
8991

0 commit comments

Comments (0)