8 changes: 6 additions & 2 deletions .github/workflows/ci.yml
@@ -61,14 +61,18 @@ jobs:
run: rye build

- name: Get GitHub OIDC Token
if: github.repository == 'stainless-sdks/openai-python'
if: |-
github.repository == 'stainless-sdks/openai-python' &&
!startsWith(github.ref, 'refs/heads/stl/')
id: github-oidc
uses: actions/github-script@v8
with:
script: core.setOutput('github_token', await core.getIDToken());

- name: Upload tarball
if: github.repository == 'stainless-sdks/openai-python'
if: |-
github.repository == 'stainless-sdks/openai-python' &&
!startsWith(github.ref, 'refs/heads/stl/')
env:
URL: https://pkg.stainless.com/s
AUTH: ${{ steps.github-oidc.outputs.github_token }}
2 changes: 1 addition & 1 deletion .release-please-manifest.json
@@ -1,3 +1,3 @@
{
".": "2.26.0"
".": "2.27.0"
}
6 changes: 3 additions & 3 deletions .stats.yml
@@ -1,4 +1,4 @@
configured_endpoints: 148
openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-9c802d45a9bf2a896b5fd22ac22bba185e8a145bd40ed242df9bb87a05e954eb.yml
openapi_spec_hash: 97984ed69285e660b7d5c810c69ed449
config_hash: 8240b8a7a7fc145a45b93bda435612d6
openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-d941452f026bbad07fe99ddd39154686a1f5689270f6be2ab40661c1d8982d15.yml
openapi_spec_hash: b96a607abae511c4cea24a6f00c5a6f9
config_hash: 4fde9d6d4eb1de30bf2b6784f3da8bd8
13 changes: 13 additions & 0 deletions CHANGELOG.md
@@ -1,5 +1,18 @@
# Changelog

## 2.27.0 (2026-03-11)

Full Changelog: [v2.26.0...v2.27.0](https://github.com/openai/openai-python/compare/v2.26.0...v2.27.0)

### Features

* **api:** api update ([60ab24a](https://github.com/openai/openai-python/commit/60ab24ae722a7fa280eb4b2273da4ded1f930231))


### Chores

* **internal:** codegen related update ([93af129](https://github.com/openai/openai-python/commit/93af129e8919de6d3aee19329c8bdef0532bd20a))

## 2.26.0 (2026-03-05)

Full Changelog: [v2.25.0...v2.26.0](https://github.com/openai/openai-python/compare/v2.25.0...v2.26.0)
2 changes: 1 addition & 1 deletion pyproject.toml
@@ -1,6 +1,6 @@
[project]
name = "openai"
version = "2.26.0"
version = "2.27.0"
description = "The official Python library for the openai API"
dynamic = ["readme"]
license = "Apache-2.0"
2 changes: 1 addition & 1 deletion src/openai/_version.py
@@ -1,4 +1,4 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.

__title__ = "openai"
__version__ = "2.26.0" # x-release-please-version
__version__ = "2.27.0" # x-release-please-version
6 changes: 0 additions & 6 deletions src/openai/types/responses/response_input_file.py
@@ -14,12 +14,6 @@ class ResponseInputFile(BaseModel):
type: Literal["input_file"]
"""The type of the input item. Always `input_file`."""

detail: Optional[Literal["low", "high"]] = None
"""The detail level of the file to be sent to the model.

One of `high` or `low`. Defaults to `high`.
"""

file_data: Optional[str] = None
"""The content of the file to be sent to the model."""

6 changes: 0 additions & 6 deletions src/openai/types/responses/response_input_file_content.py
@@ -14,12 +14,6 @@ class ResponseInputFileContent(BaseModel):
type: Literal["input_file"]
"""The type of the input item. Always `input_file`."""

detail: Optional[Literal["high", "low"]] = None
"""The detail level of the file to be sent to the model.

One of `high` or `low`. Defaults to `high`.
"""

file_data: Optional[str] = None
"""The base64-encoded data of the file to be sent to the model."""

6 changes: 0 additions & 6 deletions src/openai/types/responses/response_input_file_content_param.py
@@ -14,12 +14,6 @@ class ResponseInputFileContentParam(TypedDict, total=False):
type: Required[Literal["input_file"]]
"""The type of the input item. Always `input_file`."""

detail: Literal["high", "low"]
"""The detail level of the file to be sent to the model.

One of `high` or `low`. Defaults to `high`.
"""

file_data: Optional[str]
"""The base64-encoded data of the file to be sent to the model."""

6 changes: 0 additions & 6 deletions src/openai/types/responses/response_input_file_param.py
@@ -14,12 +14,6 @@ class ResponseInputFileParam(TypedDict, total=False):
type: Required[Literal["input_file"]]
"""The type of the input item. Always `input_file`."""

detail: Literal["low", "high"]
"""The detail level of the file to be sent to the model.

One of `high` or `low`. Defaults to `high`.
"""

file_data: str
"""The content of the file to be sent to the model."""

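The four generated models above all drop the optional `detail` field from `input_file` items. A minimal sketch of passing such an item to the Responses API after this change, assuming a previously uploaded file; the model name and file ID below are placeholders, not values from this diff:

```python
from openai import OpenAI

client = OpenAI()  # reads OPENAI_API_KEY from the environment

response = client.responses.create(
    model="gpt-4o",  # placeholder model name
    input=[
        {
            "role": "user",
            "content": [
                # The input_file part now carries only identifiers such as
                # file_id or inline file_data; the removed `detail` key is
                # no longer part of these typed dicts.
                {"type": "input_file", "file_id": "file-abc123"},
                {"type": "input_text", "text": "Summarize this document."},
            ],
        }
    ],
)
print(response.output_text)
```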
32 changes: 16 additions & 16 deletions tests/api_resources/audio/test_transcriptions.py
@@ -20,15 +20,15 @@ class TestTranscriptions:
@parametrize
def test_method_create_overload_1(self, client: OpenAI) -> None:
transcription = client.audio.transcriptions.create(
file=b"raw file contents",
file=b"Example data",
model="gpt-4o-transcribe",
)
assert_matches_type(TranscriptionCreateResponse, transcription, path=["response"])

@parametrize
def test_method_create_with_all_params_overload_1(self, client: OpenAI) -> None:
transcription = client.audio.transcriptions.create(
file=b"raw file contents",
file=b"Example data",
model="gpt-4o-transcribe",
chunking_strategy="auto",
include=["logprobs"],
@@ -46,7 +46,7 @@ def test_method_create_with_all_params_overload_1(self, client: OpenAI) -> None:
@parametrize
def test_raw_response_create_overload_1(self, client: OpenAI) -> None:
response = client.audio.transcriptions.with_raw_response.create(
file=b"raw file contents",
file=b"Example data",
model="gpt-4o-transcribe",
)

@@ -58,7 +58,7 @@ def test_raw_response_create_overload_1(self, client: OpenAI) -> None:
@parametrize
def test_streaming_response_create_overload_1(self, client: OpenAI) -> None:
with client.audio.transcriptions.with_streaming_response.create(
file=b"raw file contents",
file=b"Example data",
model="gpt-4o-transcribe",
) as response:
assert not response.is_closed
@@ -72,7 +72,7 @@ def test_streaming_response_create_overload_1(self, client: OpenAI) -> None:
@parametrize
def test_method_create_overload_2(self, client: OpenAI) -> None:
transcription_stream = client.audio.transcriptions.create(
file=b"raw file contents",
file=b"Example data",
model="gpt-4o-transcribe",
stream=True,
)
@@ -81,7 +81,7 @@ def test_method_create_overload_2(self, client: OpenAI) -> None:
@parametrize
def test_method_create_with_all_params_overload_2(self, client: OpenAI) -> None:
transcription_stream = client.audio.transcriptions.create(
file=b"raw file contents",
file=b"Example data",
model="gpt-4o-transcribe",
stream=True,
chunking_strategy="auto",
@@ -99,7 +99,7 @@ def test_method_create_with_all_params_overload_2(self, client: OpenAI) -> None:
@parametrize
def test_raw_response_create_overload_2(self, client: OpenAI) -> None:
response = client.audio.transcriptions.with_raw_response.create(
file=b"raw file contents",
file=b"Example data",
model="gpt-4o-transcribe",
stream=True,
)
@@ -111,7 +111,7 @@ def test_raw_response_create_overload_2(self, client: OpenAI) -> None:
@parametrize
def test_streaming_response_create_overload_2(self, client: OpenAI) -> None:
with client.audio.transcriptions.with_streaming_response.create(
file=b"raw file contents",
file=b"Example data",
model="gpt-4o-transcribe",
stream=True,
) as response:
@@ -132,15 +132,15 @@ class TestAsyncTranscriptions:
@parametrize
async def test_method_create_overload_1(self, async_client: AsyncOpenAI) -> None:
transcription = await async_client.audio.transcriptions.create(
file=b"raw file contents",
file=b"Example data",
model="gpt-4o-transcribe",
)
assert_matches_type(TranscriptionCreateResponse, transcription, path=["response"])

@parametrize
async def test_method_create_with_all_params_overload_1(self, async_client: AsyncOpenAI) -> None:
transcription = await async_client.audio.transcriptions.create(
file=b"raw file contents",
file=b"Example data",
model="gpt-4o-transcribe",
chunking_strategy="auto",
include=["logprobs"],
@@ -158,7 +158,7 @@ async def test_method_create_with_all_params_overload_1(self, async_client: Asyn
@parametrize
async def test_raw_response_create_overload_1(self, async_client: AsyncOpenAI) -> None:
response = await async_client.audio.transcriptions.with_raw_response.create(
file=b"raw file contents",
file=b"Example data",
model="gpt-4o-transcribe",
)

@@ -170,7 +170,7 @@ async def test_raw_response_create_overload_1(self, async_client: AsyncOpenAI) -
@parametrize
async def test_streaming_response_create_overload_1(self, async_client: AsyncOpenAI) -> None:
async with async_client.audio.transcriptions.with_streaming_response.create(
file=b"raw file contents",
file=b"Example data",
model="gpt-4o-transcribe",
) as response:
assert not response.is_closed
@@ -184,7 +184,7 @@ async def test_streaming_response_create_overload_1(self, async_client: AsyncOpe
@parametrize
async def test_method_create_overload_2(self, async_client: AsyncOpenAI) -> None:
transcription_stream = await async_client.audio.transcriptions.create(
file=b"raw file contents",
file=b"Example data",
model="gpt-4o-transcribe",
stream=True,
)
@@ -193,7 +193,7 @@ async def test_method_create_overload_2(self, async_client: AsyncOpenAI) -> None
@parametrize
async def test_method_create_with_all_params_overload_2(self, async_client: AsyncOpenAI) -> None:
transcription_stream = await async_client.audio.transcriptions.create(
file=b"raw file contents",
file=b"Example data",
model="gpt-4o-transcribe",
stream=True,
chunking_strategy="auto",
@@ -211,7 +211,7 @@ async def test_method_create_with_all_params_overload_2(self, async_client: Asyn
@parametrize
async def test_raw_response_create_overload_2(self, async_client: AsyncOpenAI) -> None:
response = await async_client.audio.transcriptions.with_raw_response.create(
file=b"raw file contents",
file=b"Example data",
model="gpt-4o-transcribe",
stream=True,
)
@@ -223,7 +223,7 @@ async def test_raw_response_create_overload_2(self, async_client: AsyncOpenAI) -
@parametrize
async def test_streaming_response_create_overload_2(self, async_client: AsyncOpenAI) -> None:
async with async_client.audio.transcriptions.with_streaming_response.create(
file=b"raw file contents",
file=b"Example data",
model="gpt-4o-transcribe",
stream=True,
) as response:
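The updated tests pass the `file` parameter as in-memory bytes (`b"Example data"`). A minimal sketch of the same call outside the test harness, assuming a local audio file; the SDK also accepts raw bytes or a `(filename, bytes)` tuple for this parameter, which is what the tests rely on:

```python
from pathlib import Path

from openai import OpenAI

client = OpenAI()

# speech.mp3 is a hypothetical local recording; a file handle, raw bytes,
# or a (filename, bytes) tuple are all accepted for the `file` argument.
with Path("speech.mp3").open("rb") as audio_file:
    transcription = client.audio.transcriptions.create(
        file=audio_file,
        model="gpt-4o-transcribe",
    )

print(transcription.text)
```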
16 changes: 8 additions & 8 deletions tests/api_resources/audio/test_translations.py
@@ -20,15 +20,15 @@ class TestTranslations:
@parametrize
def test_method_create(self, client: OpenAI) -> None:
translation = client.audio.translations.create(
file=b"raw file contents",
file=b"Example data",
model="whisper-1",
)
assert_matches_type(TranslationCreateResponse, translation, path=["response"])

@parametrize
def test_method_create_with_all_params(self, client: OpenAI) -> None:
translation = client.audio.translations.create(
file=b"raw file contents",
file=b"Example data",
model="whisper-1",
prompt="prompt",
response_format="json",
@@ -39,7 +39,7 @@ def test_method_create_with_all_params(self, client: OpenAI) -> None:
@parametrize
def test_raw_response_create(self, client: OpenAI) -> None:
response = client.audio.translations.with_raw_response.create(
file=b"raw file contents",
file=b"Example data",
model="whisper-1",
)

@@ -51,7 +51,7 @@ def test_raw_response_create(self, client: OpenAI) -> None:
@parametrize
def test_streaming_response_create(self, client: OpenAI) -> None:
with client.audio.translations.with_streaming_response.create(
file=b"raw file contents",
file=b"Example data",
model="whisper-1",
) as response:
assert not response.is_closed
@@ -71,15 +71,15 @@ class TestAsyncTranslations:
@parametrize
async def test_method_create(self, async_client: AsyncOpenAI) -> None:
translation = await async_client.audio.translations.create(
file=b"raw file contents",
file=b"Example data",
model="whisper-1",
)
assert_matches_type(TranslationCreateResponse, translation, path=["response"])

@parametrize
async def test_method_create_with_all_params(self, async_client: AsyncOpenAI) -> None:
translation = await async_client.audio.translations.create(
file=b"raw file contents",
file=b"Example data",
model="whisper-1",
prompt="prompt",
response_format="json",
@@ -90,7 +90,7 @@ async def test_method_create_with_all_params(self, async_client: AsyncOpenAI) ->
@parametrize
async def test_raw_response_create(self, async_client: AsyncOpenAI) -> None:
response = await async_client.audio.translations.with_raw_response.create(
file=b"raw file contents",
file=b"Example data",
model="whisper-1",
)

@@ -102,7 +102,7 @@ async def test_raw_response_create(self, async_client: AsyncOpenAI) -> None:
@parametrize
async def test_streaming_response_create(self, async_client: AsyncOpenAI) -> None:
async with async_client.audio.translations.with_streaming_response.create(
file=b"raw file contents",
file=b"Example data",
model="whisper-1",
) as response:
assert not response.is_closed
4 changes: 2 additions & 2 deletions tests/api_resources/containers/test_files.py
@@ -33,7 +33,7 @@ def test_method_create(self, client: OpenAI) -> None:
def test_method_create_with_all_params(self, client: OpenAI) -> None:
file = client.containers.files.create(
container_id="container_id",
file=b"raw file contents",
file=b"Example data",
file_id="file_id",
)
assert_matches_type(FileCreateResponse, file, path=["response"])
@@ -230,7 +230,7 @@ async def test_method_create(self, async_client: AsyncOpenAI) -> None:
async def test_method_create_with_all_params(self, async_client: AsyncOpenAI) -> None:
file = await async_client.containers.files.create(
container_id="container_id",
file=b"raw file contents",
file=b"Example data",
file_id="file_id",
)
assert_matches_type(FileCreateResponse, file, path=["response"])
4 changes: 2 additions & 2 deletions tests/api_resources/skills/test_versions.py
@@ -30,7 +30,7 @@ def test_method_create_with_all_params(self, client: OpenAI) -> None:
version = client.skills.versions.create(
skill_id="skill_123",
default=True,
files=[b"raw file contents"],
files=[b"Example data"],
)
assert_matches_type(SkillVersion, version, path=["response"])

@@ -227,7 +227,7 @@ async def test_method_create_with_all_params(self, async_client: AsyncOpenAI) ->
version = await async_client.skills.versions.create(
skill_id="skill_123",
default=True,
files=[b"raw file contents"],
files=[b"Example data"],
)
assert_matches_type(SkillVersion, version, path=["response"])
