Skip to content

Commit 96aa7d2

Browse files
authored
Merge branch 'main' into feature/interceptor
2 parents 3f69457 + 4dd5cf2 commit 96aa7d2

19 files changed

+316
-16
lines changed

Diff for: .release-please-manifest.json

+1-1
Original file line numberDiff line numberDiff line change
@@ -1,3 +1,3 @@
11
{
2-
".": "1.59.6"
2+
".": "1.59.8"
33
}

Diff for: CHANGELOG.md

+32
Original file line numberDiff line numberDiff line change
@@ -1,5 +1,37 @@
11
# Changelog
22

3+
## 1.59.8 (2025-01-17)
4+
5+
Full Changelog: [v1.59.7...v1.59.8](https://github.com/openai/openai-python/compare/v1.59.7...v1.59.8)
6+
7+
### Bug Fixes
8+
9+
* streaming ([c16f58e](https://github.com/openai/openai-python/commit/c16f58ead0bc85055b164182689ba74b7e939dfa))
10+
* **structured outputs:** avoid parsing empty content ([#2023](https://github.com/openai/openai-python/issues/2023)) ([6d3513c](https://github.com/openai/openai-python/commit/6d3513c86f6e5800f8f73a45e089b7a205327121))
11+
* **structured outputs:** correct schema coercion for inline ref expansion ([#2025](https://github.com/openai/openai-python/issues/2025)) ([2f4f0b3](https://github.com/openai/openai-python/commit/2f4f0b374207f162060c328b71ec995049dc42e8))
12+
* **types:** correct type for vector store chunking strategy ([#2017](https://github.com/openai/openai-python/issues/2017)) ([e389279](https://github.com/openai/openai-python/commit/e38927950a5cdad99065853fe7b72aad6bb322e9))
13+
14+
15+
### Chores
16+
17+
* **examples:** update realtime model ([f26746c](https://github.com/openai/openai-python/commit/f26746cbcd893d66cf8a3fd68a7c3779dc8c833c)), closes [#2020](https://github.com/openai/openai-python/issues/2020)
18+
* **internal:** bump pyright dependency ([#2021](https://github.com/openai/openai-python/issues/2021)) ([0a9a0f5](https://github.com/openai/openai-python/commit/0a9a0f5d8b9d5457643798287f893305006dd518))
19+
* **internal:** streaming refactors ([#2012](https://github.com/openai/openai-python/issues/2012)) ([d76a748](https://github.com/openai/openai-python/commit/d76a748f606743407f94dfc26758095560e2082a))
20+
* **internal:** update deps ([#2015](https://github.com/openai/openai-python/issues/2015)) ([514e0e4](https://github.com/openai/openai-python/commit/514e0e415f87ab4510262d29ed6125384e017b84))
21+
22+
23+
### Documentation
24+
25+
* **examples/azure:** example script with realtime API ([#1967](https://github.com/openai/openai-python/issues/1967)) ([84f2f9c](https://github.com/openai/openai-python/commit/84f2f9c0439229a7db7136fe78419292d34d1f81))
26+
27+
## 1.59.7 (2025-01-13)
28+
29+
Full Changelog: [v1.59.6...v1.59.7](https://github.com/openai/openai-python/compare/v1.59.6...v1.59.7)
30+
31+
### Chores
32+
33+
* export HttpxBinaryResponseContent class ([7191b71](https://github.com/openai/openai-python/commit/7191b71f3dcbbfcb2f2bec855c3bba93c956384e))
34+
335
## 1.59.6 (2025-01-09)
436

537
Full Changelog: [v1.59.5...v1.59.6](https://github.com/openai/openai-python/compare/v1.59.5...v1.59.6)

Diff for: README.md

+2-2
Original file line numberDiff line numberDiff line change
@@ -275,7 +275,7 @@ from openai import AsyncOpenAI
275275
async def main():
276276
client = AsyncOpenAI()
277277

278-
async with client.beta.realtime.connect(model="gpt-4o-realtime-preview-2024-10-01") as connection:
278+
async with client.beta.realtime.connect(model="gpt-4o-realtime-preview") as connection:
279279
await connection.session.update(session={'modalities': ['text']})
280280

281281
await connection.conversation.item.create(
@@ -309,7 +309,7 @@ Whenever an error occurs, the Realtime API will send an [`error` event](https://
309309
```py
310310
client = AsyncOpenAI()
311311

312-
async with client.beta.realtime.connect(model="gpt-4o-realtime-preview-2024-10-01") as connection:
312+
async with client.beta.realtime.connect(model="gpt-4o-realtime-preview") as connection:
313313
...
314314
async for event in connection:
315315
if event.type == 'error':

Diff for: api.md

+1-1
Original file line numberDiff line numberDiff line change
@@ -314,7 +314,7 @@ from openai.types.beta import (
314314
OtherFileChunkingStrategyObject,
315315
StaticFileChunkingStrategy,
316316
StaticFileChunkingStrategyObject,
317-
StaticFileChunkingStrategyParam,
317+
StaticFileChunkingStrategyObjectParam,
318318
VectorStore,
319319
VectorStoreDeleted,
320320
)

Diff for: examples/realtime/azure_realtime.py

+57
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,57 @@
1+
import os
2+
import asyncio
3+
4+
from azure.identity.aio import DefaultAzureCredential, get_bearer_token_provider
5+
6+
from openai import AsyncAzureOpenAI
7+
8+
# Azure OpenAI Realtime Docs
9+
10+
# How-to: https://learn.microsoft.com/azure/ai-services/openai/how-to/realtime-audio
11+
# Supported models and API versions: https://learn.microsoft.com/azure/ai-services/openai/how-to/realtime-audio#supported-models
12+
# Entra ID auth: https://learn.microsoft.com/azure/ai-services/openai/how-to/managed-identity
13+
14+
15+
async def main() -> None:
16+
"""The following example demonstrates how to configure Azure OpenAI to use the Realtime API.
17+
For an audio example, see push_to_talk_app.py and update the client and model parameter accordingly.
18+
19+
When prompted for user input, type a message and hit enter to send it to the model.
20+
Enter "q" to quit the conversation.
21+
"""
22+
23+
credential = DefaultAzureCredential()
24+
client = AsyncAzureOpenAI(
25+
azure_endpoint=os.environ["AZURE_OPENAI_ENDPOINT"],
26+
azure_ad_token_provider=get_bearer_token_provider(credential, "https://cognitiveservices.azure.com/.default"),
27+
api_version="2024-10-01-preview",
28+
)
29+
async with client.beta.realtime.connect(
30+
model="gpt-4o-realtime-preview", # deployment name for your model
31+
) as connection:
32+
await connection.session.update(session={"modalities": ["text"]}) # type: ignore
33+
while True:
34+
user_input = input("Enter a message: ")
35+
if user_input == "q":
36+
break
37+
38+
await connection.conversation.item.create(
39+
item={
40+
"type": "message",
41+
"role": "user",
42+
"content": [{"type": "input_text", "text": user_input}],
43+
}
44+
)
45+
await connection.response.create()
46+
async for event in connection:
47+
if event.type == "response.text.delta":
48+
print(event.delta, flush=True, end="")
49+
elif event.type == "response.text.done":
50+
print()
51+
elif event.type == "response.done":
52+
break
53+
54+
await credential.close()
55+
56+
57+
asyncio.run(main())

Diff for: examples/realtime/push_to_talk_app.py

+1-1
Original file line numberDiff line numberDiff line change
@@ -152,7 +152,7 @@ async def on_mount(self) -> None:
152152
self.run_worker(self.send_mic_audio())
153153

154154
async def handle_realtime_connection(self) -> None:
155-
async with self.client.beta.realtime.connect(model="gpt-4o-realtime-preview-2024-10-01") as conn:
155+
async with self.client.beta.realtime.connect(model="gpt-4o-realtime-preview") as conn:
156156
self.connection = conn
157157
self.connected.set()
158158

Diff for: mypy.ini

+1-1
Original file line numberDiff line numberDiff line change
@@ -44,7 +44,7 @@ cache_fine_grained = True
4444
# ```
4545
# Changing this codegen to make mypy happy would increase complexity
4646
# and would not be worth it.
47-
disable_error_code = func-returns-value
47+
disable_error_code = func-returns-value,overload-cannot-match
4848

4949
# https://github.com/python/mypy/issues/12162
5050
[mypy.overrides]

Diff for: pyproject.toml

+1-1
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,6 @@
11
[project]
22
name = "openai"
3-
version = "1.59.6"
3+
version = "1.59.8"
44
description = "The official Python library for the openai API"
55
dynamic = ["readme"]
66
license = "Apache-2.0"

Diff for: requirements-dev.lock

+2-2
Original file line numberDiff line numberDiff line change
@@ -83,7 +83,7 @@ msal==1.31.0
8383
# via msal-extensions
8484
msal-extensions==1.2.0
8585
# via azure-identity
86-
mypy==1.13.0
86+
mypy==1.14.1
8787
mypy-extensions==1.0.0
8888
# via black
8989
# via mypy
@@ -124,7 +124,7 @@ pygments==2.18.0
124124
# via rich
125125
pyjwt==2.8.0
126126
# via msal
127-
pyright==1.1.390
127+
pyright==1.1.392.post0
128128
pytest==8.3.3
129129
# via pytest-asyncio
130130
pytest-asyncio==0.24.0

Diff for: src/openai/__init__.py

+1
Original file line numberDiff line numberDiff line change
@@ -33,6 +33,7 @@
3333
)
3434
from ._base_client import DefaultHttpxClient, DefaultAsyncHttpxClient
3535
from ._utils._logs import setup_logging as _setup_logging
36+
from ._legacy_response import HttpxBinaryResponseContent as HttpxBinaryResponseContent
3637

3738
__all__ = [
3839
"types",

Diff for: src/openai/_legacy_response.py

+10-2
Original file line numberDiff line numberDiff line change
@@ -269,7 +269,9 @@ def _parse(self, *, to: type[_T] | None = None) -> R | _T:
269269
if origin == LegacyAPIResponse:
270270
raise RuntimeError("Unexpected state - cast_to is `APIResponse`")
271271

272-
if inspect.isclass(origin) and issubclass(origin, httpx.Response):
272+
if inspect.isclass(
273+
origin # pyright: ignore[reportUnknownArgumentType]
274+
) and issubclass(origin, httpx.Response):
273275
# Because of the invariance of our ResponseT TypeVar, users can subclass httpx.Response
274276
# and pass that class to our request functions. We cannot change the variance to be either
275277
# covariant or contravariant as that makes our usage of ResponseT illegal. We could construct
@@ -279,7 +281,13 @@ def _parse(self, *, to: type[_T] | None = None) -> R | _T:
279281
raise ValueError(f"Subclasses of httpx.Response cannot be passed to `cast_to`")
280282
return cast(R, response)
281283

282-
if inspect.isclass(origin) and not issubclass(origin, BaseModel) and issubclass(origin, pydantic.BaseModel):
284+
if (
285+
inspect.isclass(
286+
origin # pyright: ignore[reportUnknownArgumentType]
287+
)
288+
and not issubclass(origin, BaseModel)
289+
and issubclass(origin, pydantic.BaseModel)
290+
):
283291
raise TypeError("Pydantic models must subclass our base model type, e.g. `from openai import BaseModel`")
284292

285293
if (

Diff for: src/openai/_response.py

+7-1
Original file line numberDiff line numberDiff line change
@@ -214,7 +214,13 @@ def _parse(self, *, to: type[_T] | None = None) -> R | _T:
214214
raise ValueError(f"Subclasses of httpx.Response cannot be passed to `cast_to`")
215215
return cast(R, response)
216216

217-
if inspect.isclass(origin) and not issubclass(origin, BaseModel) and issubclass(origin, pydantic.BaseModel):
217+
if (
218+
inspect.isclass(
219+
origin # pyright: ignore[reportUnknownArgumentType]
220+
)
221+
and not issubclass(origin, BaseModel)
222+
and issubclass(origin, pydantic.BaseModel)
223+
):
218224
raise TypeError("Pydantic models must subclass our base model type, e.g. `from openai import BaseModel`")
219225

220226
if (

Diff for: src/openai/_version.py

+1-1
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,4 @@
11
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
22

33
__title__ = "openai"
4-
__version__ = "1.59.6" # x-release-please-version
4+
__version__ = "1.59.8" # x-release-please-version

Diff for: src/openai/lib/_parsing/_completions.py

+1-1
Original file line numberDiff line numberDiff line change
@@ -157,7 +157,7 @@ def maybe_parse_content(
157157
response_format: type[ResponseFormatT] | ResponseFormatParam | NotGiven,
158158
message: ChatCompletionMessage | ParsedChatCompletionMessage[object],
159159
) -> ResponseFormatT | None:
160-
if has_rich_response_format(response_format) and message.content is not None and not message.refusal:
160+
if has_rich_response_format(response_format) and message.content and not message.refusal:
161161
return _parse_content(response_format, message.content)
162162

163163
return None

Diff for: src/openai/lib/_pydantic.py

+3
Original file line numberDiff line numberDiff line change
@@ -108,6 +108,9 @@ def _ensure_strict_json_schema(
108108
# properties from the json schema take priority over the ones on the `$ref`
109109
json_schema.update({**resolved, **json_schema})
110110
json_schema.pop("$ref")
111+
# Since the schema expanded from `$ref` might not have `additionalProperties: false` applied,
112+
# we call `_ensure_strict_json_schema` again to fix the inlined schema and ensure it's valid.
113+
return _ensure_strict_json_schema(json_schema, path=path, root=root)
111114

112115
return json_schema
113116

Diff for: src/openai/types/beta/__init__.py

+3
Original file line numberDiff line numberDiff line change
@@ -43,3 +43,6 @@
4343
from .assistant_response_format_option_param import (
4444
AssistantResponseFormatOptionParam as AssistantResponseFormatOptionParam,
4545
)
46+
from .static_file_chunking_strategy_object_param import (
47+
StaticFileChunkingStrategyObjectParam as StaticFileChunkingStrategyObjectParam,
48+
)

Diff for: src/openai/types/beta/file_chunking_strategy_param.py

+2-2
Original file line numberDiff line numberDiff line change
@@ -6,8 +6,8 @@
66
from typing_extensions import TypeAlias
77

88
from .auto_file_chunking_strategy_param import AutoFileChunkingStrategyParam
9-
from .static_file_chunking_strategy_param import StaticFileChunkingStrategyParam
9+
from .static_file_chunking_strategy_object_param import StaticFileChunkingStrategyObjectParam
1010

1111
__all__ = ["FileChunkingStrategyParam"]
1212

13-
FileChunkingStrategyParam: TypeAlias = Union[AutoFileChunkingStrategyParam, StaticFileChunkingStrategyParam]
13+
FileChunkingStrategyParam: TypeAlias = Union[AutoFileChunkingStrategyParam, StaticFileChunkingStrategyObjectParam]
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,16 @@
1+
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
2+
3+
from __future__ import annotations
4+
5+
from typing_extensions import Literal, Required, TypedDict
6+
7+
from .static_file_chunking_strategy_param import StaticFileChunkingStrategyParam
8+
9+
__all__ = ["StaticFileChunkingStrategyObjectParam"]
10+
11+
12+
class StaticFileChunkingStrategyObjectParam(TypedDict, total=False):
13+
static: Required[StaticFileChunkingStrategyParam]
14+
15+
type: Required[Literal["static"]]
16+
"""Always `static`."""

0 commit comments

Comments
 (0)