diff --git a/.release-please-manifest.json b/.release-please-manifest.json
index 73f712c242..68804e4da0 100644
--- a/.release-please-manifest.json
+++ b/.release-please-manifest.json
@@ -1,3 +1,3 @@
{
- ".": "1.60.2"
+ ".": "1.61.0"
}
\ No newline at end of file
diff --git a/.stats.yml b/.stats.yml
index d518bac586..e49b5c56e8 100644
--- a/.stats.yml
+++ b/.stats.yml
@@ -1,2 +1,2 @@
configured_endpoints: 69
-openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-3904ef6b29a89c98f93a9b7da19879695f3c440564be6384db7af1b734611ede.yml
+openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-6204952a29973265b9c0d66fc67ffaf53c6a90ae4d75cdacf9d147676f5274c9.yml
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 168d98e5cd..dcd1c06333 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,5 +1,30 @@
# Changelog
+## 1.61.0 (2025-01-31)
+
+Full Changelog: [v1.60.2...v1.61.0](https://github.com/openai/openai-python/compare/v1.60.2...v1.61.0)
+
+### Features
+
+* **api:** add o3-mini ([#2067](https://github.com/openai/openai-python/issues/2067)) ([12b87a4](https://github.com/openai/openai-python/commit/12b87a4a1e6cb071a6b063d089585dec56a5d534))
+
+
+### Bug Fixes
+
+* **types:** correct metadata type + other fixes ([12b87a4](https://github.com/openai/openai-python/commit/12b87a4a1e6cb071a6b063d089585dec56a5d534))
+
+
+### Chores
+
+* **helpers:** section links ([ef8d3cc](https://github.com/openai/openai-python/commit/ef8d3cce40022d3482d341455be604e5f1afbd70))
+* **types:** fix Metadata types ([82d3156](https://github.com/openai/openai-python/commit/82d3156e74ed2f95edd10cd7ebea53d2b5562794))
+* update api.md ([#2063](https://github.com/openai/openai-python/issues/2063)) ([21964f0](https://github.com/openai/openai-python/commit/21964f00fb104011c4c357544114702052b74548))
+
+
+### Documentation
+
+* **readme:** current section links ([#2055](https://github.com/openai/openai-python/issues/2055)) ([ef8d3cc](https://github.com/openai/openai-python/commit/ef8d3cce40022d3482d341455be604e5f1afbd70))
+
## 1.60.2 (2025-01-27)
Full Changelog: [v1.60.1...v1.60.2](https://github.com/openai/openai-python/compare/v1.60.1...v1.60.2)
diff --git a/README.md b/README.md
index 5f7d477cc8..3c103f036c 100644
--- a/README.md
+++ b/README.md
@@ -304,7 +304,7 @@ However the real magic of the Realtime API is handling audio inputs / outputs, s
### Realtime error handling
-Whenever an error occurs, the Realtime API will send an [`error` event](https://platform.openai.com/docs/guides/realtime/realtime-api-beta#handling-errors) and the connection will stay open and remain usable. This means you need to handle it yourself, as *no errors are raised directly* by the SDK when an `error` event comes in.
+Whenever an error occurs, the Realtime API will send an [`error` event](https://platform.openai.com/docs/guides/realtime-model-capabilities#error-handling) and the connection will stay open and remain usable. This means you need to handle it yourself, as *no errors are raised directly* by the SDK when an `error` event comes in.
```py
client = AsyncOpenAI()
@@ -547,7 +547,7 @@ client.with_options(max_retries=5).chat.completions.create(
### Timeouts
By default requests time out after 10 minutes. You can configure this with a `timeout` option,
-which accepts a float or an [`httpx.Timeout`](https://www.python-httpx.org/advanced/#fine-tuning-the-configuration) object:
+which accepts a float or an [`httpx.Timeout`](https://www.python-httpx.org/advanced/timeouts/#fine-tuning-the-configuration) object:
```python
from openai import OpenAI
diff --git a/api.md b/api.md
index 1edd3f6589..c1262fd2c5 100644
--- a/api.md
+++ b/api.md
@@ -5,6 +5,7 @@ from openai.types import (
ErrorObject,
FunctionDefinition,
FunctionParameters,
+ Metadata,
ResponseFormatJSONObject,
ResponseFormatJSONSchema,
ResponseFormatText,
@@ -99,7 +100,7 @@ Methods:
- client.files.list(\*\*params) -> SyncCursorPage[FileObject]
- client.files.delete(file_id) -> FileDeleted
- client.files.content(file_id) -> HttpxBinaryResponseContent
-- client.files.retrieve_content(file_id) -> str
+- client.files.retrieve_content(file_id) -> str
- client.files.wait_for_processing(\*args) -> FileObject
# Images
diff --git a/helpers.md b/helpers.md
index 3f3fafa45c..77823fa750 100644
--- a/helpers.md
+++ b/helpers.md
@@ -134,7 +134,7 @@ OpenAI supports streaming responses when interacting with the [Chat Completion](
The SDK provides a `.beta.chat.completions.stream()` method that wraps the `.chat.completions.create(stream=True)` stream providing a more granular event API & automatic accumulation of each delta.
-It also supports all aforementioned [parsing helpers](#parsing-helpers).
+It also supports all aforementioned [parsing helpers](#structured-outputs-parsing-helpers).
Unlike `.create(stream=True)`, the `.stream()` method requires usage within a context manager to prevent accidental leakage of the response:
diff --git a/pyproject.toml b/pyproject.toml
index 9657bdc0ce..07913fcbd2 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,6 +1,6 @@
[project]
name = "openai"
-version = "1.60.2"
+version = "1.61.0"
description = "The official Python library for the openai API"
dynamic = ["readme"]
license = "Apache-2.0"
diff --git a/src/openai/_version.py b/src/openai/_version.py
index c8f825db34..e9ab8be65e 100644
--- a/src/openai/_version.py
+++ b/src/openai/_version.py
@@ -1,4 +1,4 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
__title__ = "openai"
-__version__ = "1.60.2" # x-release-please-version
+__version__ = "1.61.0" # x-release-please-version
diff --git a/src/openai/resources/audio/transcriptions.py b/src/openai/resources/audio/transcriptions.py
index 341446c43a..f338ad067d 100644
--- a/src/openai/resources/audio/transcriptions.py
+++ b/src/openai/resources/audio/transcriptions.py
@@ -138,8 +138,8 @@ def create(
Whisper V2 model) is currently available.
language: The language of the input audio. Supplying the input language in
- [ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) format will
- improve accuracy and latency.
+ [ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) (e.g. `en`)
+ format will improve accuracy and latency.
prompt: An optional text to guide the model's style or continue a previous audio
segment. The
@@ -302,8 +302,8 @@ async def create(
Whisper V2 model) is currently available.
language: The language of the input audio. Supplying the input language in
- [ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) format will
- improve accuracy and latency.
+ [ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) (e.g. `en`)
+ format will improve accuracy and latency.
prompt: An optional text to guide the model's style or continue a previous audio
segment. The
diff --git a/src/openai/resources/batches.py b/src/openai/resources/batches.py
index 4a887642e9..7e7ec19ec2 100644
--- a/src/openai/resources/batches.py
+++ b/src/openai/resources/batches.py
@@ -2,7 +2,7 @@
from __future__ import annotations
-from typing import Dict, Optional
+from typing import Optional
from typing_extensions import Literal
import httpx
@@ -19,10 +19,8 @@
from .._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper
from ..pagination import SyncCursorPage, AsyncCursorPage
from ..types.batch import Batch
-from .._base_client import (
- AsyncPaginator,
- make_request_options,
-)
+from .._base_client import AsyncPaginator, make_request_options
+from ..types.shared_params.metadata import Metadata
__all__ = ["Batches", "AsyncBatches"]
@@ -53,7 +51,7 @@ def create(
completion_window: Literal["24h"],
endpoint: Literal["/v1/chat/completions", "/v1/embeddings", "/v1/completions"],
input_file_id: str,
- metadata: Optional[Dict[str, str]] | NotGiven = NOT_GIVEN,
+ metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
@@ -83,7 +81,12 @@ def create(
and must be uploaded with the purpose `batch`. The file can contain up to 50,000
requests, and can be up to 200 MB in size.
- metadata: Optional custom metadata for the batch.
+ metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
+ for storing additional information about the object in a structured format, and
+ querying for objects via API or the dashboard.
+
+ Keys are strings with a maximum length of 64 characters. Values are strings with
+ a maximum length of 512 characters.
extra_headers: Send extra headers
@@ -258,7 +261,7 @@ async def create(
completion_window: Literal["24h"],
endpoint: Literal["/v1/chat/completions", "/v1/embeddings", "/v1/completions"],
input_file_id: str,
- metadata: Optional[Dict[str, str]] | NotGiven = NOT_GIVEN,
+ metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
@@ -288,7 +291,12 @@ async def create(
and must be uploaded with the purpose `batch`. The file can contain up to 50,000
requests, and can be up to 200 MB in size.
- metadata: Optional custom metadata for the batch.
+ metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
+ for storing additional information about the object in a structured format, and
+ querying for objects via API or the dashboard.
+
+ Keys are strings with a maximum length of 64 characters. Values are strings with
+ a maximum length of 512 characters.
extra_headers: Send extra headers
diff --git a/src/openai/resources/beta/assistants.py b/src/openai/resources/beta/assistants.py
index 2f2482b648..65b7c9cfc2 100644
--- a/src/openai/resources/beta/assistants.py
+++ b/src/openai/resources/beta/assistants.py
@@ -26,6 +26,7 @@
from ...types.chat_model import ChatModel
from ...types.beta.assistant import Assistant
from ...types.beta.assistant_deleted import AssistantDeleted
+from ...types.shared_params.metadata import Metadata
from ...types.beta.assistant_tool_param import AssistantToolParam
from ...types.beta.assistant_response_format_option_param import AssistantResponseFormatOptionParam
@@ -58,7 +59,7 @@ def create(
model: Union[str, ChatModel],
description: Optional[str] | NotGiven = NOT_GIVEN,
instructions: Optional[str] | NotGiven = NOT_GIVEN,
- metadata: Optional[object] | NotGiven = NOT_GIVEN,
+ metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
name: Optional[str] | NotGiven = NOT_GIVEN,
response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN,
temperature: Optional[float] | NotGiven = NOT_GIVEN,
@@ -88,9 +89,11 @@ def create(
characters.
metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
- for storing additional information about the object in a structured format. Keys
- can be a maximum of 64 characters long and values can be a maximum of 512
- characters long.
+ for storing additional information about the object in a structured format, and
+ querying for objects via API or the dashboard.
+
+ Keys are strings with a maximum length of 64 characters. Values are strings with
+ a maximum length of 512 characters.
name: The name of the assistant. The maximum length is 256 characters.
@@ -206,7 +209,7 @@ def update(
*,
description: Optional[str] | NotGiven = NOT_GIVEN,
instructions: Optional[str] | NotGiven = NOT_GIVEN,
- metadata: Optional[object] | NotGiven = NOT_GIVEN,
+ metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
model: str | NotGiven = NOT_GIVEN,
name: Optional[str] | NotGiven = NOT_GIVEN,
response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN,
@@ -232,9 +235,11 @@ def update(
characters.
metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
- for storing additional information about the object in a structured format. Keys
- can be a maximum of 64 characters long and values can be a maximum of 512
- characters long.
+ for storing additional information about the object in a structured format, and
+ querying for objects via API or the dashboard.
+
+ Keys are strings with a maximum length of 64 characters. Values are strings with
+ a maximum length of 512 characters.
model: ID of the model to use. You can use the
[List models](https://platform.openai.com/docs/api-reference/models/list) API to
@@ -444,7 +449,7 @@ async def create(
model: Union[str, ChatModel],
description: Optional[str] | NotGiven = NOT_GIVEN,
instructions: Optional[str] | NotGiven = NOT_GIVEN,
- metadata: Optional[object] | NotGiven = NOT_GIVEN,
+ metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
name: Optional[str] | NotGiven = NOT_GIVEN,
response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN,
temperature: Optional[float] | NotGiven = NOT_GIVEN,
@@ -474,9 +479,11 @@ async def create(
characters.
metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
- for storing additional information about the object in a structured format. Keys
- can be a maximum of 64 characters long and values can be a maximum of 512
- characters long.
+ for storing additional information about the object in a structured format, and
+ querying for objects via API or the dashboard.
+
+ Keys are strings with a maximum length of 64 characters. Values are strings with
+ a maximum length of 512 characters.
name: The name of the assistant. The maximum length is 256 characters.
@@ -592,7 +599,7 @@ async def update(
*,
description: Optional[str] | NotGiven = NOT_GIVEN,
instructions: Optional[str] | NotGiven = NOT_GIVEN,
- metadata: Optional[object] | NotGiven = NOT_GIVEN,
+ metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
model: str | NotGiven = NOT_GIVEN,
name: Optional[str] | NotGiven = NOT_GIVEN,
response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN,
@@ -618,9 +625,11 @@ async def update(
characters.
metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
- for storing additional information about the object in a structured format. Keys
- can be a maximum of 64 characters long and values can be a maximum of 512
- characters long.
+ for storing additional information about the object in a structured format, and
+ querying for objects via API or the dashboard.
+
+ Keys are strings with a maximum length of 64 characters. Values are strings with
+ a maximum length of 512 characters.
model: ID of the model to use. You can use the
[List models](https://platform.openai.com/docs/api-reference/models/list) API to
diff --git a/src/openai/resources/beta/chat/completions.py b/src/openai/resources/beta/chat/completions.py
index 7771d2ff50..8a3a20d9e0 100644
--- a/src/openai/resources/beta/chat/completions.py
+++ b/src/openai/resources/beta/chat/completions.py
@@ -28,6 +28,7 @@
)
from ....types.chat_model import ChatModel
from ....lib.streaming.chat import ChatCompletionStreamManager, AsyncChatCompletionStreamManager
+from ....types.shared_params import Metadata
from ....types.chat.chat_completion import ChatCompletion
from ....types.chat.chat_completion_chunk import ChatCompletionChunk
from ....types.chat.parsed_chat_completion import ParsedChatCompletion
@@ -76,7 +77,7 @@ def parse(
logprobs: Optional[bool] | NotGiven = NOT_GIVEN,
max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN,
max_tokens: Optional[int] | NotGiven = NOT_GIVEN,
- metadata: Optional[Dict[str, str]] | NotGiven = NOT_GIVEN,
+ metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
modalities: Optional[List[ChatCompletionModality]] | NotGiven = NOT_GIVEN,
n: Optional[int] | NotGiven = NOT_GIVEN,
parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
@@ -221,7 +222,7 @@ def stream(
logprobs: Optional[bool] | NotGiven = NOT_GIVEN,
max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN,
max_tokens: Optional[int] | NotGiven = NOT_GIVEN,
- metadata: Optional[Dict[str, str]] | NotGiven = NOT_GIVEN,
+ metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
modalities: Optional[List[ChatCompletionModality]] | NotGiven = NOT_GIVEN,
n: Optional[int] | NotGiven = NOT_GIVEN,
parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
@@ -268,8 +269,6 @@ def stream(
When the context manager exits, the response will be closed, however the `stream` instance is still available outside
the context manager.
"""
- _validate_input_tools(tools)
-
extra_headers = {
"X-Stainless-Helper-Method": "beta.chat.completions.stream",
**(extra_headers or {}),
@@ -353,7 +352,7 @@ async def parse(
logprobs: Optional[bool] | NotGiven = NOT_GIVEN,
max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN,
max_tokens: Optional[int] | NotGiven = NOT_GIVEN,
- metadata: Optional[Dict[str, str]] | NotGiven = NOT_GIVEN,
+ metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
modalities: Optional[List[ChatCompletionModality]] | NotGiven = NOT_GIVEN,
n: Optional[int] | NotGiven = NOT_GIVEN,
parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
@@ -498,7 +497,7 @@ def stream(
logprobs: Optional[bool] | NotGiven = NOT_GIVEN,
max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN,
max_tokens: Optional[int] | NotGiven = NOT_GIVEN,
- metadata: Optional[Dict[str, str]] | NotGiven = NOT_GIVEN,
+ metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
modalities: Optional[List[ChatCompletionModality]] | NotGiven = NOT_GIVEN,
n: Optional[int] | NotGiven = NOT_GIVEN,
parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
diff --git a/src/openai/resources/beta/realtime/sessions.py b/src/openai/resources/beta/realtime/sessions.py
index b920c89207..4b337b7c19 100644
--- a/src/openai/resources/beta/realtime/sessions.py
+++ b/src/openai/resources/beta/realtime/sessions.py
@@ -89,8 +89,11 @@ def create(
input_audio_transcription: Configuration for input audio transcription, defaults to off and can be set to
`null` to turn off once on. Input audio transcription is not native to the
model, since the model consumes audio directly. Transcription runs
- asynchronously through Whisper and should be treated as rough guidance rather
- than the representation understood by the model.
+ asynchronously through
+ [OpenAI Whisper transcription](https://platform.openai.com/docs/api-reference/audio/createTranscription)
+ and should be treated as rough guidance rather than the representation
+ understood by the model. The client can optionally set the language and prompt
+ for transcription; these fields will be passed to the Whisper API.
instructions: The default system instructions (i.e. system message) prepended to model calls.
This field allows the client to guide the model on desired responses. The model
@@ -232,8 +235,11 @@ async def create(
input_audio_transcription: Configuration for input audio transcription, defaults to off and can be set to
`null` to turn off once on. Input audio transcription is not native to the
model, since the model consumes audio directly. Transcription runs
- asynchronously through Whisper and should be treated as rough guidance rather
- than the representation understood by the model.
+ asynchronously through
+ [OpenAI Whisper transcription](https://platform.openai.com/docs/api-reference/audio/createTranscription)
+ and should be treated as rough guidance rather than the representation
+ understood by the model. The client can optionally set the language and prompt
+ for transcription; these fields will be passed to the Whisper API.
instructions: The default system instructions (i.e. system message) prepended to model calls.
This field allows the client to guide the model on desired responses. The model
diff --git a/src/openai/resources/beta/threads/messages.py b/src/openai/resources/beta/threads/messages.py
index f780f6f558..e3374aba37 100644
--- a/src/openai/resources/beta/threads/messages.py
+++ b/src/openai/resources/beta/threads/messages.py
@@ -23,6 +23,7 @@
)
from ....types.beta.threads import message_list_params, message_create_params, message_update_params
from ....types.beta.threads.message import Message
+from ....types.shared_params.metadata import Metadata
from ....types.beta.threads.message_deleted import MessageDeleted
from ....types.beta.threads.message_content_part_param import MessageContentPartParam
@@ -56,7 +57,7 @@ def create(
content: Union[str, Iterable[MessageContentPartParam]],
role: Literal["user", "assistant"],
attachments: Optional[Iterable[message_create_params.Attachment]] | NotGiven = NOT_GIVEN,
- metadata: Optional[object] | NotGiven = NOT_GIVEN,
+ metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
@@ -81,9 +82,11 @@ def create(
attachments: A list of files attached to the message, and the tools they should be added to.
metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
- for storing additional information about the object in a structured format. Keys
- can be a maximum of 64 characters long and values can be a maximum of 512
- characters long.
+ for storing additional information about the object in a structured format, and
+ querying for objects via API or the dashboard.
+
+ Keys are strings with a maximum length of 64 characters. Values are strings with
+ a maximum length of 512 characters.
extra_headers: Send extra headers
@@ -155,7 +158,7 @@ def update(
message_id: str,
*,
thread_id: str,
- metadata: Optional[object] | NotGiven = NOT_GIVEN,
+ metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
@@ -168,9 +171,11 @@ def update(
Args:
metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
- for storing additional information about the object in a structured format. Keys
- can be a maximum of 64 characters long and values can be a maximum of 512
- characters long.
+ for storing additional information about the object in a structured format, and
+ querying for objects via API or the dashboard.
+
+ Keys are strings with a maximum length of 64 characters. Values are strings with
+ a maximum length of 512 characters.
extra_headers: Send extra headers
@@ -330,7 +335,7 @@ async def create(
content: Union[str, Iterable[MessageContentPartParam]],
role: Literal["user", "assistant"],
attachments: Optional[Iterable[message_create_params.Attachment]] | NotGiven = NOT_GIVEN,
- metadata: Optional[object] | NotGiven = NOT_GIVEN,
+ metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
@@ -355,9 +360,11 @@ async def create(
attachments: A list of files attached to the message, and the tools they should be added to.
metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
- for storing additional information about the object in a structured format. Keys
- can be a maximum of 64 characters long and values can be a maximum of 512
- characters long.
+ for storing additional information about the object in a structured format, and
+ querying for objects via API or the dashboard.
+
+ Keys are strings with a maximum length of 64 characters. Values are strings with
+ a maximum length of 512 characters.
extra_headers: Send extra headers
@@ -429,7 +436,7 @@ async def update(
message_id: str,
*,
thread_id: str,
- metadata: Optional[object] | NotGiven = NOT_GIVEN,
+ metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
@@ -442,9 +449,11 @@ async def update(
Args:
metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
- for storing additional information about the object in a structured format. Keys
- can be a maximum of 64 characters long and values can be a maximum of 512
- characters long.
+ for storing additional information about the object in a structured format, and
+ querying for objects via API or the dashboard.
+
+ Keys are strings with a maximum length of 64 characters. Values are strings with
+ a maximum length of 512 characters.
extra_headers: Send extra headers
diff --git a/src/openai/resources/beta/threads/runs/runs.py b/src/openai/resources/beta/threads/runs/runs.py
index f32a08f235..13301ad507 100644
--- a/src/openai/resources/beta/threads/runs/runs.py
+++ b/src/openai/resources/beta/threads/runs/runs.py
@@ -47,6 +47,7 @@
run_submit_tool_outputs_params,
)
from .....types.beta.threads.run import Run
+from .....types.shared_params.metadata import Metadata
from .....types.beta.assistant_tool_param import AssistantToolParam
from .....types.beta.assistant_stream_event import AssistantStreamEvent
from .....types.beta.threads.runs.run_step_include import RunStepInclude
@@ -92,7 +93,7 @@ def create(
instructions: Optional[str] | NotGiven = NOT_GIVEN,
max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN,
max_prompt_tokens: Optional[int] | NotGiven = NOT_GIVEN,
- metadata: Optional[object] | NotGiven = NOT_GIVEN,
+ metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
model: Union[str, ChatModel, None] | NotGiven = NOT_GIVEN,
parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN,
@@ -148,9 +149,11 @@ def create(
`incomplete_details` for more info.
metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
- for storing additional information about the object in a structured format. Keys
- can be a maximum of 64 characters long and values can be a maximum of 512
- characters long.
+ for storing additional information about the object in a structured format, and
+ querying for objects via API or the dashboard.
+
+ Keys are strings with a maximum length of 64 characters. Values are strings with
+ a maximum length of 512 characters.
model: The ID of the [Model](https://platform.openai.com/docs/api-reference/models) to
be used to execute this run. If a value is provided here, it will override the
@@ -233,7 +236,7 @@ def create(
instructions: Optional[str] | NotGiven = NOT_GIVEN,
max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN,
max_prompt_tokens: Optional[int] | NotGiven = NOT_GIVEN,
- metadata: Optional[object] | NotGiven = NOT_GIVEN,
+ metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
model: Union[str, ChatModel, None] | NotGiven = NOT_GIVEN,
parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN,
@@ -292,9 +295,11 @@ def create(
`incomplete_details` for more info.
metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
- for storing additional information about the object in a structured format. Keys
- can be a maximum of 64 characters long and values can be a maximum of 512
- characters long.
+ for storing additional information about the object in a structured format, and
+ querying for objects via API or the dashboard.
+
+ Keys are strings with a maximum length of 64 characters. Values are strings with
+ a maximum length of 512 characters.
model: The ID of the [Model](https://platform.openai.com/docs/api-reference/models) to
be used to execute this run. If a value is provided here, it will override the
@@ -373,7 +378,7 @@ def create(
instructions: Optional[str] | NotGiven = NOT_GIVEN,
max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN,
max_prompt_tokens: Optional[int] | NotGiven = NOT_GIVEN,
- metadata: Optional[object] | NotGiven = NOT_GIVEN,
+ metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
model: Union[str, ChatModel, None] | NotGiven = NOT_GIVEN,
parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN,
@@ -432,9 +437,11 @@ def create(
`incomplete_details` for more info.
metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
- for storing additional information about the object in a structured format. Keys
- can be a maximum of 64 characters long and values can be a maximum of 512
- characters long.
+ for storing additional information about the object in a structured format, and
+ querying for objects via API or the dashboard.
+
+ Keys are strings with a maximum length of 64 characters. Values are strings with
+ a maximum length of 512 characters.
model: The ID of the [Model](https://platform.openai.com/docs/api-reference/models) to
be used to execute this run. If a value is provided here, it will override the
@@ -512,7 +519,7 @@ def create(
instructions: Optional[str] | NotGiven = NOT_GIVEN,
max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN,
max_prompt_tokens: Optional[int] | NotGiven = NOT_GIVEN,
- metadata: Optional[object] | NotGiven = NOT_GIVEN,
+ metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
model: Union[str, ChatModel, None] | NotGiven = NOT_GIVEN,
parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN,
@@ -609,7 +616,7 @@ def update(
run_id: str,
*,
thread_id: str,
- metadata: Optional[object] | NotGiven = NOT_GIVEN,
+ metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
@@ -622,9 +629,11 @@ def update(
Args:
metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
- for storing additional information about the object in a structured format. Keys
- can be a maximum of 64 characters long and values can be a maximum of 512
- characters long.
+ for storing additional information about the object in a structured format, and
+ querying for objects via API or the dashboard.
+
+ Keys are strings with a maximum length of 64 characters. Values are strings with
+ a maximum length of 512 characters.
extra_headers: Send extra headers
@@ -762,7 +771,7 @@ def create_and_poll(
instructions: Optional[str] | NotGiven = NOT_GIVEN,
max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN,
max_prompt_tokens: Optional[int] | NotGiven = NOT_GIVEN,
- metadata: Optional[object] | NotGiven = NOT_GIVEN,
+ metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
model: Union[str, ChatModel, None] | NotGiven = NOT_GIVEN,
parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN,
@@ -831,7 +840,7 @@ def create_and_stream(
instructions: Optional[str] | NotGiven = NOT_GIVEN,
max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN,
max_prompt_tokens: Optional[int] | NotGiven = NOT_GIVEN,
- metadata: Optional[object] | NotGiven = NOT_GIVEN,
+ metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
model: Union[str, ChatModel, None] | NotGiven = NOT_GIVEN,
parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN,
@@ -862,7 +871,7 @@ def create_and_stream(
instructions: Optional[str] | NotGiven = NOT_GIVEN,
max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN,
max_prompt_tokens: Optional[int] | NotGiven = NOT_GIVEN,
- metadata: Optional[object] | NotGiven = NOT_GIVEN,
+ metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
model: Union[str, ChatModel, None] | NotGiven = NOT_GIVEN,
parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN,
@@ -893,7 +902,7 @@ def create_and_stream(
instructions: Optional[str] | NotGiven = NOT_GIVEN,
max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN,
max_prompt_tokens: Optional[int] | NotGiven = NOT_GIVEN,
- metadata: Optional[object] | NotGiven = NOT_GIVEN,
+ metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
model: Union[str, ChatModel, None] | NotGiven = NOT_GIVEN,
parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN,
@@ -1010,7 +1019,7 @@ def stream(
instructions: Optional[str] | NotGiven = NOT_GIVEN,
max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN,
max_prompt_tokens: Optional[int] | NotGiven = NOT_GIVEN,
- metadata: Optional[object] | NotGiven = NOT_GIVEN,
+ metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
model: Union[str, ChatModel, None] | NotGiven = NOT_GIVEN,
parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN,
@@ -1041,7 +1050,7 @@ def stream(
instructions: Optional[str] | NotGiven = NOT_GIVEN,
max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN,
max_prompt_tokens: Optional[int] | NotGiven = NOT_GIVEN,
- metadata: Optional[object] | NotGiven = NOT_GIVEN,
+ metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
model: Union[str, ChatModel, None] | NotGiven = NOT_GIVEN,
parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN,
@@ -1072,7 +1081,7 @@ def stream(
instructions: Optional[str] | NotGiven = NOT_GIVEN,
max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN,
max_prompt_tokens: Optional[int] | NotGiven = NOT_GIVEN,
- metadata: Optional[object] | NotGiven = NOT_GIVEN,
+ metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
model: Union[str, ChatModel, None] | NotGiven = NOT_GIVEN,
parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN,
@@ -1457,7 +1466,7 @@ async def create(
instructions: Optional[str] | NotGiven = NOT_GIVEN,
max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN,
max_prompt_tokens: Optional[int] | NotGiven = NOT_GIVEN,
- metadata: Optional[object] | NotGiven = NOT_GIVEN,
+ metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
model: Union[str, ChatModel, None] | NotGiven = NOT_GIVEN,
parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN,
@@ -1513,9 +1522,11 @@ async def create(
`incomplete_details` for more info.
metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
- for storing additional information about the object in a structured format. Keys
- can be a maximum of 64 characters long and values can be a maximum of 512
- characters long.
+ for storing additional information about the object in a structured format, and
+ querying for objects via API or the dashboard.
+
+ Keys are strings with a maximum length of 64 characters. Values are strings with
+ a maximum length of 512 characters.
model: The ID of the [Model](https://platform.openai.com/docs/api-reference/models) to
be used to execute this run. If a value is provided here, it will override the
@@ -1598,7 +1609,7 @@ async def create(
instructions: Optional[str] | NotGiven = NOT_GIVEN,
max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN,
max_prompt_tokens: Optional[int] | NotGiven = NOT_GIVEN,
- metadata: Optional[object] | NotGiven = NOT_GIVEN,
+ metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
model: Union[str, ChatModel, None] | NotGiven = NOT_GIVEN,
parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN,
@@ -1657,9 +1668,11 @@ async def create(
`incomplete_details` for more info.
metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
- for storing additional information about the object in a structured format. Keys
- can be a maximum of 64 characters long and values can be a maximum of 512
- characters long.
+ for storing additional information about the object in a structured format, and
+ querying for objects via API or the dashboard.
+
+ Keys are strings with a maximum length of 64 characters. Values are strings with
+ a maximum length of 512 characters.
model: The ID of the [Model](https://platform.openai.com/docs/api-reference/models) to
be used to execute this run. If a value is provided here, it will override the
@@ -1738,7 +1751,7 @@ async def create(
instructions: Optional[str] | NotGiven = NOT_GIVEN,
max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN,
max_prompt_tokens: Optional[int] | NotGiven = NOT_GIVEN,
- metadata: Optional[object] | NotGiven = NOT_GIVEN,
+ metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
model: Union[str, ChatModel, None] | NotGiven = NOT_GIVEN,
parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN,
@@ -1797,9 +1810,11 @@ async def create(
`incomplete_details` for more info.
metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
- for storing additional information about the object in a structured format. Keys
- can be a maximum of 64 characters long and values can be a maximum of 512
- characters long.
+ for storing additional information about the object in a structured format, and
+ querying for objects via API or the dashboard.
+
+ Keys are strings with a maximum length of 64 characters. Values are strings with
+ a maximum length of 512 characters.
model: The ID of the [Model](https://platform.openai.com/docs/api-reference/models) to
be used to execute this run. If a value is provided here, it will override the
@@ -1877,7 +1892,7 @@ async def create(
instructions: Optional[str] | NotGiven = NOT_GIVEN,
max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN,
max_prompt_tokens: Optional[int] | NotGiven = NOT_GIVEN,
- metadata: Optional[object] | NotGiven = NOT_GIVEN,
+ metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
model: Union[str, ChatModel, None] | NotGiven = NOT_GIVEN,
parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN,
@@ -1974,7 +1989,7 @@ async def update(
run_id: str,
*,
thread_id: str,
- metadata: Optional[object] | NotGiven = NOT_GIVEN,
+ metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
@@ -1987,9 +2002,11 @@ async def update(
Args:
metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
- for storing additional information about the object in a structured format. Keys
- can be a maximum of 64 characters long and values can be a maximum of 512
- characters long.
+ for storing additional information about the object in a structured format, and
+ querying for objects via API or the dashboard.
+
+ Keys are strings with a maximum length of 64 characters. Values are strings with
+ a maximum length of 512 characters.
extra_headers: Send extra headers
@@ -2127,7 +2144,7 @@ async def create_and_poll(
instructions: Optional[str] | NotGiven = NOT_GIVEN,
max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN,
max_prompt_tokens: Optional[int] | NotGiven = NOT_GIVEN,
- metadata: Optional[object] | NotGiven = NOT_GIVEN,
+ metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
model: Union[str, ChatModel, None] | NotGiven = NOT_GIVEN,
parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN,
@@ -2196,7 +2213,7 @@ def create_and_stream(
instructions: Optional[str] | NotGiven = NOT_GIVEN,
max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN,
max_prompt_tokens: Optional[int] | NotGiven = NOT_GIVEN,
- metadata: Optional[object] | NotGiven = NOT_GIVEN,
+ metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
model: Union[str, ChatModel, None] | NotGiven = NOT_GIVEN,
parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN,
@@ -2227,7 +2244,7 @@ def create_and_stream(
instructions: Optional[str] | NotGiven = NOT_GIVEN,
max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN,
max_prompt_tokens: Optional[int] | NotGiven = NOT_GIVEN,
- metadata: Optional[object] | NotGiven = NOT_GIVEN,
+ metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
model: Union[str, ChatModel, None] | NotGiven = NOT_GIVEN,
parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN,
@@ -2258,7 +2275,7 @@ def create_and_stream(
instructions: Optional[str] | NotGiven = NOT_GIVEN,
max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN,
max_prompt_tokens: Optional[int] | NotGiven = NOT_GIVEN,
- metadata: Optional[object] | NotGiven = NOT_GIVEN,
+ metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
model: Union[str, ChatModel, None] | NotGiven = NOT_GIVEN,
parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN,
@@ -2376,7 +2393,7 @@ def stream(
instructions: Optional[str] | NotGiven = NOT_GIVEN,
max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN,
max_prompt_tokens: Optional[int] | NotGiven = NOT_GIVEN,
- metadata: Optional[object] | NotGiven = NOT_GIVEN,
+ metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
model: Union[str, ChatModel, None] | NotGiven = NOT_GIVEN,
parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN,
@@ -2407,7 +2424,7 @@ def stream(
instructions: Optional[str] | NotGiven = NOT_GIVEN,
max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN,
max_prompt_tokens: Optional[int] | NotGiven = NOT_GIVEN,
- metadata: Optional[object] | NotGiven = NOT_GIVEN,
+ metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
model: Union[str, ChatModel, None] | NotGiven = NOT_GIVEN,
parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN,
@@ -2438,7 +2455,7 @@ def stream(
instructions: Optional[str] | NotGiven = NOT_GIVEN,
max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN,
max_prompt_tokens: Optional[int] | NotGiven = NOT_GIVEN,
- metadata: Optional[object] | NotGiven = NOT_GIVEN,
+ metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
model: Union[str, ChatModel, None] | NotGiven = NOT_GIVEN,
parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN,
diff --git a/src/openai/resources/beta/threads/threads.py b/src/openai/resources/beta/threads/threads.py
index 186b6f63e2..6ff8539501 100644
--- a/src/openai/resources/beta/threads/threads.py
+++ b/src/openai/resources/beta/threads/threads.py
@@ -53,6 +53,7 @@
from ....types.beta.thread import Thread
from ....types.beta.threads.run import Run
from ....types.beta.thread_deleted import ThreadDeleted
+from ....types.shared_params.metadata import Metadata
from ....types.beta.assistant_stream_event import AssistantStreamEvent
from ....types.beta.assistant_tool_choice_option_param import AssistantToolChoiceOptionParam
from ....types.beta.assistant_response_format_option_param import AssistantResponseFormatOptionParam
@@ -92,7 +93,7 @@ def create(
self,
*,
messages: Iterable[thread_create_params.Message] | NotGiven = NOT_GIVEN,
- metadata: Optional[object] | NotGiven = NOT_GIVEN,
+ metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
tool_resources: Optional[thread_create_params.ToolResources] | NotGiven = NOT_GIVEN,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
@@ -109,9 +110,11 @@ def create(
start the thread with.
metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
- for storing additional information about the object in a structured format. Keys
- can be a maximum of 64 characters long and values can be a maximum of 512
- characters long.
+ for storing additional information about the object in a structured format, and
+ querying for objects via API or the dashboard.
+
+ Keys are strings with a maximum length of 64 characters. Values are strings with
+ a maximum length of 512 characters.
tool_resources: A set of resources that are made available to the assistant's tools in this
thread. The resources are specific to the type of tool. For example, the
@@ -181,7 +184,7 @@ def update(
self,
thread_id: str,
*,
- metadata: Optional[object] | NotGiven = NOT_GIVEN,
+ metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
tool_resources: Optional[thread_update_params.ToolResources] | NotGiven = NOT_GIVEN,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
@@ -195,9 +198,11 @@ def update(
Args:
metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
- for storing additional information about the object in a structured format. Keys
- can be a maximum of 64 characters long and values can be a maximum of 512
- characters long.
+ for storing additional information about the object in a structured format, and
+ querying for objects via API or the dashboard.
+
+ Keys are strings with a maximum length of 64 characters. Values are strings with
+ a maximum length of 512 characters.
tool_resources: A set of resources that are made available to the assistant's tools in this
thread. The resources are specific to the type of tool. For example, the
@@ -272,7 +277,7 @@ def create_and_run(
instructions: Optional[str] | NotGiven = NOT_GIVEN,
max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN,
max_prompt_tokens: Optional[int] | NotGiven = NOT_GIVEN,
- metadata: Optional[object] | NotGiven = NOT_GIVEN,
+ metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
model: Union[str, ChatModel, None] | NotGiven = NOT_GIVEN,
parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN,
@@ -315,9 +320,11 @@ def create_and_run(
`incomplete_details` for more info.
metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
- for storing additional information about the object in a structured format. Keys
- can be a maximum of 64 characters long and values can be a maximum of 512
- characters long.
+ for storing additional information about the object in a structured format, and
+ querying for objects via API or the dashboard.
+
+ Keys are strings with a maximum length of 64 characters. Values are strings with
+ a maximum length of 512 characters.
model: The ID of the [Model](https://platform.openai.com/docs/api-reference/models) to
be used to execute this run. If a value is provided here, it will override the
@@ -357,7 +364,8 @@ def create_and_run(
make the output more random, while lower values like 0.2 will make it more
focused and deterministic.
- thread: If no thread is provided, an empty thread will be created.
+ thread: Options to create a new thread. If no thread is provided when running a request,
+ an empty thread will be created.
tool_choice: Controls which (if any) tool is called by the model. `none` means the model will
not call any tools and instead generates a message. `auto` is the default value
@@ -403,7 +411,7 @@ def create_and_run(
instructions: Optional[str] | NotGiven = NOT_GIVEN,
max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN,
max_prompt_tokens: Optional[int] | NotGiven = NOT_GIVEN,
- metadata: Optional[object] | NotGiven = NOT_GIVEN,
+ metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
model: Union[str, ChatModel, None] | NotGiven = NOT_GIVEN,
parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN,
@@ -449,9 +457,11 @@ def create_and_run(
`incomplete_details` for more info.
metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
- for storing additional information about the object in a structured format. Keys
- can be a maximum of 64 characters long and values can be a maximum of 512
- characters long.
+ for storing additional information about the object in a structured format, and
+ querying for objects via API or the dashboard.
+
+ Keys are strings with a maximum length of 64 characters. Values are strings with
+ a maximum length of 512 characters.
model: The ID of the [Model](https://platform.openai.com/docs/api-reference/models) to
be used to execute this run. If a value is provided here, it will override the
@@ -487,7 +497,8 @@ def create_and_run(
make the output more random, while lower values like 0.2 will make it more
focused and deterministic.
- thread: If no thread is provided, an empty thread will be created.
+ thread: Options to create a new thread. If no thread is provided when running a request,
+ an empty thread will be created.
tool_choice: Controls which (if any) tool is called by the model. `none` means the model will
not call any tools and instead generates a message. `auto` is the default value
@@ -533,7 +544,7 @@ def create_and_run(
instructions: Optional[str] | NotGiven = NOT_GIVEN,
max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN,
max_prompt_tokens: Optional[int] | NotGiven = NOT_GIVEN,
- metadata: Optional[object] | NotGiven = NOT_GIVEN,
+ metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
model: Union[str, ChatModel, None] | NotGiven = NOT_GIVEN,
parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN,
@@ -579,9 +590,11 @@ def create_and_run(
`incomplete_details` for more info.
metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
- for storing additional information about the object in a structured format. Keys
- can be a maximum of 64 characters long and values can be a maximum of 512
- characters long.
+ for storing additional information about the object in a structured format, and
+ querying for objects via API or the dashboard.
+
+ Keys are strings with a maximum length of 64 characters. Values are strings with
+ a maximum length of 512 characters.
model: The ID of the [Model](https://platform.openai.com/docs/api-reference/models) to
be used to execute this run. If a value is provided here, it will override the
@@ -617,7 +630,8 @@ def create_and_run(
make the output more random, while lower values like 0.2 will make it more
focused and deterministic.
- thread: If no thread is provided, an empty thread will be created.
+ thread: Options to create a new thread. If no thread is provided when running a request,
+ an empty thread will be created.
tool_choice: Controls which (if any) tool is called by the model. `none` means the model will
not call any tools and instead generates a message. `auto` is the default value
@@ -662,7 +676,7 @@ def create_and_run(
instructions: Optional[str] | NotGiven = NOT_GIVEN,
max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN,
max_prompt_tokens: Optional[int] | NotGiven = NOT_GIVEN,
- metadata: Optional[object] | NotGiven = NOT_GIVEN,
+ metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
model: Union[str, ChatModel, None] | NotGiven = NOT_GIVEN,
parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN,
@@ -720,7 +734,7 @@ def create_and_run_poll(
instructions: Optional[str] | NotGiven = NOT_GIVEN,
max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN,
max_prompt_tokens: Optional[int] | NotGiven = NOT_GIVEN,
- metadata: Optional[object] | NotGiven = NOT_GIVEN,
+ metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
model: Union[str, ChatModel, None] | NotGiven = NOT_GIVEN,
parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN,
@@ -776,7 +790,7 @@ def create_and_run_stream(
instructions: Optional[str] | NotGiven = NOT_GIVEN,
max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN,
max_prompt_tokens: Optional[int] | NotGiven = NOT_GIVEN,
- metadata: Optional[object] | NotGiven = NOT_GIVEN,
+ metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
model: Union[str, ChatModel, None] | NotGiven = NOT_GIVEN,
parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN,
@@ -805,7 +819,7 @@ def create_and_run_stream(
instructions: Optional[str] | NotGiven = NOT_GIVEN,
max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN,
max_prompt_tokens: Optional[int] | NotGiven = NOT_GIVEN,
- metadata: Optional[object] | NotGiven = NOT_GIVEN,
+ metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
model: Union[str, ChatModel, None] | NotGiven = NOT_GIVEN,
parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN,
@@ -834,7 +848,7 @@ def create_and_run_stream(
instructions: Optional[str] | NotGiven = NOT_GIVEN,
max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN,
max_prompt_tokens: Optional[int] | NotGiven = NOT_GIVEN,
- metadata: Optional[object] | NotGiven = NOT_GIVEN,
+ metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
model: Union[str, ChatModel, None] | NotGiven = NOT_GIVEN,
parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN,
@@ -926,7 +940,7 @@ async def create(
self,
*,
messages: Iterable[thread_create_params.Message] | NotGiven = NOT_GIVEN,
- metadata: Optional[object] | NotGiven = NOT_GIVEN,
+ metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
tool_resources: Optional[thread_create_params.ToolResources] | NotGiven = NOT_GIVEN,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
@@ -943,9 +957,11 @@ async def create(
start the thread with.
metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
- for storing additional information about the object in a structured format. Keys
- can be a maximum of 64 characters long and values can be a maximum of 512
- characters long.
+ for storing additional information about the object in a structured format, and
+ querying for objects via API or the dashboard.
+
+ Keys are strings with a maximum length of 64 characters. Values are strings with
+ a maximum length of 512 characters.
tool_resources: A set of resources that are made available to the assistant's tools in this
thread. The resources are specific to the type of tool. For example, the
@@ -1015,7 +1031,7 @@ async def update(
self,
thread_id: str,
*,
- metadata: Optional[object] | NotGiven = NOT_GIVEN,
+ metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
tool_resources: Optional[thread_update_params.ToolResources] | NotGiven = NOT_GIVEN,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
@@ -1029,9 +1045,11 @@ async def update(
Args:
metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
- for storing additional information about the object in a structured format. Keys
- can be a maximum of 64 characters long and values can be a maximum of 512
- characters long.
+ for storing additional information about the object in a structured format, and
+ querying for objects via API or the dashboard.
+
+ Keys are strings with a maximum length of 64 characters. Values are strings with
+ a maximum length of 512 characters.
tool_resources: A set of resources that are made available to the assistant's tools in this
thread. The resources are specific to the type of tool. For example, the
@@ -1106,7 +1124,7 @@ async def create_and_run(
instructions: Optional[str] | NotGiven = NOT_GIVEN,
max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN,
max_prompt_tokens: Optional[int] | NotGiven = NOT_GIVEN,
- metadata: Optional[object] | NotGiven = NOT_GIVEN,
+ metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
model: Union[str, ChatModel, None] | NotGiven = NOT_GIVEN,
parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN,
@@ -1149,9 +1167,11 @@ async def create_and_run(
`incomplete_details` for more info.
metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
- for storing additional information about the object in a structured format. Keys
- can be a maximum of 64 characters long and values can be a maximum of 512
- characters long.
+ for storing additional information about the object in a structured format, and
+ querying for objects via API or the dashboard.
+
+ Keys are strings with a maximum length of 64 characters. Values are strings with
+ a maximum length of 512 characters.
model: The ID of the [Model](https://platform.openai.com/docs/api-reference/models) to
be used to execute this run. If a value is provided here, it will override the
@@ -1191,7 +1211,8 @@ async def create_and_run(
make the output more random, while lower values like 0.2 will make it more
focused and deterministic.
- thread: If no thread is provided, an empty thread will be created.
+ thread: Options to create a new thread. If no thread is provided when running a request,
+ an empty thread will be created.
tool_choice: Controls which (if any) tool is called by the model. `none` means the model will
not call any tools and instead generates a message. `auto` is the default value
@@ -1237,7 +1258,7 @@ async def create_and_run(
instructions: Optional[str] | NotGiven = NOT_GIVEN,
max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN,
max_prompt_tokens: Optional[int] | NotGiven = NOT_GIVEN,
- metadata: Optional[object] | NotGiven = NOT_GIVEN,
+ metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
model: Union[str, ChatModel, None] | NotGiven = NOT_GIVEN,
parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN,
@@ -1283,9 +1304,11 @@ async def create_and_run(
`incomplete_details` for more info.
metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
- for storing additional information about the object in a structured format. Keys
- can be a maximum of 64 characters long and values can be a maximum of 512
- characters long.
+ for storing additional information about the object in a structured format, and
+ querying for objects via API or the dashboard.
+
+ Keys are strings with a maximum length of 64 characters. Values are strings with
+ a maximum length of 512 characters.
model: The ID of the [Model](https://platform.openai.com/docs/api-reference/models) to
be used to execute this run. If a value is provided here, it will override the
@@ -1321,7 +1344,8 @@ async def create_and_run(
make the output more random, while lower values like 0.2 will make it more
focused and deterministic.
- thread: If no thread is provided, an empty thread will be created.
+ thread: Options to create a new thread. If no thread is provided when running a request,
+ an empty thread will be created.
tool_choice: Controls which (if any) tool is called by the model. `none` means the model will
not call any tools and instead generates a message. `auto` is the default value
@@ -1367,7 +1391,7 @@ async def create_and_run(
instructions: Optional[str] | NotGiven = NOT_GIVEN,
max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN,
max_prompt_tokens: Optional[int] | NotGiven = NOT_GIVEN,
- metadata: Optional[object] | NotGiven = NOT_GIVEN,
+ metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
model: Union[str, ChatModel, None] | NotGiven = NOT_GIVEN,
parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN,
@@ -1413,9 +1437,11 @@ async def create_and_run(
`incomplete_details` for more info.
metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
- for storing additional information about the object in a structured format. Keys
- can be a maximum of 64 characters long and values can be a maximum of 512
- characters long.
+ for storing additional information about the object in a structured format, and
+ querying for objects via API or the dashboard.
+
+ Keys are strings with a maximum length of 64 characters. Values are strings with
+ a maximum length of 512 characters.
model: The ID of the [Model](https://platform.openai.com/docs/api-reference/models) to
be used to execute this run. If a value is provided here, it will override the
@@ -1451,7 +1477,8 @@ async def create_and_run(
make the output more random, while lower values like 0.2 will make it more
focused and deterministic.
- thread: If no thread is provided, an empty thread will be created.
+ thread: Options to create a new thread. If no thread is provided when running a request,
+ an empty thread will be created.
tool_choice: Controls which (if any) tool is called by the model. `none` means the model will
not call any tools and instead generates a message. `auto` is the default value
@@ -1496,7 +1523,7 @@ async def create_and_run(
instructions: Optional[str] | NotGiven = NOT_GIVEN,
max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN,
max_prompt_tokens: Optional[int] | NotGiven = NOT_GIVEN,
- metadata: Optional[object] | NotGiven = NOT_GIVEN,
+ metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
model: Union[str, ChatModel, None] | NotGiven = NOT_GIVEN,
parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN,
@@ -1554,7 +1581,7 @@ async def create_and_run_poll(
instructions: Optional[str] | NotGiven = NOT_GIVEN,
max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN,
max_prompt_tokens: Optional[int] | NotGiven = NOT_GIVEN,
- metadata: Optional[object] | NotGiven = NOT_GIVEN,
+ metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
model: Union[str, ChatModel, None] | NotGiven = NOT_GIVEN,
parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN,
@@ -1612,7 +1639,7 @@ def create_and_run_stream(
instructions: Optional[str] | NotGiven = NOT_GIVEN,
max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN,
max_prompt_tokens: Optional[int] | NotGiven = NOT_GIVEN,
- metadata: Optional[object] | NotGiven = NOT_GIVEN,
+ metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
model: Union[str, ChatModel, None] | NotGiven = NOT_GIVEN,
parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN,
@@ -1641,7 +1668,7 @@ def create_and_run_stream(
instructions: Optional[str] | NotGiven = NOT_GIVEN,
max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN,
max_prompt_tokens: Optional[int] | NotGiven = NOT_GIVEN,
- metadata: Optional[object] | NotGiven = NOT_GIVEN,
+ metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
model: Union[str, ChatModel, None] | NotGiven = NOT_GIVEN,
parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN,
@@ -1670,7 +1697,7 @@ def create_and_run_stream(
instructions: Optional[str] | NotGiven = NOT_GIVEN,
max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN,
max_prompt_tokens: Optional[int] | NotGiven = NOT_GIVEN,
- metadata: Optional[object] | NotGiven = NOT_GIVEN,
+ metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
model: Union[str, ChatModel, None] | NotGiven = NOT_GIVEN,
parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN,
diff --git a/src/openai/resources/beta/vector_stores/vector_stores.py b/src/openai/resources/beta/vector_stores/vector_stores.py
index 6b44c602f1..1da52fb3c7 100644
--- a/src/openai/resources/beta/vector_stores/vector_stores.py
+++ b/src/openai/resources/beta/vector_stores/vector_stores.py
@@ -41,6 +41,7 @@
)
from ...._base_client import AsyncPaginator, make_request_options
from ....types.beta.vector_store import VectorStore
+from ....types.shared_params.metadata import Metadata
from ....types.beta.vector_store_deleted import VectorStoreDeleted
from ....types.beta.file_chunking_strategy_param import FileChunkingStrategyParam
@@ -81,7 +82,7 @@ def create(
chunking_strategy: FileChunkingStrategyParam | NotGiven = NOT_GIVEN,
expires_after: vector_store_create_params.ExpiresAfter | NotGiven = NOT_GIVEN,
file_ids: List[str] | NotGiven = NOT_GIVEN,
- metadata: Optional[object] | NotGiven = NOT_GIVEN,
+ metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
name: str | NotGiven = NOT_GIVEN,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
@@ -104,9 +105,11 @@ def create(
files.
metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
- for storing additional information about the object in a structured format. Keys
- can be a maximum of 64 characters long and values can be a maximum of 512
- characters long.
+ for storing additional information about the object in a structured format, and
+ querying for objects via API or the dashboard.
+
+ Keys are strings with a maximum length of 64 characters. Values are strings with
+ a maximum length of 512 characters.
name: The name of the vector store.
@@ -176,7 +179,7 @@ def update(
vector_store_id: str,
*,
expires_after: Optional[vector_store_update_params.ExpiresAfter] | NotGiven = NOT_GIVEN,
- metadata: Optional[object] | NotGiven = NOT_GIVEN,
+ metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
name: Optional[str] | NotGiven = NOT_GIVEN,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
@@ -192,9 +195,11 @@ def update(
expires_after: The expiration policy for a vector store.
metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
- for storing additional information about the object in a structured format. Keys
- can be a maximum of 64 characters long and values can be a maximum of 512
- characters long.
+ for storing additional information about the object in a structured format, and
+ querying for objects via API or the dashboard.
+
+ Keys are strings with a maximum length of 64 characters. Values are strings with
+ a maximum length of 512 characters.
name: The name of the vector store.
@@ -359,7 +364,7 @@ async def create(
chunking_strategy: FileChunkingStrategyParam | NotGiven = NOT_GIVEN,
expires_after: vector_store_create_params.ExpiresAfter | NotGiven = NOT_GIVEN,
file_ids: List[str] | NotGiven = NOT_GIVEN,
- metadata: Optional[object] | NotGiven = NOT_GIVEN,
+ metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
name: str | NotGiven = NOT_GIVEN,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
@@ -382,9 +387,11 @@ async def create(
files.
metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
- for storing additional information about the object in a structured format. Keys
- can be a maximum of 64 characters long and values can be a maximum of 512
- characters long.
+ for storing additional information about the object in a structured format, and
+ querying for objects via API or the dashboard.
+
+ Keys are strings with a maximum length of 64 characters. Values are strings with
+ a maximum length of 512 characters.
name: The name of the vector store.
@@ -454,7 +461,7 @@ async def update(
vector_store_id: str,
*,
expires_after: Optional[vector_store_update_params.ExpiresAfter] | NotGiven = NOT_GIVEN,
- metadata: Optional[object] | NotGiven = NOT_GIVEN,
+ metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
name: Optional[str] | NotGiven = NOT_GIVEN,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
@@ -470,9 +477,11 @@ async def update(
expires_after: The expiration policy for a vector store.
metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
- for storing additional information about the object in a structured format. Keys
- can be a maximum of 64 characters long and values can be a maximum of 512
- characters long.
+ for storing additional information about the object in a structured format, and
+ querying for objects via API or the dashboard.
+
+ Keys are strings with a maximum length of 64 characters. Values are strings with
+ a maximum length of 512 characters.
name: The name of the vector store.
diff --git a/src/openai/resources/chat/completions.py b/src/openai/resources/chat/completions.py
index a9685c507a..34f6b50301 100644
--- a/src/openai/resources/chat/completions.py
+++ b/src/openai/resources/chat/completions.py
@@ -28,6 +28,7 @@
from ..._base_client import make_request_options
from ...types.chat_model import ChatModel
from ...types.chat.chat_completion import ChatCompletion
+from ...types.shared_params.metadata import Metadata
from ...types.chat.chat_completion_chunk import ChatCompletionChunk
from ...types.chat.chat_completion_modality import ChatCompletionModality
from ...types.chat.chat_completion_tool_param import ChatCompletionToolParam
@@ -75,7 +76,7 @@ def create(
logprobs: Optional[bool] | NotGiven = NOT_GIVEN,
max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN,
max_tokens: Optional[int] | NotGiven = NOT_GIVEN,
- metadata: Optional[Dict[str, str]] | NotGiven = NOT_GIVEN,
+ metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
modalities: Optional[List[ChatCompletionModality]] | NotGiven = NOT_GIVEN,
n: Optional[int] | NotGiven = NOT_GIVEN,
parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
@@ -179,8 +180,12 @@ def create(
compatible with
[o1 series models](https://platform.openai.com/docs/guides/reasoning).
- metadata: Developer-defined tags and values used for filtering completions in the
- [dashboard](https://platform.openai.com/chat-completions).
+ metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
+ for storing additional information about the object in a structured format, and
+ querying for objects via API or the dashboard.
+
+ Keys are strings with a maximum length of 64 characters. Values are strings with
+ a maximum length of 512 characters.
modalities: Output types that you would like the model to generate for this request. Most
models are capable of generating text, which is the default:
@@ -246,9 +251,9 @@ def create(
utilize scale tier credits until they are exhausted.
- If set to 'auto', and the Project is not Scale tier enabled, the request will
be processed using the default service tier with a lower uptime SLA and no
- latency guarentee.
+ latency guarantee.
- If set to 'default', the request will be processed using the default service
- tier with a lower uptime SLA and no latency guarentee.
+ tier with a lower uptime SLA and no latency guarantee.
- When not set, the default behavior is 'auto'.
stop: Up to 4 sequences where the API will stop generating further tokens.
@@ -324,7 +329,7 @@ def create(
logprobs: Optional[bool] | NotGiven = NOT_GIVEN,
max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN,
max_tokens: Optional[int] | NotGiven = NOT_GIVEN,
- metadata: Optional[Dict[str, str]] | NotGiven = NOT_GIVEN,
+ metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
modalities: Optional[List[ChatCompletionModality]] | NotGiven = NOT_GIVEN,
n: Optional[int] | NotGiven = NOT_GIVEN,
parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
@@ -434,8 +439,12 @@ def create(
compatible with
[o1 series models](https://platform.openai.com/docs/guides/reasoning).
- metadata: Developer-defined tags and values used for filtering completions in the
- [dashboard](https://platform.openai.com/chat-completions).
+ metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
+ for storing additional information about the object in a structured format, and
+ querying for objects via API or the dashboard.
+
+ Keys are strings with a maximum length of 64 characters. Values are strings with
+ a maximum length of 512 characters.
modalities: Output types that you would like the model to generate for this request. Most
models are capable of generating text, which is the default:
@@ -501,9 +510,9 @@ def create(
utilize scale tier credits until they are exhausted.
- If set to 'auto', and the Project is not Scale tier enabled, the request will
be processed using the default service tier with a lower uptime SLA and no
- latency guarentee.
+ latency guarantee.
- If set to 'default', the request will be processed using the default service
- tier with a lower uptime SLA and no latency guarentee.
+ tier with a lower uptime SLA and no latency guarantee.
- When not set, the default behavior is 'auto'.
stop: Up to 4 sequences where the API will stop generating further tokens.
@@ -572,7 +581,7 @@ def create(
logprobs: Optional[bool] | NotGiven = NOT_GIVEN,
max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN,
max_tokens: Optional[int] | NotGiven = NOT_GIVEN,
- metadata: Optional[Dict[str, str]] | NotGiven = NOT_GIVEN,
+ metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
modalities: Optional[List[ChatCompletionModality]] | NotGiven = NOT_GIVEN,
n: Optional[int] | NotGiven = NOT_GIVEN,
parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
@@ -682,8 +691,12 @@ def create(
compatible with
[o1 series models](https://platform.openai.com/docs/guides/reasoning).
- metadata: Developer-defined tags and values used for filtering completions in the
- [dashboard](https://platform.openai.com/chat-completions).
+ metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
+ for storing additional information about the object in a structured format, and
+ querying for objects via API or the dashboard.
+
+ Keys are strings with a maximum length of 64 characters. Values are strings with
+ a maximum length of 512 characters.
modalities: Output types that you would like the model to generate for this request. Most
models are capable of generating text, which is the default:
@@ -749,9 +762,9 @@ def create(
utilize scale tier credits until they are exhausted.
- If set to 'auto', and the Project is not Scale tier enabled, the request will
be processed using the default service tier with a lower uptime SLA and no
- latency guarentee.
+ latency guarantee.
- If set to 'default', the request will be processed using the default service
- tier with a lower uptime SLA and no latency guarentee.
+ tier with a lower uptime SLA and no latency guarantee.
- When not set, the default behavior is 'auto'.
stop: Up to 4 sequences where the API will stop generating further tokens.
@@ -819,7 +832,7 @@ def create(
logprobs: Optional[bool] | NotGiven = NOT_GIVEN,
max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN,
max_tokens: Optional[int] | NotGiven = NOT_GIVEN,
- metadata: Optional[Dict[str, str]] | NotGiven = NOT_GIVEN,
+ metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
modalities: Optional[List[ChatCompletionModality]] | NotGiven = NOT_GIVEN,
n: Optional[int] | NotGiven = NOT_GIVEN,
parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
@@ -927,7 +940,7 @@ async def create(
logprobs: Optional[bool] | NotGiven = NOT_GIVEN,
max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN,
max_tokens: Optional[int] | NotGiven = NOT_GIVEN,
- metadata: Optional[Dict[str, str]] | NotGiven = NOT_GIVEN,
+ metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
modalities: Optional[List[ChatCompletionModality]] | NotGiven = NOT_GIVEN,
n: Optional[int] | NotGiven = NOT_GIVEN,
parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
@@ -1031,8 +1044,12 @@ async def create(
compatible with
[o1 series models](https://platform.openai.com/docs/guides/reasoning).
- metadata: Developer-defined tags and values used for filtering completions in the
- [dashboard](https://platform.openai.com/chat-completions).
+ metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
+ for storing additional information about the object in a structured format, and
+ querying for objects via API or the dashboard.
+
+ Keys are strings with a maximum length of 64 characters. Values are strings with
+ a maximum length of 512 characters.
modalities: Output types that you would like the model to generate for this request. Most
models are capable of generating text, which is the default:
@@ -1098,9 +1115,9 @@ async def create(
utilize scale tier credits until they are exhausted.
- If set to 'auto', and the Project is not Scale tier enabled, the request will
be processed using the default service tier with a lower uptime SLA and no
- latency guarentee.
+ latency guarantee.
- If set to 'default', the request will be processed using the default service
- tier with a lower uptime SLA and no latency guarentee.
+ tier with a lower uptime SLA and no latency guarantee.
- When not set, the default behavior is 'auto'.
stop: Up to 4 sequences where the API will stop generating further tokens.
@@ -1176,7 +1193,7 @@ async def create(
logprobs: Optional[bool] | NotGiven = NOT_GIVEN,
max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN,
max_tokens: Optional[int] | NotGiven = NOT_GIVEN,
- metadata: Optional[Dict[str, str]] | NotGiven = NOT_GIVEN,
+ metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
modalities: Optional[List[ChatCompletionModality]] | NotGiven = NOT_GIVEN,
n: Optional[int] | NotGiven = NOT_GIVEN,
parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
@@ -1286,8 +1303,12 @@ async def create(
compatible with
[o1 series models](https://platform.openai.com/docs/guides/reasoning).
- metadata: Developer-defined tags and values used for filtering completions in the
- [dashboard](https://platform.openai.com/chat-completions).
+ metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
+ for storing additional information about the object in a structured format, and
+ querying for objects via API or the dashboard.
+
+ Keys are strings with a maximum length of 64 characters. Values are strings with
+ a maximum length of 512 characters.
modalities: Output types that you would like the model to generate for this request. Most
models are capable of generating text, which is the default:
@@ -1353,9 +1374,9 @@ async def create(
utilize scale tier credits until they are exhausted.
- If set to 'auto', and the Project is not Scale tier enabled, the request will
be processed using the default service tier with a lower uptime SLA and no
- latency guarentee.
+ latency guarantee.
- If set to 'default', the request will be processed using the default service
- tier with a lower uptime SLA and no latency guarentee.
+ tier with a lower uptime SLA and no latency guarantee.
- When not set, the default behavior is 'auto'.
stop: Up to 4 sequences where the API will stop generating further tokens.
@@ -1424,7 +1445,7 @@ async def create(
logprobs: Optional[bool] | NotGiven = NOT_GIVEN,
max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN,
max_tokens: Optional[int] | NotGiven = NOT_GIVEN,
- metadata: Optional[Dict[str, str]] | NotGiven = NOT_GIVEN,
+ metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
modalities: Optional[List[ChatCompletionModality]] | NotGiven = NOT_GIVEN,
n: Optional[int] | NotGiven = NOT_GIVEN,
parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
@@ -1534,8 +1555,12 @@ async def create(
compatible with
[o1 series models](https://platform.openai.com/docs/guides/reasoning).
- metadata: Developer-defined tags and values used for filtering completions in the
- [dashboard](https://platform.openai.com/chat-completions).
+ metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
+ for storing additional information about the object in a structured format, and
+ querying for objects via API or the dashboard.
+
+ Keys are strings with a maximum length of 64 characters. Values are strings with
+ a maximum length of 512 characters.
modalities: Output types that you would like the model to generate for this request. Most
models are capable of generating text, which is the default:
@@ -1601,9 +1626,9 @@ async def create(
utilize scale tier credits until they are exhausted.
- If set to 'auto', and the Project is not Scale tier enabled, the request will
be processed using the default service tier with a lower uptime SLA and no
- latency guarentee.
+ latency guarantee.
- If set to 'default', the request will be processed using the default service
- tier with a lower uptime SLA and no latency guarentee.
+ tier with a lower uptime SLA and no latency guarantee.
- When not set, the default behavior is 'auto'.
stop: Up to 4 sequences where the API will stop generating further tokens.
@@ -1671,7 +1696,7 @@ async def create(
logprobs: Optional[bool] | NotGiven = NOT_GIVEN,
max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN,
max_tokens: Optional[int] | NotGiven = NOT_GIVEN,
- metadata: Optional[Dict[str, str]] | NotGiven = NOT_GIVEN,
+ metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
modalities: Optional[List[ChatCompletionModality]] | NotGiven = NOT_GIVEN,
n: Optional[int] | NotGiven = NOT_GIVEN,
parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
diff --git a/src/openai/types/__init__.py b/src/openai/types/__init__.py
index 72950f2491..7abb22f239 100644
--- a/src/openai/types/__init__.py
+++ b/src/openai/types/__init__.py
@@ -6,6 +6,7 @@
from .image import Image as Image
from .model import Model as Model
from .shared import (
+ Metadata as Metadata,
ErrorObject as ErrorObject,
FunctionDefinition as FunctionDefinition,
FunctionParameters as FunctionParameters,
diff --git a/src/openai/types/audio/transcription_create_params.py b/src/openai/types/audio/transcription_create_params.py
index 88805affbd..f1779c35e6 100644
--- a/src/openai/types/audio/transcription_create_params.py
+++ b/src/openai/types/audio/transcription_create_params.py
@@ -30,8 +30,8 @@ class TranscriptionCreateParams(TypedDict, total=False):
"""The language of the input audio.
Supplying the input language in
- [ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) format will
- improve accuracy and latency.
+ [ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) (e.g. `en`)
+ format will improve accuracy and latency.
"""
prompt: str
diff --git a/src/openai/types/batch.py b/src/openai/types/batch.py
index ac3d7ea119..35de90ac85 100644
--- a/src/openai/types/batch.py
+++ b/src/openai/types/batch.py
@@ -1,11 +1,11 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-import builtins
from typing import List, Optional
from typing_extensions import Literal
from .._models import BaseModel
from .batch_error import BatchError
+from .shared.metadata import Metadata
from .batch_request_counts import BatchRequestCounts
__all__ = ["Batch", "Errors"]
@@ -70,12 +70,14 @@ class Batch(BaseModel):
in_progress_at: Optional[int] = None
"""The Unix timestamp (in seconds) for when the batch started processing."""
- metadata: Optional[builtins.object] = None
+ metadata: Optional[Metadata] = None
"""Set of 16 key-value pairs that can be attached to an object.
This can be useful for storing additional information about the object in a
- structured format. Keys can be a maximum of 64 characters long and values can be
- a maximum of 512 characters long.
+ structured format, and querying for objects via API or the dashboard.
+
+ Keys are strings with a maximum length of 64 characters. Values are strings with
+ a maximum length of 512 characters.
"""
output_file_id: Optional[str] = None
diff --git a/src/openai/types/batch_create_params.py b/src/openai/types/batch_create_params.py
index b30c4d4658..e5be1d2bac 100644
--- a/src/openai/types/batch_create_params.py
+++ b/src/openai/types/batch_create_params.py
@@ -2,9 +2,11 @@
from __future__ import annotations
-from typing import Dict, Optional
+from typing import Optional
from typing_extensions import Literal, Required, TypedDict
+from .shared_params.metadata import Metadata
+
__all__ = ["BatchCreateParams"]
@@ -35,5 +37,12 @@ class BatchCreateParams(TypedDict, total=False):
requests, and can be up to 200 MB in size.
"""
- metadata: Optional[Dict[str, str]]
- """Optional custom metadata for the batch."""
+ metadata: Optional[Metadata]
+ """Set of 16 key-value pairs that can be attached to an object.
+
+ This can be useful for storing additional information about the object in a
+ structured format, and querying for objects via API or the dashboard.
+
+ Keys are strings with a maximum length of 64 characters. Values are strings with
+ a maximum length of 512 characters.
+ """
diff --git a/src/openai/types/beta/assistant.py b/src/openai/types/beta/assistant.py
index 3c8b8e403b..58421e0f66 100644
--- a/src/openai/types/beta/assistant.py
+++ b/src/openai/types/beta/assistant.py
@@ -5,6 +5,7 @@
from ..._models import BaseModel
from .assistant_tool import AssistantTool
+from ..shared.metadata import Metadata
from .assistant_response_format_option import AssistantResponseFormatOption
__all__ = ["Assistant", "ToolResources", "ToolResourcesCodeInterpreter", "ToolResourcesFileSearch"]
@@ -51,12 +52,14 @@ class Assistant(BaseModel):
The maximum length is 256,000 characters.
"""
- metadata: Optional[object] = None
+ metadata: Optional[Metadata] = None
"""Set of 16 key-value pairs that can be attached to an object.
This can be useful for storing additional information about the object in a
- structured format. Keys can be a maximum of 64 characters long and values can be
- a maximum of 512 characters long.
+ structured format, and querying for objects via API or the dashboard.
+
+ Keys are strings with a maximum length of 64 characters. Values are strings with
+ a maximum length of 512 characters.
"""
model: str
diff --git a/src/openai/types/beta/assistant_create_params.py b/src/openai/types/beta/assistant_create_params.py
index 568b223ce7..e205856395 100644
--- a/src/openai/types/beta/assistant_create_params.py
+++ b/src/openai/types/beta/assistant_create_params.py
@@ -7,6 +7,7 @@
from ..chat_model import ChatModel
from .assistant_tool_param import AssistantToolParam
+from ..shared_params.metadata import Metadata
from .file_chunking_strategy_param import FileChunkingStrategyParam
from .assistant_response_format_option_param import AssistantResponseFormatOptionParam
@@ -39,12 +40,14 @@ class AssistantCreateParams(TypedDict, total=False):
The maximum length is 256,000 characters.
"""
- metadata: Optional[object]
+ metadata: Optional[Metadata]
"""Set of 16 key-value pairs that can be attached to an object.
This can be useful for storing additional information about the object in a
- structured format. Keys can be a maximum of 64 characters long and values can be
- a maximum of 512 characters long.
+ structured format, and querying for objects via API or the dashboard.
+
+ Keys are strings with a maximum length of 64 characters. Values are strings with
+ a maximum length of 512 characters.
"""
name: Optional[str]
@@ -130,12 +133,14 @@ class ToolResourcesFileSearchVectorStore(TypedDict, total=False):
store.
"""
- metadata: object
- """Set of 16 key-value pairs that can be attached to a vector store.
+ metadata: Optional[Metadata]
+ """Set of 16 key-value pairs that can be attached to an object.
+
+ This can be useful for storing additional information about the object in a
+ structured format, and querying for objects via API or the dashboard.
- This can be useful for storing additional information about the vector store in
- a structured format. Keys can be a maximum of 64 characters long and values can
- be a maximum of 512 characters long.
+ Keys are strings with a maximum length of 64 characters. Values are strings with
+ a maximum length of 512 characters.
"""
diff --git a/src/openai/types/beta/assistant_update_params.py b/src/openai/types/beta/assistant_update_params.py
index 9a66e41ab3..35065ef61b 100644
--- a/src/openai/types/beta/assistant_update_params.py
+++ b/src/openai/types/beta/assistant_update_params.py
@@ -6,6 +6,7 @@
from typing_extensions import TypedDict
from .assistant_tool_param import AssistantToolParam
+from ..shared_params.metadata import Metadata
from .assistant_response_format_option_param import AssistantResponseFormatOptionParam
__all__ = ["AssistantUpdateParams", "ToolResources", "ToolResourcesCodeInterpreter", "ToolResourcesFileSearch"]
@@ -21,12 +22,14 @@ class AssistantUpdateParams(TypedDict, total=False):
The maximum length is 256,000 characters.
"""
- metadata: Optional[object]
+ metadata: Optional[Metadata]
"""Set of 16 key-value pairs that can be attached to an object.
This can be useful for storing additional information about the object in a
- structured format. Keys can be a maximum of 64 characters long and values can be
- a maximum of 512 characters long.
+ structured format, and querying for objects via API or the dashboard.
+
+ Keys are strings with a maximum length of 64 characters. Values are strings with
+ a maximum length of 512 characters.
"""
model: str
diff --git a/src/openai/types/beta/realtime/conversation_item_create_event.py b/src/openai/types/beta/realtime/conversation_item_create_event.py
index c4f72b9aff..f19d552a92 100644
--- a/src/openai/types/beta/realtime/conversation_item_create_event.py
+++ b/src/openai/types/beta/realtime/conversation_item_create_event.py
@@ -20,10 +20,10 @@ class ConversationItemCreateEvent(BaseModel):
"""Optional client-generated ID used to identify this event."""
previous_item_id: Optional[str] = None
- """
- The ID of the preceding item after which the new item will be inserted. If not
- set, the new item will be appended to the end of the conversation. If set to
- `root`, the new item will be added to the beginning of the conversation. If set
- to an existing ID, it allows an item to be inserted mid-conversation. If the ID
- cannot be found, an error will be returned and the item will not be added.
+ """The ID of the preceding item after which the new item will be inserted.
+
+ If not set, the new item will be appended to the end of the conversation. If set
+ to `root`, the new item will be added to the beginning of the conversation. If
+ set to an existing ID, it allows an item to be inserted mid-conversation. If the
+ ID cannot be found, an error will be returned and the item will not be added.
"""
diff --git a/src/openai/types/beta/realtime/conversation_item_create_event_param.py b/src/openai/types/beta/realtime/conversation_item_create_event_param.py
index 6da5a63a9d..693d0fd54d 100644
--- a/src/openai/types/beta/realtime/conversation_item_create_event_param.py
+++ b/src/openai/types/beta/realtime/conversation_item_create_event_param.py
@@ -20,10 +20,10 @@ class ConversationItemCreateEventParam(TypedDict, total=False):
"""Optional client-generated ID used to identify this event."""
previous_item_id: str
- """
- The ID of the preceding item after which the new item will be inserted. If not
- set, the new item will be appended to the end of the conversation. If set to
- `root`, the new item will be added to the beginning of the conversation. If set
- to an existing ID, it allows an item to be inserted mid-conversation. If the ID
- cannot be found, an error will be returned and the item will not be added.
+ """The ID of the preceding item after which the new item will be inserted.
+
+ If not set, the new item will be appended to the end of the conversation. If set
+ to `root`, the new item will be added to the beginning of the conversation. If
+ set to an existing ID, it allows an item to be inserted mid-conversation. If the
+ ID cannot be found, an error will be returned and the item will not be added.
"""
diff --git a/src/openai/types/beta/realtime/realtime_response.py b/src/openai/types/beta/realtime/realtime_response.py
index 3e1b1406c0..4c3c83d666 100644
--- a/src/openai/types/beta/realtime/realtime_response.py
+++ b/src/openai/types/beta/realtime/realtime_response.py
@@ -1,9 +1,10 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-from typing import List, Optional
+from typing import List, Union, Optional
from typing_extensions import Literal
from ...._models import BaseModel
+from ...shared.metadata import Metadata
from .conversation_item import ConversationItem
from .realtime_response_usage import RealtimeResponseUsage
from .realtime_response_status import RealtimeResponseStatus
@@ -15,8 +16,40 @@ class RealtimeResponse(BaseModel):
id: Optional[str] = None
"""The unique ID of the response."""
- metadata: Optional[object] = None
- """Developer-provided string key-value pairs associated with this response."""
+ conversation_id: Optional[str] = None
+ """
+ Which conversation the response is added to, determined by the `conversation`
+ field in the `response.create` event. If `auto`, the response will be added to
+ the default conversation and the value of `conversation_id` will be an id like
+ `conv_1234`. If `none`, the response will not be added to any conversation and
+ the value of `conversation_id` will be `null`. If responses are being triggered
+ by server VAD, the response will be added to the default conversation, thus the
+ `conversation_id` will be an id like `conv_1234`.
+ """
+
+ max_output_tokens: Union[int, Literal["inf"], None] = None
+ """
+ Maximum number of output tokens for a single assistant response, inclusive of
+ tool calls, that was used in this response.
+ """
+
+ metadata: Optional[Metadata] = None
+ """Set of 16 key-value pairs that can be attached to an object.
+
+ This can be useful for storing additional information about the object in a
+ structured format, and querying for objects via API or the dashboard.
+
+ Keys are strings with a maximum length of 64 characters. Values are strings with
+ a maximum length of 512 characters.
+ """
+
+ modalities: Optional[List[Literal["text", "audio"]]] = None
+ """The set of modalities the model used to respond.
+
+ If there are multiple modalities, the model will pick one, for example if
+ `modalities` is `["text", "audio"]`, the model could be responding in either
+ text or audio.
+ """
object: Optional[Literal["realtime.response"]] = None
"""The object type, must be `realtime.response`."""
@@ -24,6 +57,9 @@ class RealtimeResponse(BaseModel):
output: Optional[List[ConversationItem]] = None
"""The list of output items generated by the response."""
+ output_audio_format: Optional[Literal["pcm16", "g711_ulaw", "g711_alaw"]] = None
+ """The format of output audio. Options are `pcm16`, `g711_ulaw`, or `g711_alaw`."""
+
status: Optional[Literal["completed", "cancelled", "failed", "incomplete"]] = None
"""
The final status of the response (`completed`, `cancelled`, `failed`, or
@@ -33,6 +69,9 @@ class RealtimeResponse(BaseModel):
status_details: Optional[RealtimeResponseStatus] = None
"""Additional details about the status."""
+ temperature: Optional[float] = None
+ """Sampling temperature for the model, limited to [0.6, 1.2]. Defaults to 0.8."""
+
usage: Optional[RealtimeResponseUsage] = None
"""Usage statistics for the Response, this will correspond to billing.
@@ -40,3 +79,9 @@ class RealtimeResponse(BaseModel):
to the Conversation, thus output from previous turns (text and audio tokens)
will become the input for later turns.
"""
+
+ voice: Optional[Literal["alloy", "ash", "ballad", "coral", "echo", "sage", "shimmer", "verse"]] = None
+ """
+ The voice the model used to respond. Current voice options are `alloy`, `ash`,
+ `ballad`, `coral`, `echo`, `sage`, `shimmer` and `verse`.
+ """
diff --git a/src/openai/types/beta/realtime/response_create_event.py b/src/openai/types/beta/realtime/response_create_event.py
index e4e5e7c68f..0801654bd8 100644
--- a/src/openai/types/beta/realtime/response_create_event.py
+++ b/src/openai/types/beta/realtime/response_create_event.py
@@ -4,6 +4,7 @@
from typing_extensions import Literal
from ...._models import BaseModel
+from ...shared.metadata import Metadata
from .conversation_item import ConversationItem
__all__ = ["ResponseCreateEvent", "Response", "ResponseTool"]
@@ -66,12 +67,14 @@ class Response(BaseModel):
`inf` for the maximum available tokens for a given model. Defaults to `inf`.
"""
- metadata: Optional[object] = None
+ metadata: Optional[Metadata] = None
"""Set of 16 key-value pairs that can be attached to an object.
This can be useful for storing additional information about the object in a
- structured format. Keys can be a maximum of 64 characters long and values can be
- a maximum of 512 characters long.
+ structured format, and querying for objects via API or the dashboard.
+
+ Keys are strings with a maximum length of 64 characters. Values are strings with
+ a maximum length of 512 characters.
"""
modalities: Optional[List[Literal["text", "audio"]]] = None
diff --git a/src/openai/types/beta/realtime/response_create_event_param.py b/src/openai/types/beta/realtime/response_create_event_param.py
index 7a4b5f086a..a87ef955e8 100644
--- a/src/openai/types/beta/realtime/response_create_event_param.py
+++ b/src/openai/types/beta/realtime/response_create_event_param.py
@@ -6,6 +6,7 @@
from typing_extensions import Literal, Required, TypedDict
from .conversation_item_param import ConversationItemParam
+from ...shared_params.metadata import Metadata
__all__ = ["ResponseCreateEventParam", "Response", "ResponseTool"]
@@ -67,12 +68,14 @@ class Response(TypedDict, total=False):
`inf` for the maximum available tokens for a given model. Defaults to `inf`.
"""
- metadata: Optional[object]
+ metadata: Optional[Metadata]
"""Set of 16 key-value pairs that can be attached to an object.
This can be useful for storing additional information about the object in a
- structured format. Keys can be a maximum of 64 characters long and values can be
- a maximum of 512 characters long.
+ structured format, and querying for objects via API or the dashboard.
+
+ Keys are strings with a maximum length of 64 characters. Values are strings with
+ a maximum length of 512 characters.
"""
modalities: List[Literal["text", "audio"]]
diff --git a/src/openai/types/beta/realtime/session_create_params.py b/src/openai/types/beta/realtime/session_create_params.py
index 3708efeecd..1502d83d39 100644
--- a/src/openai/types/beta/realtime/session_create_params.py
+++ b/src/openai/types/beta/realtime/session_create_params.py
@@ -22,8 +22,11 @@ class SessionCreateParams(TypedDict, total=False):
Configuration for input audio transcription, defaults to off and can be set to
`null` to turn off once on. Input audio transcription is not native to the
model, since the model consumes audio directly. Transcription runs
- asynchronously through Whisper and should be treated as rough guidance rather
- than the representation understood by the model.
+ asynchronously through
+ [OpenAI Whisper transcription](https://platform.openai.com/docs/api-reference/audio/createTranscription)
+ and should be treated as rough guidance rather than the representation
+ understood by the model. The client can optionally set the language and prompt
+ for transcription, these fields will be passed to the Whisper API.
"""
instructions: str
@@ -101,12 +104,28 @@ class SessionCreateParams(TypedDict, total=False):
class InputAudioTranscription(TypedDict, total=False):
+ language: str
+ """The language of the input audio.
+
+ Supplying the input language in
+ [ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) (e.g. `en`)
+ format will improve accuracy and latency.
+ """
+
model: str
"""
The model to use for transcription, `whisper-1` is the only currently supported
model.
"""
+ prompt: str
+ """An optional text to guide the model's style or continue a previous audio
+ segment.
+
+ The [prompt](https://platform.openai.com/docs/guides/speech-to-text#prompting)
+ should match the audio language.
+ """
+
class Tool(TypedDict, total=False):
description: str
diff --git a/src/openai/types/beta/realtime/session_create_response.py b/src/openai/types/beta/realtime/session_create_response.py
index 31f591b261..c26e62bef1 100644
--- a/src/openai/types/beta/realtime/session_create_response.py
+++ b/src/openai/types/beta/realtime/session_create_response.py
@@ -9,13 +9,13 @@
class ClientSecret(BaseModel):
- expires_at: Optional[int] = None
+ expires_at: int
"""Timestamp for when the token expires.
Currently, all tokens expire after one minute.
"""
- value: Optional[str] = None
+ value: str
"""
Ephemeral key usable in client environments to authenticate connections to the
Realtime API. Use this in client-side environments rather than a standard API
@@ -74,7 +74,7 @@ class TurnDetection(BaseModel):
class SessionCreateResponse(BaseModel):
- client_secret: Optional[ClientSecret] = None
+ client_secret: ClientSecret
"""Ephemeral key returned by the API."""
input_audio_format: Optional[str] = None
diff --git a/src/openai/types/beta/realtime/session_update_event.py b/src/openai/types/beta/realtime/session_update_event.py
index 322e588a4e..62fb0a3998 100644
--- a/src/openai/types/beta/realtime/session_update_event.py
+++ b/src/openai/types/beta/realtime/session_update_event.py
@@ -9,12 +9,28 @@
class SessionInputAudioTranscription(BaseModel):
+ language: Optional[str] = None
+ """The language of the input audio.
+
+ Supplying the input language in
+ [ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) (e.g. `en`)
+ format will improve accuracy and latency.
+ """
+
model: Optional[str] = None
"""
The model to use for transcription, `whisper-1` is the only currently supported
model.
"""
+ prompt: Optional[str] = None
+ """An optional text to guide the model's style or continue a previous audio
+ segment.
+
+ The [prompt](https://platform.openai.com/docs/guides/speech-to-text#prompting)
+ should match the audio language.
+ """
+
class SessionTool(BaseModel):
description: Optional[str] = None
@@ -78,8 +94,11 @@ class Session(BaseModel):
Configuration for input audio transcription, defaults to off and can be set to
`null` to turn off once on. Input audio transcription is not native to the
model, since the model consumes audio directly. Transcription runs
- asynchronously through Whisper and should be treated as rough guidance rather
- than the representation understood by the model.
+ asynchronously through
+ [OpenAI Whisper transcription](https://platform.openai.com/docs/api-reference/audio/createTranscription)
+ and should be treated as rough guidance rather than the representation
+ understood by the model. The client can optionally set the language and prompt
+ for transcription, these fields will be passed to the Whisper API.
"""
instructions: Optional[str] = None
diff --git a/src/openai/types/beta/realtime/session_update_event_param.py b/src/openai/types/beta/realtime/session_update_event_param.py
index c01d9b6887..133cdd91a1 100644
--- a/src/openai/types/beta/realtime/session_update_event_param.py
+++ b/src/openai/types/beta/realtime/session_update_event_param.py
@@ -15,12 +15,28 @@
class SessionInputAudioTranscription(TypedDict, total=False):
+ language: str
+ """The language of the input audio.
+
+ Supplying the input language in
+ [ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) (e.g. `en`)
+ format will improve accuracy and latency.
+ """
+
model: str
"""
The model to use for transcription, `whisper-1` is the only currently supported
model.
"""
+ prompt: str
+ """An optional text to guide the model's style or continue a previous audio
+ segment.
+
+ The [prompt](https://platform.openai.com/docs/guides/speech-to-text#prompting)
+ should match the audio language.
+ """
+
class SessionTool(TypedDict, total=False):
description: str
@@ -84,8 +100,11 @@ class Session(TypedDict, total=False):
Configuration for input audio transcription, defaults to off and can be set to
`null` to turn off once on. Input audio transcription is not native to the
model, since the model consumes audio directly. Transcription runs
- asynchronously through Whisper and should be treated as rough guidance rather
- than the representation understood by the model.
+ asynchronously through
+ [OpenAI Whisper transcription](https://platform.openai.com/docs/api-reference/audio/createTranscription)
+ and should be treated as rough guidance rather than the representation
+ understood by the model. The client can optionally set the language and prompt
+ for transcription, these fields will be passed to the Whisper API.
"""
instructions: str
diff --git a/src/openai/types/beta/thread.py b/src/openai/types/beta/thread.py
index 37d50ccb93..789f66e48b 100644
--- a/src/openai/types/beta/thread.py
+++ b/src/openai/types/beta/thread.py
@@ -4,6 +4,7 @@
from typing_extensions import Literal
from ..._models import BaseModel
+from ..shared.metadata import Metadata
__all__ = ["Thread", "ToolResources", "ToolResourcesCodeInterpreter", "ToolResourcesFileSearch"]
@@ -40,12 +41,14 @@ class Thread(BaseModel):
created_at: int
"""The Unix timestamp (in seconds) for when the thread was created."""
- metadata: Optional[object] = None
+ metadata: Optional[Metadata] = None
"""Set of 16 key-value pairs that can be attached to an object.
This can be useful for storing additional information about the object in a
- structured format. Keys can be a maximum of 64 characters long and values can be
- a maximum of 512 characters long.
+ structured format, and querying for objects via API or the dashboard.
+
+ Keys are strings with a maximum length of 64 characters. Values are strings with
+ a maximum length of 512 characters.
"""
object: Literal["thread"]
diff --git a/src/openai/types/beta/thread_create_and_run_params.py b/src/openai/types/beta/thread_create_and_run_params.py
index 8310ba12f4..08f044c1be 100644
--- a/src/openai/types/beta/thread_create_and_run_params.py
+++ b/src/openai/types/beta/thread_create_and_run_params.py
@@ -8,6 +8,7 @@
from ..chat_model import ChatModel
from .function_tool_param import FunctionToolParam
from .file_search_tool_param import FileSearchToolParam
+from ..shared_params.metadata import Metadata
from .code_interpreter_tool_param import CodeInterpreterToolParam
from .file_chunking_strategy_param import FileChunkingStrategyParam
from .assistant_tool_choice_option_param import AssistantToolChoiceOptionParam
@@ -67,12 +68,14 @@ class ThreadCreateAndRunParamsBase(TypedDict, total=False):
`incomplete_details` for more info.
"""
- metadata: Optional[object]
+ metadata: Optional[Metadata]
"""Set of 16 key-value pairs that can be attached to an object.
This can be useful for storing additional information about the object in a
- structured format. Keys can be a maximum of 64 characters long and values can be
- a maximum of 512 characters long.
+ structured format, and querying for objects via API or the dashboard.
+
+ Keys are strings with a maximum length of 64 characters. Values are strings with
+ a maximum length of 512 characters.
"""
model: Union[str, ChatModel, None]
@@ -122,7 +125,11 @@ class ThreadCreateAndRunParamsBase(TypedDict, total=False):
"""
thread: Thread
- """If no thread is provided, an empty thread will be created."""
+ """Options to create a new thread.
+
+ If no thread is provided when running a request, an empty thread will be
+ created.
+ """
tool_choice: Optional[AssistantToolChoiceOptionParam]
"""
@@ -197,12 +204,14 @@ class ThreadMessage(TypedDict, total=False):
attachments: Optional[Iterable[ThreadMessageAttachment]]
"""A list of files attached to the message, and the tools they should be added to."""
- metadata: Optional[object]
+ metadata: Optional[Metadata]
"""Set of 16 key-value pairs that can be attached to an object.
This can be useful for storing additional information about the object in a
- structured format. Keys can be a maximum of 64 characters long and values can be
- a maximum of 512 characters long.
+ structured format, and querying for objects via API or the dashboard.
+
+ Keys are strings with a maximum length of 64 characters. Values are strings with
+ a maximum length of 512 characters.
"""
@@ -230,12 +239,14 @@ class ThreadToolResourcesFileSearchVectorStore(TypedDict, total=False):
store.
"""
- metadata: object
- """Set of 16 key-value pairs that can be attached to a vector store.
+ metadata: Optional[Metadata]
+ """Set of 16 key-value pairs that can be attached to an object.
+
+ This can be useful for storing additional information about the object in a
+ structured format, and querying for objects via API or the dashboard.
- This can be useful for storing additional information about the vector store in
- a structured format. Keys can be a maximum of 64 characters long and values can
- be a maximum of 512 characters long.
+ Keys are strings with a maximum length of 64 characters. Values are strings with
+ a maximum length of 512 characters.
"""
@@ -270,12 +281,14 @@ class Thread(TypedDict, total=False):
start the thread with.
"""
- metadata: Optional[object]
+ metadata: Optional[Metadata]
"""Set of 16 key-value pairs that can be attached to an object.
This can be useful for storing additional information about the object in a
- structured format. Keys can be a maximum of 64 characters long and values can be
- a maximum of 512 characters long.
+ structured format, and querying for objects via API or the dashboard.
+
+ Keys are strings with a maximum length of 64 characters. Values are strings with
+ a maximum length of 512 characters.
"""
tool_resources: Optional[ThreadToolResources]
diff --git a/src/openai/types/beta/thread_create_params.py b/src/openai/types/beta/thread_create_params.py
index 3ac6c7d69b..127202753c 100644
--- a/src/openai/types/beta/thread_create_params.py
+++ b/src/openai/types/beta/thread_create_params.py
@@ -5,6 +5,7 @@
from typing import List, Union, Iterable, Optional
from typing_extensions import Literal, Required, TypeAlias, TypedDict
+from ..shared_params.metadata import Metadata
from .code_interpreter_tool_param import CodeInterpreterToolParam
from .file_chunking_strategy_param import FileChunkingStrategyParam
from .threads.message_content_part_param import MessageContentPartParam
@@ -29,12 +30,14 @@ class ThreadCreateParams(TypedDict, total=False):
start the thread with.
"""
- metadata: Optional[object]
+ metadata: Optional[Metadata]
"""Set of 16 key-value pairs that can be attached to an object.
This can be useful for storing additional information about the object in a
- structured format. Keys can be a maximum of 64 characters long and values can be
- a maximum of 512 characters long.
+ structured format, and querying for objects via API or the dashboard.
+
+ Keys are strings with a maximum length of 64 characters. Values are strings with
+ a maximum length of 512 characters.
"""
tool_resources: Optional[ToolResources]
@@ -78,12 +81,14 @@ class Message(TypedDict, total=False):
attachments: Optional[Iterable[MessageAttachment]]
"""A list of files attached to the message, and the tools they should be added to."""
- metadata: Optional[object]
+ metadata: Optional[Metadata]
"""Set of 16 key-value pairs that can be attached to an object.
This can be useful for storing additional information about the object in a
- structured format. Keys can be a maximum of 64 characters long and values can be
- a maximum of 512 characters long.
+ structured format, and querying for objects via API or the dashboard.
+
+ Keys are strings with a maximum length of 64 characters. Values are strings with
+ a maximum length of 512 characters.
"""
@@ -111,12 +116,14 @@ class ToolResourcesFileSearchVectorStore(TypedDict, total=False):
store.
"""
- metadata: object
- """Set of 16 key-value pairs that can be attached to a vector store.
+ metadata: Optional[Metadata]
+ """Set of 16 key-value pairs that can be attached to an object.
+
+ This can be useful for storing additional information about the object in a
+ structured format, and querying for objects via API or the dashboard.
- This can be useful for storing additional information about the vector store in
- a structured format. Keys can be a maximum of 64 characters long and values can
- be a maximum of 512 characters long.
+ Keys are strings with a maximum length of 64 characters. Values are strings with
+ a maximum length of 512 characters.
"""
diff --git a/src/openai/types/beta/thread_update_params.py b/src/openai/types/beta/thread_update_params.py
index 78c5ec4f2e..b47ea8f3b0 100644
--- a/src/openai/types/beta/thread_update_params.py
+++ b/src/openai/types/beta/thread_update_params.py
@@ -5,16 +5,20 @@
from typing import List, Optional
from typing_extensions import TypedDict
+from ..shared_params.metadata import Metadata
+
__all__ = ["ThreadUpdateParams", "ToolResources", "ToolResourcesCodeInterpreter", "ToolResourcesFileSearch"]
class ThreadUpdateParams(TypedDict, total=False):
- metadata: Optional[object]
+ metadata: Optional[Metadata]
"""Set of 16 key-value pairs that can be attached to an object.
This can be useful for storing additional information about the object in a
- structured format. Keys can be a maximum of 64 characters long and values can be
- a maximum of 512 characters long.
+ structured format, and querying for objects via API or the dashboard.
+
+ Keys are strings with a maximum length of 64 characters. Values are strings with
+ a maximum length of 512 characters.
"""
tool_resources: Optional[ToolResources]
diff --git a/src/openai/types/beta/threads/message.py b/src/openai/types/beta/threads/message.py
index 63c5c4800a..4a05a128eb 100644
--- a/src/openai/types/beta/threads/message.py
+++ b/src/openai/types/beta/threads/message.py
@@ -5,6 +5,7 @@
from ...._models import BaseModel
from .message_content import MessageContent
+from ...shared.metadata import Metadata
from ..code_interpreter_tool import CodeInterpreterTool
__all__ = [
@@ -66,12 +67,14 @@ class Message(BaseModel):
incomplete_details: Optional[IncompleteDetails] = None
"""On an incomplete message, details about why the message is incomplete."""
- metadata: Optional[object] = None
+ metadata: Optional[Metadata] = None
"""Set of 16 key-value pairs that can be attached to an object.
This can be useful for storing additional information about the object in a
- structured format. Keys can be a maximum of 64 characters long and values can be
- a maximum of 512 characters long.
+ structured format, and querying for objects via API or the dashboard.
+
+ Keys are strings with a maximum length of 64 characters. Values are strings with
+ a maximum length of 512 characters.
"""
object: Literal["thread.message"]
diff --git a/src/openai/types/beta/threads/message_create_params.py b/src/openai/types/beta/threads/message_create_params.py
index 2c4edfdf71..b52386824a 100644
--- a/src/openai/types/beta/threads/message_create_params.py
+++ b/src/openai/types/beta/threads/message_create_params.py
@@ -5,6 +5,7 @@
from typing import Union, Iterable, Optional
from typing_extensions import Literal, Required, TypeAlias, TypedDict
+from ...shared_params.metadata import Metadata
from .message_content_part_param import MessageContentPartParam
from ..code_interpreter_tool_param import CodeInterpreterToolParam
@@ -27,12 +28,14 @@ class MessageCreateParams(TypedDict, total=False):
attachments: Optional[Iterable[Attachment]]
"""A list of files attached to the message, and the tools they should be added to."""
- metadata: Optional[object]
+ metadata: Optional[Metadata]
"""Set of 16 key-value pairs that can be attached to an object.
This can be useful for storing additional information about the object in a
- structured format. Keys can be a maximum of 64 characters long and values can be
- a maximum of 512 characters long.
+ structured format, and querying for objects via API or the dashboard.
+
+ Keys are strings with a maximum length of 64 characters. Values are strings with
+ a maximum length of 512 characters.
"""
diff --git a/src/openai/types/beta/threads/message_update_params.py b/src/openai/types/beta/threads/message_update_params.py
index e8f8cc910c..bb078281e6 100644
--- a/src/openai/types/beta/threads/message_update_params.py
+++ b/src/openai/types/beta/threads/message_update_params.py
@@ -5,16 +5,20 @@
from typing import Optional
from typing_extensions import Required, TypedDict
+from ...shared_params.metadata import Metadata
+
__all__ = ["MessageUpdateParams"]
class MessageUpdateParams(TypedDict, total=False):
thread_id: Required[str]
- metadata: Optional[object]
+ metadata: Optional[Metadata]
"""Set of 16 key-value pairs that can be attached to an object.
This can be useful for storing additional information about the object in a
- structured format. Keys can be a maximum of 64 characters long and values can be
- a maximum of 512 characters long.
+ structured format, and querying for objects via API or the dashboard.
+
+ Keys are strings with a maximum length of 64 characters. Values are strings with
+ a maximum length of 512 characters.
"""
diff --git a/src/openai/types/beta/threads/run.py b/src/openai/types/beta/threads/run.py
index ad32135b7d..da9418d6f9 100644
--- a/src/openai/types/beta/threads/run.py
+++ b/src/openai/types/beta/threads/run.py
@@ -6,6 +6,7 @@
from ...._models import BaseModel
from .run_status import RunStatus
from ..assistant_tool import AssistantTool
+from ...shared.metadata import Metadata
from ..assistant_tool_choice_option import AssistantToolChoiceOption
from ..assistant_response_format_option import AssistantResponseFormatOption
from .required_action_function_tool_call import RequiredActionFunctionToolCall
@@ -133,12 +134,14 @@ class Run(BaseModel):
of the run.
"""
- metadata: Optional[object] = None
+ metadata: Optional[Metadata] = None
"""Set of 16 key-value pairs that can be attached to an object.
This can be useful for storing additional information about the object in a
- structured format. Keys can be a maximum of 64 characters long and values can be
- a maximum of 512 characters long.
+ structured format, and querying for objects via API or the dashboard.
+
+ Keys are strings with a maximum length of 64 characters. Values are strings with
+ a maximum length of 512 characters.
"""
model: str
diff --git a/src/openai/types/beta/threads/run_create_params.py b/src/openai/types/beta/threads/run_create_params.py
index 88dc39645e..091dd3da66 100644
--- a/src/openai/types/beta/threads/run_create_params.py
+++ b/src/openai/types/beta/threads/run_create_params.py
@@ -8,6 +8,7 @@
from ...chat_model import ChatModel
from ..assistant_tool_param import AssistantToolParam
from .runs.run_step_include import RunStepInclude
+from ...shared_params.metadata import Metadata
from .message_content_part_param import MessageContentPartParam
from ..code_interpreter_tool_param import CodeInterpreterToolParam
from ..assistant_tool_choice_option_param import AssistantToolChoiceOptionParam
@@ -80,12 +81,14 @@ class RunCreateParamsBase(TypedDict, total=False):
`incomplete_details` for more info.
"""
- metadata: Optional[object]
+ metadata: Optional[Metadata]
"""Set of 16 key-value pairs that can be attached to an object.
This can be useful for storing additional information about the object in a
- structured format. Keys can be a maximum of 64 characters long and values can be
- a maximum of 512 characters long.
+ structured format, and querying for objects via API or the dashboard.
+
+ Keys are strings with a maximum length of 64 characters. Values are strings with
+ a maximum length of 512 characters.
"""
model: Union[str, ChatModel, None]
@@ -199,12 +202,14 @@ class AdditionalMessage(TypedDict, total=False):
attachments: Optional[Iterable[AdditionalMessageAttachment]]
"""A list of files attached to the message, and the tools they should be added to."""
- metadata: Optional[object]
+ metadata: Optional[Metadata]
"""Set of 16 key-value pairs that can be attached to an object.
This can be useful for storing additional information about the object in a
- structured format. Keys can be a maximum of 64 characters long and values can be
- a maximum of 512 characters long.
+ structured format, and querying for objects via API or the dashboard.
+
+ Keys are strings with a maximum length of 64 characters. Values are strings with
+ a maximum length of 512 characters.
"""
diff --git a/src/openai/types/beta/threads/run_update_params.py b/src/openai/types/beta/threads/run_update_params.py
index cb4f053645..fbcbd3fb14 100644
--- a/src/openai/types/beta/threads/run_update_params.py
+++ b/src/openai/types/beta/threads/run_update_params.py
@@ -5,16 +5,20 @@
from typing import Optional
from typing_extensions import Required, TypedDict
+from ...shared_params.metadata import Metadata
+
__all__ = ["RunUpdateParams"]
class RunUpdateParams(TypedDict, total=False):
thread_id: Required[str]
- metadata: Optional[object]
+ metadata: Optional[Metadata]
"""Set of 16 key-value pairs that can be attached to an object.
This can be useful for storing additional information about the object in a
- structured format. Keys can be a maximum of 64 characters long and values can be
- a maximum of 512 characters long.
+ structured format, and querying for objects via API or the dashboard.
+
+ Keys are strings with a maximum length of 64 characters. Values are strings with
+ a maximum length of 512 characters.
"""
diff --git a/src/openai/types/beta/threads/runs/run_step.py b/src/openai/types/beta/threads/runs/run_step.py
index 0445ae360d..b5f380c7b1 100644
--- a/src/openai/types/beta/threads/runs/run_step.py
+++ b/src/openai/types/beta/threads/runs/run_step.py
@@ -5,6 +5,7 @@
from ....._utils import PropertyInfo
from ....._models import BaseModel
+from ....shared.metadata import Metadata
from .tool_calls_step_details import ToolCallsStepDetails
from .message_creation_step_details import MessageCreationStepDetails
@@ -70,12 +71,14 @@ class RunStep(BaseModel):
Will be `null` if there are no errors.
"""
- metadata: Optional[object] = None
+ metadata: Optional[Metadata] = None
"""Set of 16 key-value pairs that can be attached to an object.
This can be useful for storing additional information about the object in a
- structured format. Keys can be a maximum of 64 characters long and values can be
- a maximum of 512 characters long.
+ structured format, and querying for objects via API or the dashboard.
+
+ Keys are strings with a maximum length of 64 characters. Values are strings with
+ a maximum length of 512 characters.
"""
object: Literal["thread.run.step"]
diff --git a/src/openai/types/beta/vector_store.py b/src/openai/types/beta/vector_store.py
index 2d3ceea80c..b947dfb79d 100644
--- a/src/openai/types/beta/vector_store.py
+++ b/src/openai/types/beta/vector_store.py
@@ -4,6 +4,7 @@
from typing_extensions import Literal
from ..._models import BaseModel
+from ..shared.metadata import Metadata
__all__ = ["VectorStore", "FileCounts", "ExpiresAfter"]
@@ -48,12 +49,14 @@ class VectorStore(BaseModel):
last_active_at: Optional[int] = None
"""The Unix timestamp (in seconds) for when the vector store was last active."""
- metadata: Optional[object] = None
+ metadata: Optional[Metadata] = None
"""Set of 16 key-value pairs that can be attached to an object.
This can be useful for storing additional information about the object in a
- structured format. Keys can be a maximum of 64 characters long and values can be
- a maximum of 512 characters long.
+ structured format, and querying for objects via API or the dashboard.
+
+ Keys are strings with a maximum length of 64 characters. Values are strings with
+ a maximum length of 512 characters.
"""
name: str
diff --git a/src/openai/types/beta/vector_store_create_params.py b/src/openai/types/beta/vector_store_create_params.py
index 4fc7c38927..faca6d9000 100644
--- a/src/openai/types/beta/vector_store_create_params.py
+++ b/src/openai/types/beta/vector_store_create_params.py
@@ -5,6 +5,7 @@
from typing import List, Optional
from typing_extensions import Literal, Required, TypedDict
+from ..shared_params.metadata import Metadata
from .file_chunking_strategy_param import FileChunkingStrategyParam
__all__ = ["VectorStoreCreateParams", "ExpiresAfter"]
@@ -28,12 +29,14 @@ class VectorStoreCreateParams(TypedDict, total=False):
files.
"""
- metadata: Optional[object]
+ metadata: Optional[Metadata]
"""Set of 16 key-value pairs that can be attached to an object.
This can be useful for storing additional information about the object in a
- structured format. Keys can be a maximum of 64 characters long and values can be
- a maximum of 512 characters long.
+ structured format, and querying for objects via API or the dashboard.
+
+ Keys are strings with a maximum length of 64 characters. Values are strings with
+ a maximum length of 512 characters.
"""
name: str
diff --git a/src/openai/types/beta/vector_store_update_params.py b/src/openai/types/beta/vector_store_update_params.py
index ff6c068efb..e91b3ba5ad 100644
--- a/src/openai/types/beta/vector_store_update_params.py
+++ b/src/openai/types/beta/vector_store_update_params.py
@@ -5,6 +5,8 @@
from typing import Optional
from typing_extensions import Literal, Required, TypedDict
+from ..shared_params.metadata import Metadata
+
__all__ = ["VectorStoreUpdateParams", "ExpiresAfter"]
@@ -12,12 +14,14 @@ class VectorStoreUpdateParams(TypedDict, total=False):
expires_after: Optional[ExpiresAfter]
"""The expiration policy for a vector store."""
- metadata: Optional[object]
+ metadata: Optional[Metadata]
"""Set of 16 key-value pairs that can be attached to an object.
This can be useful for storing additional information about the object in a
- structured format. Keys can be a maximum of 64 characters long and values can be
- a maximum of 512 characters long.
+ structured format, and querying for objects via API or the dashboard.
+
+ Keys are strings with a maximum length of 64 characters. Values are strings with
+ a maximum length of 512 characters.
"""
name: Optional[str]
diff --git a/src/openai/types/chat/chat_completion_assistant_message_param.py b/src/openai/types/chat/chat_completion_assistant_message_param.py
index 229fb822f4..35e3a3d784 100644
--- a/src/openai/types/chat/chat_completion_assistant_message_param.py
+++ b/src/openai/types/chat/chat_completion_assistant_message_param.py
@@ -38,8 +38,8 @@ class ChatCompletionAssistantMessageParam(TypedDict, total=False):
"""The role of the messages author, in this case `assistant`."""
audio: Optional[Audio]
- """
- Data about a previous audio response from the model.
+ """Data about a previous audio response from the model.
+
[Learn more](https://platform.openai.com/docs/guides/audio).
"""
diff --git a/src/openai/types/chat/completion_create_params.py b/src/openai/types/chat/completion_create_params.py
index 30d930b120..ec88ea1fb0 100644
--- a/src/openai/types/chat/completion_create_params.py
+++ b/src/openai/types/chat/completion_create_params.py
@@ -6,6 +6,7 @@
from typing_extensions import Literal, Required, TypeAlias, TypedDict
from ..chat_model import ChatModel
+from ..shared_params.metadata import Metadata
from .chat_completion_modality import ChatCompletionModality
from .chat_completion_tool_param import ChatCompletionToolParam
from .chat_completion_audio_param import ChatCompletionAudioParam
@@ -122,10 +123,14 @@ class CompletionCreateParamsBase(TypedDict, total=False):
[o1 series models](https://platform.openai.com/docs/guides/reasoning).
"""
- metadata: Optional[Dict[str, str]]
- """
- Developer-defined tags and values used for filtering completions in the
- [dashboard](https://platform.openai.com/chat-completions).
+ metadata: Optional[Metadata]
+ """Set of 16 key-value pairs that can be attached to an object.
+
+ This can be useful for storing additional information about the object in a
+ structured format, and querying for objects via API or the dashboard.
+
+ Keys are strings with a maximum length of 64 characters. Values are strings with
+ a maximum length of 512 characters.
"""
modalities: Optional[List[ChatCompletionModality]]
@@ -216,9 +221,9 @@ class CompletionCreateParamsBase(TypedDict, total=False):
utilize scale tier credits until they are exhausted.
- If set to 'auto', and the Project is not Scale tier enabled, the request will
be processed using the default service tier with a lower uptime SLA and no
- latency guarentee.
+ latency guarantee.
- If set to 'default', the request will be processed using the default service
- tier with a lower uptime SLA and no latency guarentee.
+ tier with a lower uptime SLA and no latency guarantee.
- When not set, the default behavior is 'auto'.
"""
diff --git a/src/openai/types/chat_model.py b/src/openai/types/chat_model.py
index e1ac464320..c191cb9734 100644
--- a/src/openai/types/chat_model.py
+++ b/src/openai/types/chat_model.py
@@ -5,6 +5,8 @@
__all__ = ["ChatModel"]
ChatModel: TypeAlias = Literal[
+ "o3-mini",
+ "o3-mini-2025-01-31",
"o1",
"o1-2024-12-17",
"o1-preview",
diff --git a/src/openai/types/shared/__init__.py b/src/openai/types/shared/__init__.py
index c8776bca0e..74bf304904 100644
--- a/src/openai/types/shared/__init__.py
+++ b/src/openai/types/shared/__init__.py
@@ -1,5 +1,6 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+from .metadata import Metadata as Metadata
from .error_object import ErrorObject as ErrorObject
from .function_definition import FunctionDefinition as FunctionDefinition
from .function_parameters import FunctionParameters as FunctionParameters
diff --git a/src/openai/types/shared/metadata.py b/src/openai/types/shared/metadata.py
new file mode 100644
index 0000000000..0da88c679c
--- /dev/null
+++ b/src/openai/types/shared/metadata.py
@@ -0,0 +1,8 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Dict
+from typing_extensions import TypeAlias
+
+__all__ = ["Metadata"]
+
+Metadata: TypeAlias = Dict[str, str]
diff --git a/src/openai/types/shared_params/__init__.py b/src/openai/types/shared_params/__init__.py
index ab4057d59f..68a8db75fe 100644
--- a/src/openai/types/shared_params/__init__.py
+++ b/src/openai/types/shared_params/__init__.py
@@ -1,5 +1,6 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+from .metadata import Metadata as Metadata
from .function_definition import FunctionDefinition as FunctionDefinition
from .function_parameters import FunctionParameters as FunctionParameters
from .response_format_text import ResponseFormatText as ResponseFormatText
diff --git a/src/openai/types/shared_params/metadata.py b/src/openai/types/shared_params/metadata.py
new file mode 100644
index 0000000000..821650b48b
--- /dev/null
+++ b/src/openai/types/shared_params/metadata.py
@@ -0,0 +1,10 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Dict
+from typing_extensions import TypeAlias
+
+__all__ = ["Metadata"]
+
+Metadata: TypeAlias = Dict[str, str]
diff --git a/src/openai/types/upload.py b/src/openai/types/upload.py
index 1cf8ee97f8..d8108c62f9 100644
--- a/src/openai/types/upload.py
+++ b/src/openai/types/upload.py
@@ -39,4 +39,4 @@ class Upload(BaseModel):
"""The status of the Upload."""
file: Optional[FileObject] = None
- """The ready File object after the Upload is completed."""
+ """The `File` object represents a document that has been uploaded to OpenAI."""
diff --git a/tests/api_resources/beta/realtime/test_sessions.py b/tests/api_resources/beta/realtime/test_sessions.py
index 908aa983be..5a17088ce6 100644
--- a/tests/api_resources/beta/realtime/test_sessions.py
+++ b/tests/api_resources/beta/realtime/test_sessions.py
@@ -26,7 +26,11 @@ def test_method_create(self, client: OpenAI) -> None:
def test_method_create_with_all_params(self, client: OpenAI) -> None:
session = client.beta.realtime.sessions.create(
input_audio_format="pcm16",
- input_audio_transcription={"model": "model"},
+ input_audio_transcription={
+ "language": "language",
+ "model": "model",
+ "prompt": "prompt",
+ },
instructions="instructions",
max_response_output_tokens=0,
modalities=["text"],
@@ -86,7 +90,11 @@ async def test_method_create(self, async_client: AsyncOpenAI) -> None:
async def test_method_create_with_all_params(self, async_client: AsyncOpenAI) -> None:
session = await async_client.beta.realtime.sessions.create(
input_audio_format="pcm16",
- input_audio_transcription={"model": "model"},
+ input_audio_transcription={
+ "language": "language",
+ "model": "model",
+ "prompt": "prompt",
+ },
instructions="instructions",
max_response_output_tokens=0,
modalities=["text"],
diff --git a/tests/api_resources/beta/test_assistants.py b/tests/api_resources/beta/test_assistants.py
index d9944448b7..458e3f5e90 100644
--- a/tests/api_resources/beta/test_assistants.py
+++ b/tests/api_resources/beta/test_assistants.py
@@ -34,7 +34,7 @@ def test_method_create_with_all_params(self, client: OpenAI) -> None:
model="gpt-4o",
description="description",
instructions="instructions",
- metadata={},
+ metadata={"foo": "string"},
name="name",
response_format="auto",
temperature=1,
@@ -46,7 +46,7 @@ def test_method_create_with_all_params(self, client: OpenAI) -> None:
{
"chunking_strategy": {"type": "auto"},
"file_ids": ["string"],
- "metadata": {},
+ "metadata": {"foo": "string"},
}
],
},
@@ -131,7 +131,7 @@ def test_method_update_with_all_params(self, client: OpenAI) -> None:
assistant_id="assistant_id",
description="description",
instructions="instructions",
- metadata={},
+ metadata={"foo": "string"},
model="model",
name="name",
response_format="auto",
@@ -266,7 +266,7 @@ async def test_method_create_with_all_params(self, async_client: AsyncOpenAI) ->
model="gpt-4o",
description="description",
instructions="instructions",
- metadata={},
+ metadata={"foo": "string"},
name="name",
response_format="auto",
temperature=1,
@@ -278,7 +278,7 @@ async def test_method_create_with_all_params(self, async_client: AsyncOpenAI) ->
{
"chunking_strategy": {"type": "auto"},
"file_ids": ["string"],
- "metadata": {},
+ "metadata": {"foo": "string"},
}
],
},
@@ -363,7 +363,7 @@ async def test_method_update_with_all_params(self, async_client: AsyncOpenAI) ->
assistant_id="assistant_id",
description="description",
instructions="instructions",
- metadata={},
+ metadata={"foo": "string"},
model="model",
name="name",
response_format="auto",
diff --git a/tests/api_resources/beta/test_threads.py b/tests/api_resources/beta/test_threads.py
index 789f870d6a..ecf5b11102 100644
--- a/tests/api_resources/beta/test_threads.py
+++ b/tests/api_resources/beta/test_threads.py
@@ -39,10 +39,10 @@ def test_method_create_with_all_params(self, client: OpenAI) -> None:
"tools": [{"type": "code_interpreter"}],
}
],
- "metadata": {},
+ "metadata": {"foo": "string"},
}
],
- metadata={},
+ metadata={"foo": "string"},
tool_resources={
"code_interpreter": {"file_ids": ["string"]},
"file_search": {
@@ -51,7 +51,7 @@ def test_method_create_with_all_params(self, client: OpenAI) -> None:
{
"chunking_strategy": {"type": "auto"},
"file_ids": ["string"],
- "metadata": {},
+ "metadata": {"foo": "string"},
}
],
},
@@ -127,8 +127,8 @@ def test_method_update(self, client: OpenAI) -> None:
@parametrize
def test_method_update_with_all_params(self, client: OpenAI) -> None:
thread = client.beta.threads.update(
- "string",
- metadata={},
+ thread_id="thread_id",
+ metadata={"foo": "string"},
tool_resources={
"code_interpreter": {"file_ids": ["string"]},
"file_search": {"vector_store_ids": ["string"]},
@@ -219,7 +219,7 @@ def test_method_create_and_run_with_all_params_overload_1(self, client: OpenAI)
instructions="string",
max_completion_tokens=256,
max_prompt_tokens=256,
- metadata={},
+ metadata={"foo": "string"},
model="gpt-4o",
parallel_tool_calls=True,
response_format="auto",
@@ -236,10 +236,10 @@ def test_method_create_and_run_with_all_params_overload_1(self, client: OpenAI)
"tools": [{"type": "code_interpreter"}],
}
],
- "metadata": {},
+ "metadata": {"foo": "string"},
}
],
- "metadata": {},
+ "metadata": {"foo": "string"},
"tool_resources": {
"code_interpreter": {"file_ids": ["string"]},
"file_search": {
@@ -248,7 +248,7 @@ def test_method_create_and_run_with_all_params_overload_1(self, client: OpenAI)
{
"chunking_strategy": {"type": "auto"},
"file_ids": ["string"],
- "metadata": {},
+ "metadata": {"foo": "string"},
}
],
},
@@ -308,7 +308,7 @@ def test_method_create_and_run_with_all_params_overload_2(self, client: OpenAI)
instructions="string",
max_completion_tokens=256,
max_prompt_tokens=256,
- metadata={},
+ metadata={"foo": "string"},
model="gpt-4o",
parallel_tool_calls=True,
response_format="auto",
@@ -324,10 +324,10 @@ def test_method_create_and_run_with_all_params_overload_2(self, client: OpenAI)
"tools": [{"type": "code_interpreter"}],
}
],
- "metadata": {},
+ "metadata": {"foo": "string"},
}
],
- "metadata": {},
+ "metadata": {"foo": "string"},
"tool_resources": {
"code_interpreter": {"file_ids": ["string"]},
"file_search": {
@@ -336,7 +336,7 @@ def test_method_create_and_run_with_all_params_overload_2(self, client: OpenAI)
{
"chunking_strategy": {"type": "auto"},
"file_ids": ["string"],
- "metadata": {},
+ "metadata": {"foo": "string"},
}
],
},
@@ -403,10 +403,10 @@ async def test_method_create_with_all_params(self, async_client: AsyncOpenAI) ->
"tools": [{"type": "code_interpreter"}],
}
],
- "metadata": {},
+ "metadata": {"foo": "string"},
}
],
- metadata={},
+ metadata={"foo": "string"},
tool_resources={
"code_interpreter": {"file_ids": ["string"]},
"file_search": {
@@ -415,7 +415,7 @@ async def test_method_create_with_all_params(self, async_client: AsyncOpenAI) ->
{
"chunking_strategy": {"type": "auto"},
"file_ids": ["string"],
- "metadata": {},
+ "metadata": {"foo": "string"},
}
],
},
@@ -491,8 +491,8 @@ async def test_method_update(self, async_client: AsyncOpenAI) -> None:
@parametrize
async def test_method_update_with_all_params(self, async_client: AsyncOpenAI) -> None:
thread = await async_client.beta.threads.update(
- "string",
- metadata={},
+ thread_id="thread_id",
+ metadata={"foo": "string"},
tool_resources={
"code_interpreter": {"file_ids": ["string"]},
"file_search": {"vector_store_ids": ["string"]},
@@ -583,7 +583,7 @@ async def test_method_create_and_run_with_all_params_overload_1(self, async_clie
instructions="string",
max_completion_tokens=256,
max_prompt_tokens=256,
- metadata={},
+ metadata={"foo": "string"},
model="gpt-4o",
parallel_tool_calls=True,
response_format="auto",
@@ -600,10 +600,10 @@ async def test_method_create_and_run_with_all_params_overload_1(self, async_clie
"tools": [{"type": "code_interpreter"}],
}
],
- "metadata": {},
+ "metadata": {"foo": "string"},
}
],
- "metadata": {},
+ "metadata": {"foo": "string"},
"tool_resources": {
"code_interpreter": {"file_ids": ["string"]},
"file_search": {
@@ -612,7 +612,7 @@ async def test_method_create_and_run_with_all_params_overload_1(self, async_clie
{
"chunking_strategy": {"type": "auto"},
"file_ids": ["string"],
- "metadata": {},
+ "metadata": {"foo": "string"},
}
],
},
@@ -672,7 +672,7 @@ async def test_method_create_and_run_with_all_params_overload_2(self, async_clie
instructions="string",
max_completion_tokens=256,
max_prompt_tokens=256,
- metadata={},
+ metadata={"foo": "string"},
model="gpt-4o",
parallel_tool_calls=True,
response_format="auto",
@@ -688,10 +688,10 @@ async def test_method_create_and_run_with_all_params_overload_2(self, async_clie
"tools": [{"type": "code_interpreter"}],
}
],
- "metadata": {},
+ "metadata": {"foo": "string"},
}
],
- "metadata": {},
+ "metadata": {"foo": "string"},
"tool_resources": {
"code_interpreter": {"file_ids": ["string"]},
"file_search": {
@@ -700,7 +700,7 @@ async def test_method_create_and_run_with_all_params_overload_2(self, async_clie
{
"chunking_strategy": {"type": "auto"},
"file_ids": ["string"],
- "metadata": {},
+ "metadata": {"foo": "string"},
}
],
},
diff --git a/tests/api_resources/beta/test_vector_stores.py b/tests/api_resources/beta/test_vector_stores.py
index 99e1970c33..e13b8c7613 100644
--- a/tests/api_resources/beta/test_vector_stores.py
+++ b/tests/api_resources/beta/test_vector_stores.py
@@ -35,8 +35,8 @@ def test_method_create_with_all_params(self, client: OpenAI) -> None:
"days": 1,
},
file_ids=["string"],
- metadata={},
- name="string",
+ metadata={"foo": "string"},
+ name="name",
)
assert_matches_type(VectorStore, vector_store, path=["response"])
@@ -113,8 +113,8 @@ def test_method_update_with_all_params(self, client: OpenAI) -> None:
"anchor": "last_active_at",
"days": 1,
},
- metadata={},
- name="string",
+ metadata={"foo": "string"},
+ name="name",
)
assert_matches_type(VectorStore, vector_store, path=["response"])
@@ -240,8 +240,8 @@ async def test_method_create_with_all_params(self, async_client: AsyncOpenAI) ->
"days": 1,
},
file_ids=["string"],
- metadata={},
- name="string",
+ metadata={"foo": "string"},
+ name="name",
)
assert_matches_type(VectorStore, vector_store, path=["response"])
@@ -318,8 +318,8 @@ async def test_method_update_with_all_params(self, async_client: AsyncOpenAI) ->
"anchor": "last_active_at",
"days": 1,
},
- metadata={},
- name="string",
+ metadata={"foo": "string"},
+ name="name",
)
assert_matches_type(VectorStore, vector_store, path=["response"])
diff --git a/tests/api_resources/beta/threads/test_messages.py b/tests/api_resources/beta/threads/test_messages.py
index 06c37e608a..9189a2f29e 100644
--- a/tests/api_resources/beta/threads/test_messages.py
+++ b/tests/api_resources/beta/threads/test_messages.py
@@ -42,7 +42,7 @@ def test_method_create_with_all_params(self, client: OpenAI) -> None:
"tools": [{"type": "code_interpreter"}],
}
],
- metadata={},
+ metadata={"foo": "string"},
)
assert_matches_type(Message, message, path=["response"])
@@ -142,9 +142,9 @@ def test_method_update(self, client: OpenAI) -> None:
@parametrize
def test_method_update_with_all_params(self, client: OpenAI) -> None:
message = client.beta.threads.messages.update(
- "string",
- thread_id="string",
- metadata={},
+ message_id="message_id",
+ thread_id="thread_id",
+ metadata={"foo": "string"},
)
assert_matches_type(Message, message, path=["response"])
@@ -311,7 +311,7 @@ async def test_method_create_with_all_params(self, async_client: AsyncOpenAI) ->
"tools": [{"type": "code_interpreter"}],
}
],
- metadata={},
+ metadata={"foo": "string"},
)
assert_matches_type(Message, message, path=["response"])
@@ -411,9 +411,9 @@ async def test_method_update(self, async_client: AsyncOpenAI) -> None:
@parametrize
async def test_method_update_with_all_params(self, async_client: AsyncOpenAI) -> None:
message = await async_client.beta.threads.messages.update(
- "string",
- thread_id="string",
- metadata={},
+ message_id="message_id",
+ thread_id="thread_id",
+ metadata={"foo": "string"},
)
assert_matches_type(Message, message, path=["response"])
diff --git a/tests/api_resources/beta/threads/test_runs.py b/tests/api_resources/beta/threads/test_runs.py
index c48cc6de43..48b39cfe5b 100644
--- a/tests/api_resources/beta/threads/test_runs.py
+++ b/tests/api_resources/beta/threads/test_runs.py
@@ -47,13 +47,13 @@ def test_method_create_with_all_params_overload_1(self, client: OpenAI) -> None:
"tools": [{"type": "code_interpreter"}],
}
],
- "metadata": {},
+ "metadata": {"foo": "string"},
}
],
instructions="string",
max_completion_tokens=256,
max_prompt_tokens=256,
- metadata={},
+ metadata={"foo": "string"},
model="gpt-4o",
parallel_tool_calls=True,
response_format="auto",
@@ -130,13 +130,13 @@ def test_method_create_with_all_params_overload_2(self, client: OpenAI) -> None:
"tools": [{"type": "code_interpreter"}],
}
],
- "metadata": {},
+ "metadata": {"foo": "string"},
}
],
instructions="string",
max_completion_tokens=256,
max_prompt_tokens=256,
- metadata={},
+ metadata={"foo": "string"},
model="gpt-4o",
parallel_tool_calls=True,
response_format="auto",
@@ -246,9 +246,9 @@ def test_method_update(self, client: OpenAI) -> None:
@parametrize
def test_method_update_with_all_params(self, client: OpenAI) -> None:
run = client.beta.threads.runs.update(
- "string",
- thread_id="string",
- metadata={},
+ run_id="run_id",
+ thread_id="thread_id",
+ metadata={"foo": "string"},
)
assert_matches_type(Run, run, path=["response"])
@@ -543,13 +543,13 @@ async def test_method_create_with_all_params_overload_1(self, async_client: Asyn
"tools": [{"type": "code_interpreter"}],
}
],
- "metadata": {},
+ "metadata": {"foo": "string"},
}
],
instructions="string",
max_completion_tokens=256,
max_prompt_tokens=256,
- metadata={},
+ metadata={"foo": "string"},
model="gpt-4o",
parallel_tool_calls=True,
response_format="auto",
@@ -626,13 +626,13 @@ async def test_method_create_with_all_params_overload_2(self, async_client: Asyn
"tools": [{"type": "code_interpreter"}],
}
],
- "metadata": {},
+ "metadata": {"foo": "string"},
}
],
instructions="string",
max_completion_tokens=256,
max_prompt_tokens=256,
- metadata={},
+ metadata={"foo": "string"},
model="gpt-4o",
parallel_tool_calls=True,
response_format="auto",
@@ -742,9 +742,9 @@ async def test_method_update(self, async_client: AsyncOpenAI) -> None:
@parametrize
async def test_method_update_with_all_params(self, async_client: AsyncOpenAI) -> None:
run = await async_client.beta.threads.runs.update(
- "string",
- thread_id="string",
- metadata={},
+ run_id="run_id",
+ thread_id="thread_id",
+ metadata={"foo": "string"},
)
assert_matches_type(Run, run, path=["response"])