From c37965e6c0258ea4290d9e7b6b32346792801cb6 Mon Sep 17 00:00:00 2001
From: Nisarg Patel <63173283+Nisarg38@users.noreply.github.com>
Date: Sat, 18 Jan 2025 02:43:40 -0500
Subject: [PATCH 1/4] Initial Commit

---
 examples/interceptor.py    |  75 +++++++++++++++++++
 src/openai/_base_client.py |  58 +++++++++++++-
 src/openai/_client.py      |  15 +++-
 src/openai/_interceptor.py | 104 +++++++++++++++++++++++++
 tests/test_interceptors.py | 150 +++++++++++++++++++++++++++++++++++++
 5 files changed, 397 insertions(+), 5 deletions(-)
 create mode 100644 examples/interceptor.py
 create mode 100644 src/openai/_interceptor.py
 create mode 100644 tests/test_interceptors.py

diff --git a/examples/interceptor.py b/examples/interceptor.py
new file mode 100644
index 0000000000..c646465354
--- /dev/null
+++ b/examples/interceptor.py
@@ -0,0 +1,75 @@
+from typing import TypeVar, Any
+import time
+from typing_extensions import override
+from openai import OpenAI
+from openai._interceptor import Interceptor, InterceptorRequest, InterceptorResponse
+from dotenv import load_dotenv
+
+load_dotenv()
+
+T = TypeVar("T")
+
+# Define a custom logging interceptor
+class LoggingInterceptor(Interceptor):
+    @override
+    def before_request(self, request: InterceptorRequest) -> InterceptorRequest:
+        print(f"Request: {request.method} {request.url}")
+        print(f"Headers: {request.headers}")
+        if request.body:
+            print(f"Body: {request.body}")
+        return request
+
+    @override
+    def after_response(self, response: InterceptorResponse[Any]) -> InterceptorResponse[Any]:
+        print(f"Response Status: {response.status_code}")
+        print(f"Response Headers: {response.headers}")
+        print(f"Response Body: {response.body}")
+        return response
+
+# Define an interceptor that implements retry logic with exponential backoff
+class RetryInterceptor(Interceptor):
+    def __init__(self, max_retries: int = 3, initial_delay: float = 1.0):
+        self.max_retries = max_retries
+        self.initial_delay = initial_delay
+        self.current_retry = 0
+
+    @override
+    def before_request(self, request: InterceptorRequest) -> InterceptorRequest:
+        return request
+
+    @override
+    def after_response(self, response: InterceptorResponse[Any]) -> InterceptorResponse[Any]:
+        # If response is successful or we've exhausted retries, return as is
+        if response.status_code < 500 or self.current_retry >= self.max_retries:
+            self.current_retry = 0  # Reset for next request
+            return response
+
+        # Calculate delay with exponential backoff
+        delay = self.initial_delay * (2 ** self.current_retry)
+        print(f"Request failed with status {response.status_code}. Retrying in {delay} seconds...")
+        time.sleep(delay)
+
+        self.current_retry += 1
+        # Trigger a retry by raising an exception
+        raise Exception(f"Retrying request (attempt {self.current_retry}/{self.max_retries})")
+
+# Initialize the OpenAI client and add interceptors
+if __name__ == "__main__":
+    # Create the interceptor chain
+    logging_interceptor = LoggingInterceptor()
+    retry_interceptor = RetryInterceptor(max_retries=3, initial_delay=1.0)
+
+    # Create client with interceptors
+    client = OpenAI(
+        interceptors=[logging_interceptor, retry_interceptor]
+    )
+
+    # Make a request using the client
+    response = client.chat.completions.create(
+        model="gpt-3.5-turbo",
+        messages=[{"role": "user", "content": "Tell me about error handling and retries in software systems."}],
+        max_tokens=100,
+    )
+
+    # Output the final response
+    print("Final Response:", response)
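A caveat about this example: raising from `after_response` only aborts the in-flight call — nothing in the patch re-issues the request by itself, so `RetryInterceptor` relies on the caller (or the SDK's own retry machinery) to try again. A minimal sketch of the caller-side loop the example seems to assume, reusing the classes from `examples/interceptor.py`; the loop itself is illustrative and not part of the patch:

```python
from openai import OpenAI

# RetryInterceptor as defined in examples/interceptor.py above.
retry = RetryInterceptor(max_retries=3, initial_delay=1.0)
client = OpenAI(interceptors=[retry])

response = None
for attempt in range(retry.max_retries + 1):  # one initial try plus the retries
    try:
        response = client.chat.completions.create(
            model="gpt-3.5-turbo",
            messages=[{"role": "user", "content": "Hello"}],
            max_tokens=100,
        )
        break  # success: stop looping
    except Exception as exc:
        # after_response() already slept with exponential backoff
        # (1s, 2s, 4s, ...) before raising, so we can retry immediately.
        print(f"Retry signalled: {exc}")

if response is None:
    raise RuntimeError("request failed after all retries")
```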
Retrying in {delay} seconds...") + time.sleep(delay) + + self.current_retry += 1 + # Trigger a retry by raising an exception + raise Exception(f"Retrying request (attempt {self.current_retry}/{self.max_retries})") + +# Initialize the OpenAI client and add interceptors +if __name__ == "__main__": + # Create the interceptor chain + logging_interceptor = LoggingInterceptor() + retry_interceptor = RetryInterceptor(max_retries=3, initial_delay=1.0) + + # Create client with interceptors + client = OpenAI( + interceptors=[logging_interceptor, retry_interceptor] + ) + + # Make a request using the client + response = client.chat.completions.create( + model="gpt-3.5-turbo", + messages=[{"role": "user", "content": "Tell me about error handling and retries in software systems."}], + max_tokens=100, + ) + + # Output the final response + print("Final Response:", response) diff --git a/src/openai/_base_client.py b/src/openai/_base_client.py index 1fa039c0b1..4537803a77 100644 --- a/src/openai/_base_client.py +++ b/src/openai/_base_client.py @@ -89,6 +89,8 @@ ) from ._legacy_response import LegacyAPIResponse +from ._interceptor import InterceptorChain, InterceptorRequest, InterceptorResponse, Interceptor + log: logging.Logger = logging.getLogger(__name__) log.addFilter(SensitiveHeadersFilter()) @@ -339,6 +341,8 @@ class BaseClient(Generic[_HttpxClientT, _DefaultStreamT]): _strict_response_validation: bool _idempotency_header: str | None _default_stream_cls: type[_DefaultStreamT] | None = None + _interceptor_chain: InterceptorChain + def __init__( self, @@ -353,6 +357,8 @@ def __init__( proxies: ProxiesTypes | None, custom_headers: Mapping[str, str] | None = None, custom_query: Mapping[str, object] | None = None, + interceptors: list[Interceptor] | None = None, + ) -> None: self._version = version self._base_url = self._enforce_trailing_slash(URL(base_url)) @@ -372,7 +378,9 @@ def __init__( "max_retries cannot be None. 
If you want to disable retries, pass `0`; if you want unlimited retries, pass `math.inf` or a very high number; if you want the default behavior, pass `openai.DEFAULT_MAX_RETRIES`" ) - def _enforce_trailing_slash(self, url: URL) -> URL: + + self._interceptor_chain = InterceptorChain(interceptors) + def _enforce_trailing_slash(self, url: URL) -> URL: if url.raw_path.endswith(b"/"): return url return url.copy_with(raw_path=url.raw_path + b"/") @@ -463,8 +471,27 @@ def _build_request( else: raise RuntimeError(f"Unexpected JSON data type, {type(json_data)}, cannot merge with `extra_body`") + # Build base headers and params headers = self._build_headers(options, retries_taken=retries_taken) params = _merge_mappings(self.default_query, options.params) + prepared_url = self._prepare_url(options.url) + + # Execute request interceptors + interceptor_request = InterceptorRequest( + method=options.method, + url=str(prepared_url), + headers=dict(headers), + params=dict(params), + body=json_data, + ) + interceptor_request = self._interceptor_chain.execute_before_request(interceptor_request) + + # Apply interceptor modifications + headers = httpx.Headers(interceptor_request.headers) + params = interceptor_request.params or {} + json_data = interceptor_request.body + prepared_url = URL(interceptor_request.url) + content_type = headers.get("Content-Type") files = options.files @@ -506,7 +533,7 @@ def _build_request( return self._client.build_request( # pyright: ignore[reportUnknownMemberType] headers=headers, timeout=self.timeout if isinstance(options.timeout, NotGiven) else options.timeout, - method=options.method, + method=interceptor_request.method, url=prepared_url, # the `Query` type that we use is incompatible with qs' # `Params` type as it needs to be typed as `Mapping[str, object]` @@ -582,6 +609,22 @@ def _process_response_data( return cast(ResponseT, data) try: + # Create InterceptorResponse and execute interceptors + interceptor_response = InterceptorResponse( + status_code=response.status_code, + headers=dict(response.headers), + body=data, + request=InterceptorRequest( + method=response.request.method, + url=str(response.request.url), + headers=dict(response.request.headers), + body=response.request._content if response.request._content else None, + ), + raw_response=response, + ) + interceptor_response = self._interceptor_chain.execute_after_response(interceptor_response) + data = interceptor_response.body + if inspect.isclass(cast_to) and issubclass(cast_to, ModelBuilderProtocol): return cast(ResponseT, cast_to.build(response=response, data=data)) @@ -796,6 +839,9 @@ def __init__( custom_headers: Mapping[str, str] | None = None, custom_query: Mapping[str, object] | None = None, _strict_response_validation: bool, + + interceptors: list[Interceptor] | None = None, + ) -> None: kwargs: dict[str, Any] = {} if limits is not None: @@ -859,6 +905,9 @@ def __init__( custom_query=custom_query, custom_headers=custom_headers, _strict_response_validation=_strict_response_validation, + + interceptors=interceptors, + ) self._client = http_client or SyncHttpxClientWrapper( base_url=base_url, @@ -1382,6 +1431,8 @@ def __init__( http_client: httpx.AsyncClient | None = None, custom_headers: Mapping[str, str] | None = None, custom_query: Mapping[str, object] | None = None, + + interceptors: list[Interceptor] | None = None, ) -> None: kwargs: dict[str, Any] = {} if limits is not None: @@ -1445,6 +1496,9 @@ def __init__( custom_query=custom_query, custom_headers=custom_headers, 
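Because `_build_request` rebuilds the headers, params, body, URL, and method from whatever `InterceptorRequest` the chain returns, an interceptor can reshape an outgoing request without touching client internals. A minimal sketch under the API added above — the header name and value are illustrative only:

```python
from typing import Any
from typing_extensions import override

from openai import OpenAI
from openai._interceptor import Interceptor, InterceptorRequest, InterceptorResponse

class CorrelationIdInterceptor(Interceptor):
    """Illustrative: stamp every outgoing request with a tracing header."""

    @override
    def before_request(self, request: InterceptorRequest) -> InterceptorRequest:
        # headers is a plain Dict[str, str] snapshot; _build_request converts
        # it back into httpx.Headers after the chain runs.
        request.headers["X-Correlation-Id"] = "my-trace-123"  # placeholder value
        return request

    @override
    def after_response(self, response: InterceptorResponse[Any]) -> InterceptorResponse[Any]:
        return response

client = OpenAI(interceptors=[CorrelationIdInterceptor()])
```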
diff --git a/src/openai/_client.py b/src/openai/_client.py
index c784694f20..40abe08107 100644
--- a/src/openai/_client.py
+++ b/src/openai/_client.py
@@ -7,6 +7,7 @@
 from typing_extensions import Self, override
 
 import httpx
+from openai._interceptor import Interceptor
 
 from . import _exceptions
 from ._qs import Querystring
@@ -96,7 +97,8 @@ def __init__(
         # outlining your use-case to help us decide if it should be
         # part of our public interface in the future.
         _strict_response_validation: bool = False,
-    ) -> None:
+        interceptors: list[Interceptor] | None = None,
+    ) -> None:
         """Construct a new synchronous openai client instance.
 
         This automatically infers the following arguments from their corresponding environment variables if they are not provided:
@@ -136,7 +138,8 @@ def __init__(
             custom_headers=default_headers,
             custom_query=default_query,
             _strict_response_validation=_strict_response_validation,
-        )
+            interceptors=interceptors,
+)
 
         self._default_stream_cls = Stream
 
@@ -192,6 +195,7 @@ def copy(
         set_default_headers: Mapping[str, str] | None = None,
         default_query: Mapping[str, object] | None = None,
         set_default_query: Mapping[str, object] | None = None,
+        interceptors: list[Interceptor] | None = None,
         _extra_kwargs: Mapping[str, Any] = {},
     ) -> Self:
         """
@@ -227,6 +231,7 @@ def copy(
             max_retries=max_retries if is_given(max_retries) else self.max_retries,
             default_headers=headers,
             default_query=params,
+            interceptors=interceptors,
             **_extra_kwargs,
         )
 
@@ -323,7 +328,8 @@ def __init__(
         # outlining your use-case to help us decide if it should be
         # part of our public interface in the future.
         _strict_response_validation: bool = False,
-    ) -> None:
+        interceptors: list[Interceptor] | None = None,
+    ) -> None:
         """Construct a new async openai client instance.
 
         This automatically infers the following arguments from their corresponding environment variables if they are not provided:
@@ -363,6 +369,7 @@ def __init__(
             custom_headers=default_headers,
             custom_query=default_query,
             _strict_response_validation=_strict_response_validation,
+            interceptors=interceptors,
         )
 
         self._default_stream_cls = AsyncStream
 
@@ -419,6 +426,7 @@ def copy(
         set_default_headers: Mapping[str, str] | None = None,
         default_query: Mapping[str, object] | None = None,
         set_default_query: Mapping[str, object] | None = None,
+        interceptors: list[Interceptor] | None = None,
         _extra_kwargs: Mapping[str, Any] = {},
     ) -> Self:
         """
@@ -454,6 +462,7 @@ def copy(
             max_retries=max_retries if is_given(max_retries) else self.max_retries,
             default_headers=headers,
             default_query=params,
+            interceptors=interceptors,
             **_extra_kwargs,
         )
diff --git a/src/openai/_interceptor.py b/src/openai/_interceptor.py
new file mode 100644
index 0000000000..135f6faba2
--- /dev/null
+++ b/src/openai/_interceptor.py
@@ -0,0 +1,104 @@
+from __future__ import annotations
+
+from abc import ABC, abstractmethod
+from dataclasses import dataclass
+from typing import Dict, Optional, TypeVar, Generic, Any, Union
+
+import httpx
+
+from ._types import Body
+
+T = TypeVar("T")
+
+@dataclass
+class InterceptorRequest:
+    """Container for request data that can be modified by interceptors"""
+    method: str
+    url: str
+    headers: Dict[str, str]
+    params: Optional[Dict[str, Any]] = None
+    body: Optional[Union[Body, bytes]] = None
+
+@dataclass
+class InterceptorResponse(Generic[T]):
+    """Container for response data that can be processed by interceptors"""
+    status_code: int
+    headers: Dict[str, str]
+    body: T
+    request: InterceptorRequest
+    raw_response: httpx.Response
+
+class Interceptor(ABC):
+    """Base class for implementing request/response interceptors"""
+
+    @abstractmethod
+    def before_request(self, request: InterceptorRequest) -> InterceptorRequest:
+        """Process and optionally modify the request before it is sent.
+
+        Args:
+            request: The request to process
+
+        Returns:
+            The processed request
+        """
+        pass
+
+    @abstractmethod
+    def after_response(self, response: InterceptorResponse[T]) -> InterceptorResponse[T]:
+        """Process and optionally modify the response after it is received.
+
+        Args:
+            response: The response to process
+
+        Returns:
+            The processed response
+        """
+        pass
+
+class InterceptorChain:
+    """Manages a chain of interceptors that process requests/responses in sequence"""
+
+    def __init__(self, interceptors: Optional[list[Interceptor]] = None):
+        self._interceptors = interceptors or []
+
+    def add_interceptor(self, interceptor: Interceptor) -> None:
+        """Add an interceptor to the chain"""
+        self._interceptors.append(interceptor)
+
+    def execute_before_request(self, request: InterceptorRequest) -> InterceptorRequest:
+        """Execute all interceptors' before_request methods in sequence"""
+        print("\n=== Intercepted Request ===")
+        print(f"Method: {request.method}")
+        print(f"URL: {request.url}")
+        print(f"Headers: {request.headers}")
+        if request.params:
+            print(f"Query Params: {request.params}")
+        if request.body:
+            print(f"Request Body: {request.body}")
+        print("========================\n")
+
+        current_request = request
+        for interceptor in self._interceptors:
+            try:
+                current_request = interceptor.before_request(current_request)
+            except Exception as e:
+                # Log error but continue processing
+                print(f"Error in interceptor {interceptor.__class__.__name__}: {e}")
+        return current_request
+
+    def execute_after_response(self, response: InterceptorResponse[T]) -> InterceptorResponse[T]:
+        """Execute all interceptors' after_response methods in sequence"""
+        print("\n=== Intercepted Response ===")
+        print(f"Status Code: {response.status_code}")
+        print(f"Headers: {response.headers}")
+        print(f"Response Body: {response.body}")
+        print("=========================\n")
+
+        current_response = response
+        for interceptor in self._interceptors:
+            try:
+                current_response = interceptor.after_response(current_response)
+            except Exception as e:
+                # Log error but continue processing
+                print(f"Error in interceptor {interceptor.__class__.__name__}: {e}")
+        return current_response
\ No newline at end of file
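The chain applies interceptors in registration order, and each one receives the request returned by the previous one. A quick sketch exercising `InterceptorChain` directly — the `TagInterceptor` class is illustrative, not part of the patch:

```python
from typing import Any
from typing_extensions import override

from openai._interceptor import Interceptor, InterceptorChain, InterceptorRequest, InterceptorResponse

class TagInterceptor(Interceptor):
    def __init__(self, tag: str) -> None:
        self.tag = tag

    @override
    def before_request(self, request: InterceptorRequest) -> InterceptorRequest:
        # Append our tag so registration order is visible in the final value.
        request.headers["X-Tags"] = request.headers.get("X-Tags", "") + self.tag
        return request

    @override
    def after_response(self, response: InterceptorResponse[Any]) -> InterceptorResponse[Any]:
        return response

chain = InterceptorChain([TagInterceptor("a"), TagInterceptor("b")])
req = InterceptorRequest(method="get", url="https://example.test", headers={})
assert chain.execute_before_request(req).headers["X-Tags"] == "ab"  # runs in order
```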
headers={"Authorization": "Bearer test_key"}, + body={ + "model": "gpt-3.5-turbo", + "messages": [{"role": "user", "content": "Hello"}] + } + ) + + processed_request = interceptor.before_request(request) + + # Verify the message was modified + assert isinstance(processed_request.body, dict) + body = cast(Dict[str, Any], processed_request.body) # type: ignore + assert body["messages"][0]["content"] == "Hello [Disclaimer: This is a modified message]" + assert body["model"] == "gpt-3.5-turbo" # Other fields unchanged + assert processed_request.method == "post" # Request properties unchanged + assert processed_request.url == "https://api.openai.com/v1/chat/completions" + + def test_before_request_non_chat_completions(self, caplog: pytest.LogCaptureFixture) -> None: + """Test that the interceptor doesn't modify non-chat-completions requests""" + class MessageModifierInterceptor(Interceptor): + @override + def before_request(self, request: InterceptorRequest) -> InterceptorRequest: + if isinstance(request.body, dict): + body = cast(Dict[str, Any], request.body) # type: ignore + if "messages" in body: + print("\n=== Message Modification Process ===") + for message in body["messages"]: + if message["role"] == "user": + print(f"Original message: {message['content']}") + message["content"] += " [Disclaimer: This is a modified message]" + print(f"Modified message: {message['content']}") + print("=================================\n") + return request + + @override + def after_response(self, response: InterceptorResponse[Any]) -> InterceptorResponse[Any]: + print("\n=== Response Received ===") + print(f"Status code: {response.status_code}") + print(f"Response body: {response.body}") + print("======================\n") + return response + + interceptor = MessageModifierInterceptor() + request = InterceptorRequest( + method="post", + url="https://api.openai.com/v1/embeddings", + headers={"Authorization": "Bearer test_key"}, + body={ + "model": "text-embedding-ada-002", + "input": "Hello" + } + ) + + processed_request = interceptor.before_request(request) + + # Verify the request was not modified + assert isinstance(processed_request.body, dict) + body = cast(Dict[str, Any], processed_request.body) # type: ignore + assert body["input"] == "Hello" # Content unchanged + assert body["model"] == "text-embedding-ada-002" # Model unchanged + + def test_after_response(self, caplog: pytest.LogCaptureFixture) -> None: + """Test that after_response doesn't modify the response""" + class MessageModifierInterceptor(Interceptor): + @override + def before_request(self, request: InterceptorRequest) -> InterceptorRequest: + if isinstance(request.body, dict): + body = cast(Dict[str, Any], request.body) # type: ignore + if "messages" in body: + print("\n=== Message Modification Process ===") + for message in body["messages"]: + if message["role"] == "user": + print(f"Original message: {message['content']}") + message["content"] += " [Disclaimer: This is a modified message]" + print(f"Modified message: {message['content']}") + print("=================================\n") + return request + + @override + def after_response(self, response: InterceptorResponse[Any]) -> InterceptorResponse[Any]: + print("\n=== Response Received ===") + print(f"Status code: {response.status_code}") + print(f"Response body: {response.body}") + print("======================\n") + return response + + interceptor = MessageModifierInterceptor() + request = InterceptorRequest( + method="post", + url="https://api.openai.com/v1/chat/completions", + 
headers={"Authorization": "Bearer test_key"} + ) + + mock_raw_response = httpx.Response( + status_code=200, + headers={"Content-Type": "application/json"}, + json={"choices": [{"message": {"content": "Hello!"}}]} + ) + + response = InterceptorResponse[Dict[str, Any]]( + status_code=200, + headers={"Content-Type": "application/json"}, + body={"choices": [{"message": {"content": "Hello!"}}]}, + request=request, + raw_response=mock_raw_response + ) + + processed_response = interceptor.after_response(response) + + # Verify response is unchanged + assert processed_response == response + assert processed_response.status_code == 200 + assert processed_response.body == {"choices": [{"message": {"content": "Hello!"}}]} From 3f694572be58682cfd2429c854dc3de0acd9f991 Mon Sep 17 00:00:00 2001 From: Nisarg Patel <63173283+Nisarg38@users.noreply.github.com> Date: Sat, 18 Jan 2025 02:51:06 -0500 Subject: [PATCH 2/4] Interceptor-Clean up --- src/openai/_interceptor.py | 51 ++++++-------------------------------- 1 file changed, 8 insertions(+), 43 deletions(-) diff --git a/src/openai/_interceptor.py b/src/openai/_interceptor.py index 135f6faba2..2aeb6e5650 100644 --- a/src/openai/_interceptor.py +++ b/src/openai/_interceptor.py @@ -12,7 +12,7 @@ @dataclass class InterceptorRequest: - """Container for request data that can be modified by interceptors""" + """Request data container for interceptor processing""" method: str url: str headers: Dict[str, str] @@ -21,7 +21,7 @@ class InterceptorRequest: @dataclass class InterceptorResponse(Generic[T]): - """Container for response data that can be processed by interceptors""" + """Response data container for interceptor processing""" status_code: int headers: Dict[str, str] body: T @@ -29,76 +29,41 @@ class InterceptorResponse(Generic[T]): raw_response: httpx.Response class Interceptor(ABC): - """Base class for implementing request/response interceptors""" + """Base class for request/response interceptors""" @abstractmethod def before_request(self, request: InterceptorRequest) -> InterceptorRequest: - """Process and optionally modify the request before it is sent. - - Args: - request: The request to process - - Returns: - The processed request - """ + """Process request before sending""" pass @abstractmethod def after_response(self, response: InterceptorResponse[T]) -> InterceptorResponse[T]: - """Process and optionally modify the response after it is received. 
- - Args: - response: The response to process - - Returns: - The processed response - """ + """Process response after receiving""" pass class InterceptorChain: - """Manages a chain of interceptors that process requests/responses in sequence""" + """Chain of interceptors for sequential request/response processing""" def __init__(self, interceptors: Optional[list[Interceptor]] = None): self._interceptors = interceptors or [] def add_interceptor(self, interceptor: Interceptor) -> None: - """Add an interceptor to the chain""" self._interceptors.append(interceptor) def execute_before_request(self, request: InterceptorRequest) -> InterceptorRequest: - """Execute all interceptors' before_request methods in sequence""" - print("\n=== Intercepted Request ===") - print(f"Method: {request.method}") - print(f"URL: {request.url}") - print(f"Headers: {request.headers}") - if request.params: - print(f"Query Params: {request.params}") - if request.body: - print(f"Request Body: {request.body}") - print("========================\n") - current_request = request for interceptor in self._interceptors: try: current_request = interceptor.before_request(current_request) except Exception as e: - # Log error but continue processing - print(f"Error in interceptor {interceptor.__class__.__name__}: {e}") + continue return current_request def execute_after_response(self, response: InterceptorResponse[T]) -> InterceptorResponse[T]: - """Execute all interceptors' after_response methods in sequence""" - print("\n=== Intercepted Response ===") - print(f"Status Code: {response.status_code}") - print(f"Headers: {response.headers}") - print(f"Response Body: {response.body}") - print("=========================\n") - current_response = response for interceptor in self._interceptors: try: current_response = interceptor.after_response(current_response) except Exception as e: - # Log error but continue processing - print(f"Error in interceptor {interceptor.__class__.__name__}: {e}") + continue return current_response \ No newline at end of file From 3decdccc7bdd04ff02d60d8d6fae838bbdbd51d6 Mon Sep 17 00:00:00 2001 From: Nisarg Patel <63173283+Nisarg38@users.noreply.github.com> Date: Sun, 2 Feb 2025 18:52:38 -0500 Subject: [PATCH 3/4] feat(interceptors): improve code style and remove unnecessary imports - Added Support for AzureOpenAI --- examples/interceptor.py | 20 +++++++-------- src/openai/_base_client.py | 16 +++--------- src/openai/_client.py | 9 ++++--- src/openai/_interceptor.py | 14 ++++++++--- src/openai/lib/azure.py | 15 ++++++++++++ tests/test_interceptors.py | 50 ++++++++++++++++++-------------------- 6 files changed, 67 insertions(+), 57 deletions(-) diff --git a/examples/interceptor.py b/examples/interceptor.py index c646465354..5db8b750be 100644 --- a/examples/interceptor.py +++ b/examples/interceptor.py @@ -1,14 +1,13 @@ -from typing import TypeVar, Any import time +from typing import Any, TypeVar from typing_extensions import override + from openai import OpenAI from openai._interceptor import Interceptor, InterceptorRequest, InterceptorResponse -from dotenv import load_dotenv - -load_dotenv() T = TypeVar("T") + # Define a custom logging interceptor class LoggingInterceptor(Interceptor): @override @@ -26,6 +25,7 @@ def after_response(self, response: InterceptorResponse[Any]) -> InterceptorRespo print(f"Response Body: {response.body}") return response + # Define an interceptor that implements retry logic with exponential backoff class RetryInterceptor(Interceptor): def __init__(self, max_retries: int = 3, 
initial_delay: float = 1.0): @@ -45,14 +45,15 @@ def after_response(self, response: InterceptorResponse[Any]) -> InterceptorRespo return response # Calculate delay with exponential backoff - delay = self.initial_delay * (2 ** self.current_retry) + delay = self.initial_delay * (2**self.current_retry) print(f"Request failed with status {response.status_code}. Retrying in {delay} seconds...") time.sleep(delay) - + self.current_retry += 1 # Trigger a retry by raising an exception raise Exception(f"Retrying request (attempt {self.current_retry}/{self.max_retries})") + # Initialize the OpenAI client and add interceptors if __name__ == "__main__": # Create the interceptor chain @@ -60,15 +61,14 @@ def after_response(self, response: InterceptorResponse[Any]) -> InterceptorRespo retry_interceptor = RetryInterceptor(max_retries=3, initial_delay=1.0) # Create client with interceptors - client = OpenAI( - interceptors=[logging_interceptor, retry_interceptor] - ) + client = OpenAI(interceptors=[logging_interceptor, retry_interceptor]) # Make a request using the client - response = client.chat.completions.create( + response = client.chat.completions.create( # type: ignore model="gpt-3.5-turbo", messages=[{"role": "user", "content": "Tell me about error handling and retries in software systems."}], max_tokens=100, + stream=False, ) # Output the final response diff --git a/src/openai/_base_client.py b/src/openai/_base_client.py index 4537803a77..2a15193725 100644 --- a/src/openai/_base_client.py +++ b/src/openai/_base_client.py @@ -87,10 +87,9 @@ APIConnectionError, APIResponseValidationError, ) +from ._interceptor import Interceptor, InterceptorChain, InterceptorRequest, InterceptorResponse from ._legacy_response import LegacyAPIResponse -from ._interceptor import InterceptorChain, InterceptorRequest, InterceptorResponse, Interceptor - log: logging.Logger = logging.getLogger(__name__) log.addFilter(SensitiveHeadersFilter()) @@ -343,7 +342,6 @@ class BaseClient(Generic[_HttpxClientT, _DefaultStreamT]): _default_stream_cls: type[_DefaultStreamT] | None = None _interceptor_chain: InterceptorChain - def __init__( self, *, @@ -358,7 +356,6 @@ def __init__( custom_headers: Mapping[str, str] | None = None, custom_query: Mapping[str, object] | None = None, interceptors: list[Interceptor] | None = None, - ) -> None: self._version = version self._base_url = self._enforce_trailing_slash(URL(base_url)) @@ -378,9 +375,9 @@ def __init__( "max_retries cannot be None. 
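This clean-up trades the debug prints for silence: interceptor failures are now swallowed by the bare `continue`. If visibility is still wanted, one option is a chain variant that reports through the SDK's `openai` logger namespace instead of printing. A sketch — the subclass is illustrative, not part of the patch:

```python
import logging

from openai._interceptor import InterceptorChain, InterceptorRequest

log = logging.getLogger("openai")

class LoggingInterceptorChain(InterceptorChain):
    """Illustrative variant that logs interceptor failures instead of hiding them."""

    def execute_before_request(self, request: InterceptorRequest) -> InterceptorRequest:
        current = request
        for interceptor in self._interceptors:
            try:
                current = interceptor.before_request(current)
            except Exception:
                # Log with traceback, but keep processing like the base chain does.
                log.exception("interceptor %s failed", interceptor.__class__.__name__)
        return current
```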
From 3decdccc7bdd04ff02d60d8d6fae838bbdbd51d6 Mon Sep 17 00:00:00 2001
From: Nisarg Patel <63173283+Nisarg38@users.noreply.github.com>
Date: Sun, 2 Feb 2025 18:52:38 -0500
Subject: [PATCH 3/4] feat(interceptors): improve code style and remove
 unnecessary imports

- Added support for AzureOpenAI

---
 examples/interceptor.py    | 20 +++++++--------
 src/openai/_base_client.py | 16 +++---------
 src/openai/_client.py      |  9 ++++---
 src/openai/_interceptor.py | 14 ++++++++---
 src/openai/lib/azure.py    | 15 ++++++++++++
 tests/test_interceptors.py | 50 ++++++++++++++++++--------------------
 6 files changed, 67 insertions(+), 57 deletions(-)

diff --git a/examples/interceptor.py b/examples/interceptor.py
index c646465354..5db8b750be 100644
--- a/examples/interceptor.py
+++ b/examples/interceptor.py
@@ -1,14 +1,13 @@
-from typing import TypeVar, Any
 import time
+from typing import Any, TypeVar
 from typing_extensions import override
+
 from openai import OpenAI
 from openai._interceptor import Interceptor, InterceptorRequest, InterceptorResponse
-from dotenv import load_dotenv
-
-load_dotenv()
 
 T = TypeVar("T")
 
+
 # Define a custom logging interceptor
 class LoggingInterceptor(Interceptor):
     @override
@@ -26,6 +25,7 @@ def after_response(self, response: InterceptorResponse[Any]) -> InterceptorRespo
         print(f"Response Body: {response.body}")
         return response
 
+
 # Define an interceptor that implements retry logic with exponential backoff
 class RetryInterceptor(Interceptor):
     def __init__(self, max_retries: int = 3, initial_delay: float = 1.0):
@@ -45,14 +45,15 @@ def after_response(self, response: InterceptorResponse[Any]) -> InterceptorRespo
         return response
 
         # Calculate delay with exponential backoff
-        delay = self.initial_delay * (2 ** self.current_retry)
+        delay = self.initial_delay * (2**self.current_retry)
         print(f"Request failed with status {response.status_code}. Retrying in {delay} seconds...")
         time.sleep(delay)
-
+
         self.current_retry += 1
         # Trigger a retry by raising an exception
         raise Exception(f"Retrying request (attempt {self.current_retry}/{self.max_retries})")
 
+
 # Initialize the OpenAI client and add interceptors
 if __name__ == "__main__":
     # Create the interceptor chain
@@ -60,15 +61,14 @@ def after_response(self, response: InterceptorResponse[Any]) -> InterceptorRespo
     retry_interceptor = RetryInterceptor(max_retries=3, initial_delay=1.0)
 
     # Create client with interceptors
-    client = OpenAI(
-        interceptors=[logging_interceptor, retry_interceptor]
-    )
+    client = OpenAI(interceptors=[logging_interceptor, retry_interceptor])
 
     # Make a request using the client
-    response = client.chat.completions.create(
+    response = client.chat.completions.create(  # type: ignore
         model="gpt-3.5-turbo",
         messages=[{"role": "user", "content": "Tell me about error handling and retries in software systems."}],
         max_tokens=100,
+        stream=False,
     )
 
     # Output the final response
diff --git a/src/openai/_base_client.py b/src/openai/_base_client.py
index 4537803a77..2a15193725 100644
--- a/src/openai/_base_client.py
+++ b/src/openai/_base_client.py
@@ -87,10 +87,9 @@
     APIConnectionError,
     APIResponseValidationError,
 )
+from ._interceptor import Interceptor, InterceptorChain, InterceptorRequest, InterceptorResponse
 from ._legacy_response import LegacyAPIResponse
 
-from ._interceptor import InterceptorChain, InterceptorRequest, InterceptorResponse, Interceptor
-
 log: logging.Logger = logging.getLogger(__name__)
 log.addFilter(SensitiveHeadersFilter())
 
@@ -343,7 +342,6 @@ class BaseClient(Generic[_HttpxClientT, _DefaultStreamT]):
     _default_stream_cls: type[_DefaultStreamT] | None = None
     _interceptor_chain: InterceptorChain
 
-
     def __init__(
         self,
         *,
@@ -358,7 +356,6 @@ def __init__(
         custom_headers: Mapping[str, str] | None = None,
         custom_query: Mapping[str, object] | None = None,
         interceptors: list[Interceptor] | None = None,
-
     ) -> None:
         self._version = version
         self._base_url = self._enforce_trailing_slash(URL(base_url))
@@ -378,9 +375,9 @@ def __init__(
                 "max_retries cannot be None. If you want to disable retries, pass `0`; if you want unlimited retries, pass `math.inf` or a very high number; if you want the default behavior, pass `openai.DEFAULT_MAX_RETRIES`"
             )
 
-
         self._interceptor_chain = InterceptorChain(interceptors)
-    def _enforce_trailing_slash(self, url: URL) -> URL:
+
+    def _enforce_trailing_slash(self, url: URL) -> URL:
         if url.raw_path.endswith(b"/"):
             return url
         return url.copy_with(raw_path=url.raw_path + b"/")
@@ -839,9 +836,7 @@ def __init__(
         custom_headers: Mapping[str, str] | None = None,
         custom_query: Mapping[str, object] | None = None,
         _strict_response_validation: bool,
-
         interceptors: list[Interceptor] | None = None,
-
     ) -> None:
         kwargs: dict[str, Any] = {}
         if limits is not None:
@@ -905,9 +900,7 @@ def __init__(
             custom_query=custom_query,
             custom_headers=custom_headers,
             _strict_response_validation=_strict_response_validation,
-
             interceptors=interceptors,
-
         )
         self._client = http_client or SyncHttpxClientWrapper(
             base_url=base_url,
@@ -1431,7 +1424,6 @@ def __init__(
         http_client: httpx.AsyncClient | None = None,
         custom_headers: Mapping[str, str] | None = None,
         custom_query: Mapping[str, object] | None = None,
-
         interceptors: list[Interceptor] | None = None,
     ) -> None:
         kwargs: dict[str, Any] = {}
@@ -1496,9 +1488,7 @@ def __init__(
             custom_query=custom_query,
             custom_headers=custom_headers,
             _strict_response_validation=_strict_response_validation,
-
             interceptors=interceptors,
-
         )
         self._client = http_client or AsyncHttpxClientWrapper(
             base_url=base_url,
diff --git a/src/openai/_client.py b/src/openai/_client.py
index 40abe08107..b181b656fe 100644
--- a/src/openai/_client.py
+++ b/src/openai/_client.py
@@ -7,6 +7,7 @@
 from typing_extensions import Self, override
 
 import httpx
+
 from openai._interceptor import Interceptor
 
 from . import _exceptions
 from ._qs import Querystring
@@ -98,7 +99,7 @@ def __init__(
         # part of our public interface in the future.
         _strict_response_validation: bool = False,
         interceptors: list[Interceptor] | None = None,
-    ) -> None:
+    ) -> None:
         """Construct a new synchronous openai client instance.
 
         This automatically infers the following arguments from their corresponding environment variables if they are not provided:
@@ -138,8 +139,8 @@ def __init__(
             custom_headers=default_headers,
             custom_query=default_query,
             _strict_response_validation=_strict_response_validation,
-            interceptors=interceptors,
-)
+            interceptors=interceptors,
+        )
 
         self._default_stream_cls = Stream
 
@@ -329,7 +330,7 @@ def __init__(
         # part of our public interface in the future.
         _strict_response_validation: bool = False,
         interceptors: list[Interceptor] | None = None,
-    ) -> None:
+    ) -> None:
         """Construct a new async openai client instance.
 
         This automatically infers the following arguments from their corresponding environment variables if they are not provided:
diff --git a/src/openai/_interceptor.py b/src/openai/_interceptor.py
index 2aeb6e5650..ed7a51d823 100644
--- a/src/openai/_interceptor.py
+++ b/src/openai/_interceptor.py
@@ -1,8 +1,8 @@
 from __future__ import annotations
 
 from abc import ABC, abstractmethod
+from typing import Any, Dict, Union, Generic, TypeVar, Optional
 from dataclasses import dataclass
-from typing import Dict, Optional, TypeVar, Generic, Any, Union
 
 import httpx
 
@@ -10,24 +10,29 @@
 
 T = TypeVar("T")
 
+
 @dataclass
 class InterceptorRequest:
     """Request data container for interceptor processing"""
+
     method: str
     url: str
     headers: Dict[str, str]
     params: Optional[Dict[str, Any]] = None
     body: Optional[Union[Body, bytes]] = None
 
+
 @dataclass
 class InterceptorResponse(Generic[T]):
     """Response data container for interceptor processing"""
+
     status_code: int
     headers: Dict[str, str]
     body: T
     request: InterceptorRequest
     raw_response: httpx.Response
 
+
 class Interceptor(ABC):
     """Base class for request/response interceptors"""
 
@@ -41,6 +46,7 @@ def after_response(self, response: InterceptorResponse[T]) -> InterceptorRespons
         """Process response after receiving"""
         pass
 
+
 class InterceptorChain:
     """Chain of interceptors for sequential request/response processing"""
 
@@ -55,7 +61,7 @@ def execute_before_request(self, request: InterceptorRequest) -> InterceptorRequ
         for interceptor in self._interceptors:
             try:
                 current_request = interceptor.before_request(current_request)
-            except Exception as e:
+            except Exception:
                 continue
         return current_request
 
@@ -64,6 +70,6 @@ def execute_after_response(self, response: InterceptorResponse[T]) -> Intercepto
         for interceptor in self._interceptors:
             try:
                 current_response = interceptor.after_response(current_response)
-            except Exception as e:
+            except Exception:
                 continue
-        return current_response
\ No newline at end of file
+        return current_response
diff --git a/src/openai/lib/azure.py b/src/openai/lib/azure.py
index f857d76e51..c7a7c984ab 100644
--- a/src/openai/lib/azure.py
+++ b/src/openai/lib/azure.py
@@ -7,6 +7,8 @@
 
 import httpx
 
+from openai._interceptor import Interceptor
+
 from .._types import NOT_GIVEN, Omit, Query, Timeout, NotGiven
 from .._utils import is_given, is_mapping
 from .._client import OpenAI, AsyncOpenAI
@@ -83,6 +85,7 @@ def __init__(
         default_query: Mapping[str, object] | None = None,
         http_client: httpx.Client | None = None,
         _strict_response_validation: bool = False,
+        interceptors: list[Interceptor] | None = None,
     ) -> None: ...
 
     @overload
@@ -102,6 +105,7 @@ def __init__(
         default_query: Mapping[str, object] | None = None,
         http_client: httpx.Client | None = None,
         _strict_response_validation: bool = False,
+        interceptors: list[Interceptor] | None = None,
     ) -> None: ...
 
     @overload
@@ -121,6 +125,7 @@ def __init__(
         default_query: Mapping[str, object] | None = None,
         http_client: httpx.Client | None = None,
         _strict_response_validation: bool = False,
+        interceptors: list[Interceptor] | None = None,
     ) -> None: ...
 
     def __init__(
@@ -142,6 +147,7 @@ def __init__(
         default_query: Mapping[str, object] | None = None,
         http_client: httpx.Client | None = None,
         _strict_response_validation: bool = False,
+        interceptors: list[Interceptor] | None = None,
     ) -> None:
         """Construct a new synchronous azure openai client instance.
 
@@ -220,6 +226,7 @@ def __init__(
             http_client=http_client,
             websocket_base_url=websocket_base_url,
             _strict_response_validation=_strict_response_validation,
+            interceptors=interceptors,
         )
         self._api_version = api_version
         self._azure_ad_token = azure_ad_token
@@ -244,6 +251,7 @@ def copy(
         set_default_headers: Mapping[str, str] | None = None,
         default_query: Mapping[str, object] | None = None,
         set_default_query: Mapping[str, object] | None = None,
+        interceptors: list[Interceptor] | None = None,
         _extra_kwargs: Mapping[str, Any] = {},
     ) -> Self:
         """
@@ -268,6 +276,7 @@ def copy(
                 "azure_ad_token_provider": azure_ad_token_provider or self._azure_ad_token_provider,
                 **_extra_kwargs,
             },
+            interceptors=interceptors,
         )
 
     with_options = copy
@@ -343,6 +352,7 @@ def __init__(
         default_query: Mapping[str, object] | None = None,
         http_client: httpx.AsyncClient | None = None,
         _strict_response_validation: bool = False,
+        interceptors: list[Interceptor] | None = None,
     ) -> None: ...
 
     @overload
@@ -383,6 +393,7 @@ def __init__(
         default_query: Mapping[str, object] | None = None,
         http_client: httpx.AsyncClient | None = None,
         _strict_response_validation: bool = False,
+        interceptors: list[Interceptor] | None = None,
     ) -> None: ...
 
     def __init__(
@@ -404,6 +415,7 @@ def __init__(
         default_query: Mapping[str, object] | None = None,
         http_client: httpx.AsyncClient | None = None,
         _strict_response_validation: bool = False,
+        interceptors: list[Interceptor] | None = None,
     ) -> None:
         """Construct a new asynchronous azure openai client instance.
 
@@ -482,6 +494,7 @@ def __init__(
             http_client=http_client,
             websocket_base_url=websocket_base_url,
             _strict_response_validation=_strict_response_validation,
+            interceptors=interceptors,
         )
         self._api_version = api_version
         self._azure_ad_token = azure_ad_token
@@ -507,6 +520,7 @@ def copy(
         default_query: Mapping[str, object] | None = None,
         set_default_query: Mapping[str, object] | None = None,
         _extra_kwargs: Mapping[str, Any] = {},
+        interceptors: list[Interceptor] | None = None,
     ) -> Self:
         """
         Create a new client instance re-using the same options given to the current client with optional overriding.
@@ -530,6 +544,7 @@ def copy(
                 "azure_ad_token_provider": azure_ad_token_provider or self._azure_ad_token_provider,
                 **_extra_kwargs,
             },
+            interceptors=interceptors,
         )
 
     with_options = copy
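With these overloads in place, interceptors attach to the Azure clients the same way as to `OpenAI`. A minimal usage sketch, assuming the API added in this patch — the endpoint, key, and header value are placeholders:

```python
from typing import Any
from typing_extensions import override

from openai import AzureOpenAI
from openai._interceptor import Interceptor, InterceptorRequest, InterceptorResponse

class DeploymentTagger(Interceptor):
    """Illustrative: stamp every Azure request with a team header."""

    @override
    def before_request(self, request: InterceptorRequest) -> InterceptorRequest:
        request.headers["X-Team"] = "platform"  # placeholder value
        return request

    @override
    def after_response(self, response: InterceptorResponse[Any]) -> InterceptorResponse[Any]:
        return response

client = AzureOpenAI(
    api_version="2024-02-01",
    azure_endpoint="https://example-resource.azure.openai.com",  # placeholder
    api_key="...",  # placeholder
    interceptors=[DeploymentTagger()],
)
```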
diff --git a/tests/test_interceptors.py b/tests/test_interceptors.py
index 3e9c883bf7..8dbfddd436 100644
--- a/tests/test_interceptors.py
+++ b/tests/test_interceptors.py
@@ -1,18 +1,20 @@
-import pytest
-import httpx
-from typing import Dict, Any, cast, TypeVar
+from typing import Any, Dict, TypeVar, cast
 from typing_extensions import override
-from openai._interceptor import InterceptorRequest, InterceptorResponse, Interceptor
+
+import httpx
+
+from openai._interceptor import Interceptor, InterceptorRequest, InterceptorResponse
 
 T = TypeVar("T")
 
+
 class TestMessageModifierInterceptor:
-    def test_before_request_chat_completions(self, caplog: pytest.LogCaptureFixture) -> None:
+    def test_before_request_chat_completions(self) -> None:
         class MessageModifierInterceptor(Interceptor):
             @override
             def before_request(self, request: InterceptorRequest) -> InterceptorRequest:
                 if isinstance(request.body, dict):
-                    body = cast(Dict[str, Any], request.body) # type: ignore
+                    body = cast(Dict[str, Any], request.body)  # type: ignore
                     if "messages" in body:
                         print("\n=== Message Modification Process ===")
                         for message in body["messages"]:
@@ -36,29 +38,27 @@ def after_response(self, response: InterceptorResponse[Any]) -> InterceptorRespo
             method="post",
             url="https://api.openai.com/v1/chat/completions",
             headers={"Authorization": "Bearer test_key"},
-            body={
-                "model": "gpt-3.5-turbo",
-                "messages": [{"role": "user", "content": "Hello"}]
-            }
+            body={"model": "gpt-3.5-turbo", "messages": [{"role": "user", "content": "Hello"}]},
         )
 
         processed_request = interceptor.before_request(request)
 
         # Verify the message was modified
         assert isinstance(processed_request.body, dict)
-        body = cast(Dict[str, Any], processed_request.body) # type: ignore
+        body = cast(Dict[str, Any], processed_request.body)  # type: ignore
         assert body["messages"][0]["content"] == "Hello [Disclaimer: This is a modified message]"
         assert body["model"] == "gpt-3.5-turbo"  # Other fields unchanged
         assert processed_request.method == "post"  # Request properties unchanged
         assert processed_request.url == "https://api.openai.com/v1/chat/completions"
 
-    def test_before_request_non_chat_completions(self, caplog: pytest.LogCaptureFixture) -> None:
+    def test_before_request_non_chat_completions(self) -> None:
         """Test that the interceptor doesn't modify non-chat-completions requests"""
+
         class MessageModifierInterceptor(Interceptor):
             @override
             def before_request(self, request: InterceptorRequest) -> InterceptorRequest:
                 if isinstance(request.body, dict):
-                    body = cast(Dict[str, Any], request.body) # type: ignore
+                    body = cast(Dict[str, Any], request.body)  # type: ignore
                     if "messages" in body:
                         print("\n=== Message Modification Process ===")
                         for message in body["messages"]:
@@ -82,27 +82,25 @@ def after_response(self, response: InterceptorResponse[Any]) -> InterceptorRespo
             method="post",
             url="https://api.openai.com/v1/embeddings",
             headers={"Authorization": "Bearer test_key"},
-            body={
-                "model": "text-embedding-ada-002",
-                "input": "Hello"
-            }
+            body={"model": "text-embedding-ada-002", "input": "Hello"},
         )
 
         processed_request = interceptor.before_request(request)
 
         # Verify the request was not modified
         assert isinstance(processed_request.body, dict)
-        body = cast(Dict[str, Any], processed_request.body) # type: ignore
+        body = cast(Dict[str, Any], processed_request.body)  # type: ignore
         assert body["input"] == "Hello"  # Content unchanged
         assert body["model"] == "text-embedding-ada-002"  # Model unchanged
 
-    def test_after_response(self, caplog: pytest.LogCaptureFixture) -> None:
+    def test_after_response(self) -> None:
         """Test that after_response doesn't modify the response"""
+
         class MessageModifierInterceptor(Interceptor):
             @override
             def before_request(self, request: InterceptorRequest) -> InterceptorRequest:
                 if isinstance(request.body, dict):
-                    body = cast(Dict[str, Any], request.body) # type: ignore
+                    body = cast(Dict[str, Any], request.body)  # type: ignore
                     if "messages" in body:
                         print("\n=== Message Modification Process ===")
                         for message in body["messages"]:
@@ -125,25 +123,25 @@ def after_response(self, response: InterceptorResponse[Any]) -> InterceptorRespo
         request = InterceptorRequest(
             method="post",
             url="https://api.openai.com/v1/chat/completions",
-            headers={"Authorization": "Bearer test_key"}
+            headers={"Authorization": "Bearer test_key"},
         )
-
+
         mock_raw_response = httpx.Response(
             status_code=200,
             headers={"Content-Type": "application/json"},
-            json={"choices": [{"message": {"content": "Hello!"}}]}
+            json={"choices": [{"message": {"content": "Hello!"}}]},
         )
-
+
         response = InterceptorResponse[Dict[str, Any]](
             status_code=200,
             headers={"Content-Type": "application/json"},
             body={"choices": [{"message": {"content": "Hello!"}}]},
             request=request,
-            raw_response=mock_raw_response
+            raw_response=mock_raw_response,
        )
 
         processed_response = interceptor.after_response(response)
-
+
         # Verify response is unchanged
         assert processed_response == response
         assert processed_response.status_code == 200
logging.getLogger("openai") @@ -165,9 +166,7 @@ def logger_with_filter(self) -> logging.Logger: def test_azure_api_key_redacted(self, respx_mock: MockRouter, caplog: pytest.LogCaptureFixture) -> None: respx_mock.post( "https://example-resource.azure.openai.com/openai/deployments/gpt-4/chat/completions?api-version=2024-06-01" - ).mock( - return_value=httpx.Response(200, json={"model": "gpt-4"}) - ) + ).mock(return_value=httpx.Response(200, json={"model": "gpt-4"})) client = AzureOpenAI( api_version="2024-06-01", @@ -182,14 +181,11 @@ def test_azure_api_key_redacted(self, respx_mock: MockRouter, caplog: pytest.Log if is_dict(record.args) and record.args.get("headers") and is_dict(record.args["headers"]): assert record.args["headers"]["api-key"] == "" - @pytest.mark.respx() def test_azure_bearer_token_redacted(self, respx_mock: MockRouter, caplog: pytest.LogCaptureFixture) -> None: respx_mock.post( "https://example-resource.azure.openai.com/openai/deployments/gpt-4/chat/completions?api-version=2024-06-01" - ).mock( - return_value=httpx.Response(200, json={"model": "gpt-4"}) - ) + ).mock(return_value=httpx.Response(200, json={"model": "gpt-4"})) client = AzureOpenAI( api_version="2024-06-01", @@ -204,15 +200,12 @@ def test_azure_bearer_token_redacted(self, respx_mock: MockRouter, caplog: pytes if is_dict(record.args) and record.args.get("headers") and is_dict(record.args["headers"]): assert record.args["headers"]["Authorization"] == "" - @pytest.mark.asyncio @pytest.mark.respx() async def test_azure_api_key_redacted_async(self, respx_mock: MockRouter, caplog: pytest.LogCaptureFixture) -> None: respx_mock.post( "https://example-resource.azure.openai.com/openai/deployments/gpt-4/chat/completions?api-version=2024-06-01" - ).mock( - return_value=httpx.Response(200, json={"model": "gpt-4"}) - ) + ).mock(return_value=httpx.Response(200, json={"model": "gpt-4"})) client = AsyncAzureOpenAI( api_version="2024-06-01", @@ -227,15 +220,14 @@ async def test_azure_api_key_redacted_async(self, respx_mock: MockRouter, caplog if is_dict(record.args) and record.args.get("headers") and is_dict(record.args["headers"]): assert record.args["headers"]["api-key"] == "" - @pytest.mark.asyncio @pytest.mark.respx() - async def test_azure_bearer_token_redacted_async(self, respx_mock: MockRouter, caplog: pytest.LogCaptureFixture) -> None: + async def test_azure_bearer_token_redacted_async( + self, respx_mock: MockRouter, caplog: pytest.LogCaptureFixture + ) -> None: respx_mock.post( "https://example-resource.azure.openai.com/openai/deployments/gpt-4/chat/completions?api-version=2024-06-01" - ).mock( - return_value=httpx.Response(200, json={"model": "gpt-4"}) - ) + ).mock(return_value=httpx.Response(200, json={"model": "gpt-4"})) client = AsyncAzureOpenAI( api_version="2024-06-01", @@ -249,3 +241,162 @@ async def test_azure_bearer_token_redacted_async(self, respx_mock: MockRouter, c for record in caplog.records: if is_dict(record.args) and record.args.get("headers") and is_dict(record.args["headers"]): assert record.args["headers"]["Authorization"] == "" + + +class TestAzureInterceptors: + def test_azure_interceptor_chat_completions(self) -> None: + """Test that interceptors work with Azure chat completions endpoint""" + + class AzureMessageModifierInterceptor(Interceptor): + @override + def before_request(self, request: InterceptorRequest) -> InterceptorRequest: + if isinstance(request.body, dict): + body = cast(Dict[str, Any], request.body) # type: ignore + if "messages" in body: + for message in body["messages"]: + if 
message["role"] == "user": + message["content"] += " [Azure Modified]" + return request + + @override + def after_response(self, response: InterceptorResponse[Any]) -> InterceptorResponse[Any]: + return response + + interceptor = AzureMessageModifierInterceptor() + request = InterceptorRequest( + method="post", + url="https://example-resource.azure.openai.com/openai/deployments/gpt-4/chat/completions", + headers={"api-key": "test_key"}, + body={"messages": [{"role": "user", "content": "Hello"}]}, + ) + + processed_request = interceptor.before_request(request) + + # Verify the message was modified + assert isinstance(processed_request.body, dict) + body = cast(Dict[str, Any], processed_request.body) # type: ignore + assert body["messages"][0]["content"] == "Hello [Azure Modified]" + assert processed_request.method == "post" + assert ( + processed_request.url + == "https://example-resource.azure.openai.com/openai/deployments/gpt-4/chat/completions" + ) + + def test_azure_interceptor_embeddings(self) -> None: + """Test that interceptors work with Azure embeddings endpoint""" + + class AzureInputModifierInterceptor(Interceptor): + @override + def before_request(self, request: InterceptorRequest) -> InterceptorRequest: + if isinstance(request.body, dict): + body = cast(Dict[str, Any], request.body) # type: ignore + if "input" in body: + body["input"] = f"{body['input']} [Azure Modified]" + return request + + @override + def after_response(self, response: InterceptorResponse[Any]) -> InterceptorResponse[Any]: + return response + + interceptor = AzureInputModifierInterceptor() + request = InterceptorRequest( + method="post", + url="https://example-resource.azure.openai.com/openai/deployments/text-embedding-ada-002/embeddings", + headers={"api-key": "test_key"}, + body={"input": "Hello"}, + ) + + processed_request = interceptor.before_request(request) + + # Verify the input was modified + assert isinstance(processed_request.body, dict) + body = cast(Dict[str, Any], processed_request.body) # type: ignore + assert body["input"] == "Hello [Azure Modified]" + assert processed_request.method == "post" + assert ( + processed_request.url + == "https://example-resource.azure.openai.com/openai/deployments/text-embedding-ada-002/embeddings" + ) + + @pytest.mark.respx() + def test_azure_interceptor_with_client(self, respx_mock: MockRouter) -> None: + """Test that interceptors work when used with the Azure client""" + + class AzureMessageModifierInterceptor(Interceptor): + @override + def before_request(self, request: InterceptorRequest) -> InterceptorRequest: + if isinstance(request.body, dict): + body = cast(Dict[str, Any], request.body) # type: ignore + if "messages" in body: + for message in body["messages"]: + if message["role"] == "user": + message["content"] += " [Azure Modified]" + return request + + @override + def after_response(self, response: InterceptorResponse[Any]) -> InterceptorResponse[Any]: + return response + + respx_mock.post( + "https://example-resource.azure.openai.com/openai/deployments/gpt-4/chat/completions?api-version=2024-02-01" + ).mock(return_value=httpx.Response(200, json={"choices": [{"message": {"content": "Hello!"}}]})) + + client = AzureOpenAI( + api_version="2024-02-01", + api_key="test_key", + azure_endpoint="https://example-resource.azure.openai.com", + interceptors=[AzureMessageModifierInterceptor()], + ) + + # Send request through client to trigger interceptor + client.chat.completions.create( + model="gpt-4", + messages=[{"role": "user", "content": "Hello"}], + ) + + # 
Verify the request was intercepted by checking the recorded request + request = cast(MockRequestCall, respx_mock.calls[0]).request + body = json.loads(request.content) + assert body["messages"][0]["content"] == "Hello [Azure Modified]" + + @pytest.mark.asyncio + @pytest.mark.respx() + async def test_azure_interceptor_with_async_client(self, respx_mock: MockRouter) -> None: + """Test that interceptors work when used with the async Azure client""" + + class AzureMessageModifierInterceptor(Interceptor): + @override + def before_request(self, request: InterceptorRequest) -> InterceptorRequest: + if isinstance(request.body, dict): + body = cast(Dict[str, Any], request.body) # type: ignore + if "messages" in body: + for message in body["messages"]: + if message["role"] == "user": + message["content"] += " [Azure Modified]" + return request + + @override + def after_response(self, response: InterceptorResponse[Any]) -> InterceptorResponse[Any]: + return response + + respx_mock.post( + "https://example-resource.azure.openai.com/openai/deployments/gpt-4/chat/completions?api-version=2024-02-01" + ).mock(return_value=httpx.Response(200, json={"choices": [{"message": {"content": "Hello!"}}]})) + + client = AsyncAzureOpenAI( + api_version="2024-02-01", + api_key="test_key", + azure_endpoint="https://example-resource.azure.openai.com", + interceptors=[AzureMessageModifierInterceptor()], + ) + + # Send request through client to trigger interceptor + await client.chat.completions.create( + model="gpt-4", + messages=[{"role": "user", "content": "Hello"}], + ) + + # Verify the request was intercepted by checking the recorded request + request = cast(MockRequestCall, respx_mock.calls[0]).request + body = json.loads(request.content) + assert body["messages"][0]["content"] == "Hello [Azure Modified]"
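The async clients accept the same `interceptors` argument, as the `AsyncAzureOpenAI` tests above exercise. For completeness, a minimal end-to-end sketch with `AsyncOpenAI` — the model and prompt are placeholders, and the snippet assumes `OPENAI_API_KEY` is set in the environment:

```python
import asyncio
from typing import Any
from typing_extensions import override

from openai import AsyncOpenAI
from openai._interceptor import Interceptor, InterceptorRequest, InterceptorResponse

class NoopInterceptor(Interceptor):
    """Illustrative pass-through interceptor."""

    @override
    def before_request(self, request: InterceptorRequest) -> InterceptorRequest:
        return request

    @override
    def after_response(self, response: InterceptorResponse[Any]) -> InterceptorResponse[Any]:
        return response

async def main() -> None:
    client = AsyncOpenAI(interceptors=[NoopInterceptor()])
    completion = await client.chat.completions.create(
        model="gpt-4",  # placeholder
        messages=[{"role": "user", "content": "Hello"}],
    )
    print(completion)

asyncio.run(main())
```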