Bump openai and switch from dall-e-2 to dall-e-3 #104998

Merged: 16 commits, Dec 11, 2023
71 changes: 48 additions & 23 deletions homeassistant/components/openai_conversation/__init__.py
@@ -1,12 +1,10 @@
"""The OpenAI Conversation integration."""
from __future__ import annotations

from functools import partial
import logging
from typing import Literal

import openai
from openai import error
import voluptuous as vol

from homeassistant.components import conversation
@@ -23,7 +21,13 @@
HomeAssistantError,
TemplateError,
)
from homeassistant.helpers import config_validation as cv, intent, selector, template
from homeassistant.helpers import (
config_validation as cv,
intent,
issue_registry as ir,
selector,
template,
)
from homeassistant.helpers.typing import ConfigType
from homeassistant.util import ulid

@@ -52,17 +56,38 @@ async def async_setup(hass: HomeAssistant, config: ConfigType) -> bool:

async def render_image(call: ServiceCall) -> ServiceResponse:
"""Render an image with dall-e."""
client = hass.data[DOMAIN][call.data["config_entry"]]

if call.data["size"] in ("256", "512", "1024"):
ir.async_create_issue(
hass,
DOMAIN,
"image_size_deprecated_format",
breaks_in_ha_version="2024.7.0",
is_fixable=False,
is_persistent=True,
learn_more_url="https://www.home-assistant.io/integrations/openai_conversation/",
severity=ir.IssueSeverity.WARNING,
translation_key="image_size_deprecated_format",
)
size = "1024x1024"
else:
size = call.data["size"]

try:
response = await openai.Image.acreate(
api_key=hass.data[DOMAIN][call.data["config_entry"]],
response = await client.images.generate(
model="dall-e-3",
prompt=call.data["prompt"],
size=size,
quality=call.data["quality"],
style=call.data["style"],
response_format="url",
n=1,
size=f'{call.data["size"]}x{call.data["size"]}',
)
except error.OpenAIError as err:
except openai.OpenAIError as err:
raise HomeAssistantError(f"Error generating image: {err}") from err

return response["data"][0]
return response.data[0].model_dump(exclude={"b64_json"})

hass.services.async_register(
DOMAIN,
@@ -76,7 +101,11 @@ async def render_image(call: ServiceCall) -> ServiceResponse:
}
),
vol.Required("prompt"): cv.string,
vol.Optional("size", default="512"): vol.In(("256", "512", "1024")),
vol.Optional("size", default="1024x1024"): vol.In(
("1024x1024", "1024x1792", "1792x1024", "256", "512", "1024")
),
vol.Optional("quality", default="standard"): vol.In(("standard", "hd")),
vol.Optional("style", default="vivid"): vol.In(("vivid", "natural")),
}
),
supports_response=SupportsResponse.ONLY,
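For context, here is a minimal standalone sketch of the openai 1.x image call that the service body above now wraps; the API key, prompt, and parameter values are placeholders, not taken from the integration.

```python
import asyncio

import openai


async def main() -> None:
    # The 1.x SDK replaces module-level calls like openai.Image.acreate
    # with methods on an explicit client instance.
    client = openai.AsyncOpenAI(api_key="sk-...")  # placeholder key
    response = await client.images.generate(
        model="dall-e-3",
        prompt="A watercolor painting of a smart home",  # placeholder prompt
        size="1024x1024",      # dall-e-3 accepts 1024x1024, 1024x1792, 1792x1024
        quality="standard",    # "standard" or "hd"
        style="vivid",         # "vivid" or "natural"
        response_format="url",
        n=1,                   # dall-e-3 only supports n=1
    )
    # Responses are pydantic models rather than dicts; model_dump() produces
    # the same shape the service returns above.
    print(response.data[0].model_dump(exclude={"b64_json"}))


asyncio.run(main())
```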
@@ -86,21 +115,16 @@ async def render_image(call: ServiceCall) -> ServiceResponse:

async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
"""Set up OpenAI Conversation from a config entry."""
client = openai.AsyncOpenAI(api_key=entry.data[CONF_API_KEY])
try:
await hass.async_add_executor_job(
partial(
openai.Model.list,
api_key=entry.data[CONF_API_KEY],
request_timeout=10,
)
)
except error.AuthenticationError as err:
await hass.async_add_executor_job(client.with_options(timeout=10.0).models.list)
except openai.AuthenticationError as err:
_LOGGER.error("Invalid API key: %s", err)
return False
except error.OpenAIError as err:
except openai.OpenAIError as err:
raise ConfigEntryNotReady(err) from err

hass.data.setdefault(DOMAIN, {})[entry.entry_id] = entry.data[CONF_API_KEY]
hass.data.setdefault(DOMAIN, {})[entry.entry_id] = client

conversation.async_set_agent(hass, entry, OpenAIAgent(hass, entry))
return True
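The with_options(timeout=10.0) call above applies a per-request timeout to a copy of the client. A hedged sketch of the same key check as a plain script, using the synchronous client for brevity (the key is a placeholder):

```python
import openai


def api_key_is_valid(api_key: str) -> bool:
    """Return True if the key can list models, False if OpenAI rejects it."""
    client = openai.OpenAI(api_key=api_key)
    try:
        # with_options() returns a client copy with per-request settings,
        # mirroring the 10-second timeout used during config entry setup.
        client.with_options(timeout=10.0).models.list()
    except openai.AuthenticationError:
        return False
    return True


if __name__ == "__main__":
    print(api_key_is_valid("sk-..."))  # placeholder key
```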
@@ -160,17 +184,18 @@ async def async_process(

_LOGGER.debug("Prompt for %s: %s", model, messages)

client = self.hass.data[DOMAIN][self.entry.entry_id]

try:
result = await openai.ChatCompletion.acreate(
api_key=self.entry.data[CONF_API_KEY],
result = await client.chat.completions.create(
model=model,
messages=messages,
max_tokens=max_tokens,
top_p=top_p,
temperature=temperature,
user=conversation_id,
)
except error.OpenAIError as err:
except openai.OpenAIError as err:
intent_response = intent.IntentResponse(language=user_input.language)
intent_response.async_set_error(
intent.IntentResponseErrorCode.UNKNOWN,
Expand All @@ -181,7 +206,7 @@ async def async_process(
)

_LOGGER.debug("Response %s", result)
response = result["choices"][0]["message"]
response = result.choices[0].message.model_dump(include={"role", "content"})
messages.append(response)
self.history[conversation_id] = messages

10 changes: 4 additions & 6 deletions homeassistant/components/openai_conversation/config_flow.py
@@ -1,14 +1,12 @@
"""Config flow for OpenAI Conversation integration."""
from __future__ import annotations

from functools import partial
import logging
import types
from types import MappingProxyType
from typing import Any

import openai
from openai import error
import voluptuous as vol

from homeassistant import config_entries
@@ -59,8 +57,8 @@ async def validate_input(hass: HomeAssistant, data: dict[str, Any]) -> None:

Data has the keys from STEP_USER_DATA_SCHEMA with values provided by the user.
"""
openai.api_key = data[CONF_API_KEY]
await hass.async_add_executor_job(partial(openai.Model.list, request_timeout=10))
client = openai.AsyncOpenAI(api_key=data[CONF_API_KEY])
await hass.async_add_executor_job(client.with_options(timeout=10.0).models.list)


class ConfigFlow(config_entries.ConfigFlow, domain=DOMAIN):
@@ -81,9 +79,9 @@ async def async_step_user(

try:
await validate_input(self.hass, user_input)
except error.APIConnectionError:
except openai.APIConnectionError:
errors["base"] = "cannot_connect"
except error.AuthenticationError:
except openai.AuthenticationError:
errors["base"] = "invalid_auth"
except Exception: # pylint: disable=broad-except
_LOGGER.exception("Unexpected exception")
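The old openai.error module is gone in 1.x; the exception classes now live at the package top level. A hedged sketch of the mapping the flow performs above (the function name and error strings mirror the flow, the key is a placeholder):

```python
import openai


async def check_api_key(api_key: str) -> str | None:
    """Return a flow-style error code, or None when the key validates."""
    client = openai.AsyncOpenAI(api_key=api_key)
    try:
        await client.with_options(timeout=10.0).models.list()
    except openai.APIConnectionError:
        return "cannot_connect"     # network / DNS problems
    except openai.AuthenticationError:
        return "invalid_auth"       # key rejected by OpenAI
    except openai.OpenAIError:
        return "unknown"            # anything else from the SDK
    return None
```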
homeassistant/components/openai_conversation/manifest.json
@@ -7,5 +7,5 @@
"documentation": "https://www.home-assistant.io/integrations/openai_conversation",
"integration_type": "service",
"iot_class": "cloud_polling",
"requirements": ["openai==0.27.2"]
"requirements": ["openai==1.3.8"]
}
30 changes: 24 additions & 6 deletions homeassistant/components/openai_conversation/services.yaml
@@ -11,12 +11,30 @@ generate_image:
text:
multiline: true
size:
required: true
example: "512"
default: "512"
required: false
example: "1024x1024"
default: "1024x1024"
selector:
select:
options:
- "1024x1024"
- "1024x1792"
- "1792x1024"
quality:
required: false
example: "standard"
default: "standard"
selector:
select:
options:
- "standard"
- "hd"
style:
required: false
example: "vivid"
default: "vivid"
selector:
select:
options:
- "256"
- "512"
- "1024"
- "vivid"
- "natural"
14 changes: 14 additions & 0 deletions homeassistant/components/openai_conversation/strings.json
@@ -43,8 +43,22 @@
"size": {
"name": "Size",
"description": "The size of the image to generate"
},
"quality": {
"name": "Quality",
"description": "The quality of the image that will be generated"
},
"style": {
"name": "Style",
"description": "The style of the generated image"
}
}
}
},
"issues": {
"image_size_deprecated_format": {
"title": "Deprecated size format for image generation service",
"description": "OpenAI is now using Dall-E 3 to generate images when calling `openai_conversation.generate_image`, which supports different sizes. Valid values are now \"1024x1024\", \"1024x1792\", \"1792x1024\". The old values of \"256\", \"512\", \"1024\" are currently interpreted as \"1024x1024\".\nPlease update your scripts or automations with the new parameters."
}
}
}
2 changes: 1 addition & 1 deletion requirements_all.txt
@@ -1393,7 +1393,7 @@ open-garage==0.2.0
open-meteo==0.3.1

# homeassistant.components.openai_conversation
openai==0.27.2
openai==1.3.8

# homeassistant.components.opencv
# opencv-python-headless==4.6.0.66
2 changes: 1 addition & 1 deletion requirements_test_all.txt
@@ -1087,7 +1087,7 @@ open-garage==0.2.0
open-meteo==0.3.1

# homeassistant.components.openai_conversation
openai==0.27.2
openai==1.3.8

# homeassistant.components.openerz
openerz-api==0.2.0
2 changes: 1 addition & 1 deletion tests/components/openai_conversation/conftest.py
@@ -25,7 +25,7 @@ def mock_config_entry(hass):
async def mock_init_component(hass, mock_config_entry):
"""Initialize integration."""
with patch(
"openai.Model.list",
"openai.resources.models.AsyncModels.list",
):
assert await async_setup_component(hass, "openai_conversation", {})
await hass.async_block_till_done()
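The patch target moves because the 1.x SDK has no module-level openai.Model; model listing is dispatched through the client's resource class. A hedged sketch of a test using the same patch (fixture names follow the conftest above):

```python
from unittest.mock import patch

from homeassistant.setup import async_setup_component


async def test_setup_with_mocked_model_list(hass, mock_config_entry) -> None:
    """Set up the integration without reaching the OpenAI API."""
    # openai.Model.list no longer exists in 1.x; client.models.list() is
    # routed through openai.resources.models.AsyncModels.list instead.
    with patch("openai.resources.models.AsyncModels.list"):
        assert await async_setup_component(hass, "openai_conversation", {})
        await hass.async_block_till_done()
```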
23 changes: 17 additions & 6 deletions tests/components/openai_conversation/test_config_flow.py
@@ -1,7 +1,8 @@
"""Test the OpenAI Conversation config flow."""
from unittest.mock import patch

from openai.error import APIConnectionError, AuthenticationError, InvalidRequestError
from httpx import Response
from openai import APIConnectionError, AuthenticationError, BadRequestError
import pytest

from homeassistant import config_entries
@@ -32,7 +33,7 @@ async def test_form(hass: HomeAssistant) -> None:
assert result["errors"] is None

with patch(
"homeassistant.components.openai_conversation.config_flow.openai.Model.list",
"homeassistant.components.openai_conversation.config_flow.openai.resources.models.AsyncModels.list",
), patch(
"homeassistant.components.openai_conversation.async_setup_entry",
return_value=True,
@@ -76,9 +77,19 @@ async def test_options(
@pytest.mark.parametrize(
("side_effect", "error"),
[
(APIConnectionError(""), "cannot_connect"),
(AuthenticationError, "invalid_auth"),
(InvalidRequestError, "unknown"),
(APIConnectionError(request=None), "cannot_connect"),
(
AuthenticationError(
response=Response(status_code=None, request=""), body=None, message=None
),
"invalid_auth",
),
(
BadRequestError(
response=Response(status_code=None, request=""), body=None, message=None
),
"unknown",
),
],
)
async def test_form_invalid_auth(hass: HomeAssistant, side_effect, error) -> None:
@@ -88,7 +99,7 @@ async def test_form_invalid_auth(hass: HomeAssistant, side_effect, error) -> None:
)

with patch(
"homeassistant.components.openai_conversation.config_flow.openai.Model.list",
"homeassistant.components.openai_conversation.config_flow.openai.resources.models.AsyncModels.list",
side_effect=side_effect,
):
result2 = await hass.config_entries.flow.async_configure(
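The 1.x status-error classes wrap an httpx response, which is why the parametrized side effects above became more verbose. A hedged sketch of constructing them with fully typed arguments (URL and message are placeholders):

```python
import httpx
import openai

# openai 1.x status errors carry the underlying httpx response; building one
# by hand is enough for unit tests that only care about the exception type.
request = httpx.Request("GET", "https://api.openai.com/v1/models")
response = httpx.Response(status_code=401, request=request)

auth_error = openai.AuthenticationError(
    message="Incorrect API key provided",  # placeholder message
    response=response,
    body=None,
)

# APIConnectionError is raised before any response exists, so it only
# needs the originating request.
conn_error = openai.APIConnectionError(request=request)
```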