Skip to content

Commit 4183118

Browse files
Auto-generated API code
1 parent 8dd1fb6 commit 4183118

File tree

5 files changed

+271
-6
lines changed

5 files changed

+271
-6
lines changed

elasticsearch/_async/client/__init__.py

+28
Original file line numberDiff line numberDiff line change
@@ -1121,12 +1121,17 @@ async def create(
11211121
error_trace: t.Optional[bool] = None,
11221122
filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
11231123
human: t.Optional[bool] = None,
1124+
if_primary_term: t.Optional[int] = None,
1125+
if_seq_no: t.Optional[int] = None,
11241126
include_source_on_error: t.Optional[bool] = None,
1127+
op_type: t.Optional[t.Union[str, t.Literal["create", "index"]]] = None,
11251128
pipeline: t.Optional[str] = None,
11261129
pretty: t.Optional[bool] = None,
11271130
refresh: t.Optional[
11281131
t.Union[bool, str, t.Literal["false", "true", "wait_for"]]
11291132
] = None,
1133+
require_alias: t.Optional[bool] = None,
1134+
require_data_stream: t.Optional[bool] = None,
11301135
routing: t.Optional[str] = None,
11311136
timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
11321137
version: t.Optional[int] = None,
@@ -1204,8 +1209,18 @@ async def create(
12041209
:param id: A unique identifier for the document. To automatically generate a
12051210
document ID, use the `POST /<target>/_doc/` request format.
12061211
:param document:
1212+
:param if_primary_term: Only perform the operation if the document has this primary
1213+
term.
1214+
:param if_seq_no: Only perform the operation if the document has this sequence
1215+
number.
12071216
:param include_source_on_error: True or false if to include the document source
12081217
in the error message in case of parsing errors.
1218+
:param op_type: Set to `create` to only index the document if it does not already
1219+
exist (put if absent). If a document with the specified `_id` already exists,
1220+
the indexing operation will fail. The behavior is the same as using the `<index>/_create`
1221+
endpoint. If a document ID is specified, this parameter defaults to `index`.
1222+
Otherwise, it defaults to `create`. If the request targets a data stream,
1223+
an `op_type` of `create` is required.
12091224
:param pipeline: The ID of the pipeline to use to preprocess incoming documents.
12101225
If the index has a default ingest pipeline specified, setting the value to
12111226
`_none` turns off the default ingest pipeline for this request. If a final
@@ -1214,6 +1229,9 @@ async def create(
12141229
:param refresh: If `true`, Elasticsearch refreshes the affected shards to make
12151230
this operation visible to search. If `wait_for`, it waits for a refresh to
12161231
make this operation visible to search. If `false`, it does nothing with refreshes.
1232+
:param require_alias: If `true`, the destination must be an index alias.
1233+
:param require_data_stream: If `true`, the request's actions must target a data
1234+
stream (existing or to be created).
12171235
:param routing: A custom value that is used to route operations to a specific
12181236
shard.
12191237
:param timeout: The period the request waits for the following operations: automatic
@@ -1254,14 +1272,24 @@ async def create(
12541272
__query["filter_path"] = filter_path
12551273
if human is not None:
12561274
__query["human"] = human
1275+
if if_primary_term is not None:
1276+
__query["if_primary_term"] = if_primary_term
1277+
if if_seq_no is not None:
1278+
__query["if_seq_no"] = if_seq_no
12571279
if include_source_on_error is not None:
12581280
__query["include_source_on_error"] = include_source_on_error
1281+
if op_type is not None:
1282+
__query["op_type"] = op_type
12591283
if pipeline is not None:
12601284
__query["pipeline"] = pipeline
12611285
if pretty is not None:
12621286
__query["pretty"] = pretty
12631287
if refresh is not None:
12641288
__query["refresh"] = refresh
1289+
if require_alias is not None:
1290+
__query["require_alias"] = require_alias
1291+
if require_data_stream is not None:
1292+
__query["require_data_stream"] = require_data_stream
12651293
if routing is not None:
12661294
__query["routing"] = routing
12671295
if timeout is not None:

elasticsearch/_async/client/inference.py

+99-1
Original file line numberDiff line numberDiff line change
@@ -321,6 +321,104 @@ async def put(
321321
path_parts=__path_parts,
322322
)
323323

324+
@_rewrite_parameters(
    body_fields=(
        "service",
        "service_settings",
        "chunking_settings",
        "task_settings",
    ),
)
async def put_openai(
    self,
    *,
    task_type: t.Union[
        str, t.Literal["chat_completion", "completion", "text_embedding"]
    ],
    openai_inference_id: str,
    service: t.Optional[t.Union[str, t.Literal["openai"]]] = None,
    service_settings: t.Optional[t.Mapping[str, t.Any]] = None,
    chunking_settings: t.Optional[t.Mapping[str, t.Any]] = None,
    error_trace: t.Optional[bool] = None,
    filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
    human: t.Optional[bool] = None,
    pretty: t.Optional[bool] = None,
    task_settings: t.Optional[t.Mapping[str, t.Any]] = None,
    body: t.Optional[t.Dict[str, t.Any]] = None,
) -> ObjectApiResponse[t.Any]:
    """
    .. raw:: html

      <p>Create an OpenAI inference endpoint.</p>
      <p>Create an inference endpoint to perform an inference task with the <code>openai</code> service.</p>
      <p>When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running.
      After creating the endpoint, wait for the model deployment to complete before using it.
      To verify the deployment status, use the get trained model statistics API.
      Look for <code>&quot;state&quot;: &quot;fully_allocated&quot;</code> in the response and ensure that the <code>&quot;allocation_count&quot;</code> matches the <code>&quot;target_allocation_count&quot;</code>.
      Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.</p>


    `<https://www.elastic.co/guide/en/elasticsearch/reference/8.18/infer-service-openai.html>`_

    :param task_type: The type of the inference task that the model will perform.
        NOTE: The `chat_completion` task type only supports streaming and only through
        the _stream API.
    :param openai_inference_id: The unique identifier of the inference endpoint.
    :param service: The type of service supported for the specified task type. In
        this case, `openai`.
    :param service_settings: Settings used to install the inference model. These
        settings are specific to the `openai` service.
    :param chunking_settings: The chunking configuration object.
    :param task_settings: Settings to configure the inference task. These settings
        are specific to the task type you specified.
    """
    # Both path components are required; reject empty/placeholder values early.
    if task_type in SKIP_IN_PATH:
        raise ValueError("Empty value passed for parameter 'task_type'")
    if openai_inference_id in SKIP_IN_PATH:
        raise ValueError("Empty value passed for parameter 'openai_inference_id'")
    # `service` and `service_settings` must be supplied unless a raw `body` is.
    if service is None and body is None:
        raise ValueError("Empty value passed for parameter 'service'")
    if service_settings is None and body is None:
        raise ValueError("Empty value passed for parameter 'service_settings'")
    __path_parts: t.Dict[str, str] = {
        "task_type": _quote(task_type),
        "openai_inference_id": _quote(openai_inference_id),
    }
    __path = f'/_inference/{__path_parts["task_type"]}/{__path_parts["openai_inference_id"]}'
    # Collect only the query-string flags the caller actually provided.
    __query: t.Dict[str, t.Any] = {}
    for __param, __value in (
        ("error_trace", error_trace),
        ("filter_path", filter_path),
        ("human", human),
        ("pretty", pretty),
    ):
        if __value is not None:
            __query[__param] = __value
    # A caller-supplied raw `body` takes precedence; otherwise assemble one
    # from the individual body-field parameters.
    __body: t.Dict[str, t.Any] = body if body is not None else {}
    if not __body:
        for __field, __value in (
            ("service", service),
            ("service_settings", service_settings),
            ("chunking_settings", chunking_settings),
            ("task_settings", task_settings),
        ):
            if __value is not None:
                __body[__field] = __value
    if not __body:
        __body = None  # type: ignore[assignment]
    __headers = {"accept": "application/json"}
    if __body is not None:
        __headers["content-type"] = "application/json"
    return await self.perform_request(  # type: ignore[return-value]
        "PUT",
        __path,
        params=__query,
        headers=__headers,
        body=__body,
        endpoint_id="inference.put_openai",
        path_parts=__path_parts,
    )
421+
324422
@_rewrite_parameters(
325423
body_fields=("service", "service_settings"),
326424
)
@@ -341,7 +439,7 @@ async def put_watsonx(
341439
.. raw:: html
342440
343441
<p>Create a Watsonx inference endpoint.</p>
344-
<p>Creates an inference endpoint to perform an inference task with the <code>watsonxai</code> service.
442+
<p>Create an inference endpoint to perform an inference task with the <code>watsonxai</code> service.
345443
You need an IBM Cloud Databases for Elasticsearch deployment to use the <code>watsonxai</code> inference service.
346444
You can provision one through the IBM catalog, the Cloud Databases CLI plug-in, the Cloud Databases API, or Terraform.</p>
347445
<p>When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running.

elasticsearch/_sync/client/__init__.py

+28
Original file line numberDiff line numberDiff line change
@@ -1119,12 +1119,17 @@ def create(
11191119
error_trace: t.Optional[bool] = None,
11201120
filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
11211121
human: t.Optional[bool] = None,
1122+
if_primary_term: t.Optional[int] = None,
1123+
if_seq_no: t.Optional[int] = None,
11221124
include_source_on_error: t.Optional[bool] = None,
1125+
op_type: t.Optional[t.Union[str, t.Literal["create", "index"]]] = None,
11231126
pipeline: t.Optional[str] = None,
11241127
pretty: t.Optional[bool] = None,
11251128
refresh: t.Optional[
11261129
t.Union[bool, str, t.Literal["false", "true", "wait_for"]]
11271130
] = None,
1131+
require_alias: t.Optional[bool] = None,
1132+
require_data_stream: t.Optional[bool] = None,
11281133
routing: t.Optional[str] = None,
11291134
timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
11301135
version: t.Optional[int] = None,
@@ -1202,8 +1207,18 @@ def create(
12021207
:param id: A unique identifier for the document. To automatically generate a
12031208
document ID, use the `POST /<target>/_doc/` request format.
12041209
:param document:
1210+
:param if_primary_term: Only perform the operation if the document has this primary
1211+
term.
1212+
:param if_seq_no: Only perform the operation if the document has this sequence
1213+
number.
12051214
:param include_source_on_error: True or false if to include the document source
12061215
in the error message in case of parsing errors.
1216+
:param op_type: Set to `create` to only index the document if it does not already
1217+
exist (put if absent). If a document with the specified `_id` already exists,
1218+
the indexing operation will fail. The behavior is the same as using the `<index>/_create`
1219+
endpoint. If a document ID is specified, this parameter defaults to `index`.
1220+
Otherwise, it defaults to `create`. If the request targets a data stream,
1221+
an `op_type` of `create` is required.
12071222
:param pipeline: The ID of the pipeline to use to preprocess incoming documents.
12081223
If the index has a default ingest pipeline specified, setting the value to
12091224
`_none` turns off the default ingest pipeline for this request. If a final
@@ -1212,6 +1227,9 @@ def create(
12121227
:param refresh: If `true`, Elasticsearch refreshes the affected shards to make
12131228
this operation visible to search. If `wait_for`, it waits for a refresh to
12141229
make this operation visible to search. If `false`, it does nothing with refreshes.
1230+
:param require_alias: If `true`, the destination must be an index alias.
1231+
:param require_data_stream: If `true`, the request's actions must target a data
1232+
stream (existing or to be created).
12151233
:param routing: A custom value that is used to route operations to a specific
12161234
shard.
12171235
:param timeout: The period the request waits for the following operations: automatic
@@ -1252,14 +1270,24 @@ def create(
12521270
__query["filter_path"] = filter_path
12531271
if human is not None:
12541272
__query["human"] = human
1273+
if if_primary_term is not None:
1274+
__query["if_primary_term"] = if_primary_term
1275+
if if_seq_no is not None:
1276+
__query["if_seq_no"] = if_seq_no
12551277
if include_source_on_error is not None:
12561278
__query["include_source_on_error"] = include_source_on_error
1279+
if op_type is not None:
1280+
__query["op_type"] = op_type
12571281
if pipeline is not None:
12581282
__query["pipeline"] = pipeline
12591283
if pretty is not None:
12601284
__query["pretty"] = pretty
12611285
if refresh is not None:
12621286
__query["refresh"] = refresh
1287+
if require_alias is not None:
1288+
__query["require_alias"] = require_alias
1289+
if require_data_stream is not None:
1290+
__query["require_data_stream"] = require_data_stream
12631291
if routing is not None:
12641292
__query["routing"] = routing
12651293
if timeout is not None:

elasticsearch/_sync/client/inference.py

+99-1
Original file line numberDiff line numberDiff line change
@@ -321,6 +321,104 @@ def put(
321321
path_parts=__path_parts,
322322
)
323323

324+
@_rewrite_parameters(
    body_fields=(
        "service",
        "service_settings",
        "chunking_settings",
        "task_settings",
    ),
)
def put_openai(
    self,
    *,
    task_type: t.Union[
        str, t.Literal["chat_completion", "completion", "text_embedding"]
    ],
    openai_inference_id: str,
    service: t.Optional[t.Union[str, t.Literal["openai"]]] = None,
    service_settings: t.Optional[t.Mapping[str, t.Any]] = None,
    chunking_settings: t.Optional[t.Mapping[str, t.Any]] = None,
    error_trace: t.Optional[bool] = None,
    filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
    human: t.Optional[bool] = None,
    pretty: t.Optional[bool] = None,
    task_settings: t.Optional[t.Mapping[str, t.Any]] = None,
    body: t.Optional[t.Dict[str, t.Any]] = None,
) -> ObjectApiResponse[t.Any]:
    """
    .. raw:: html

      <p>Create an OpenAI inference endpoint.</p>
      <p>Create an inference endpoint to perform an inference task with the <code>openai</code> service.</p>
      <p>When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running.
      After creating the endpoint, wait for the model deployment to complete before using it.
      To verify the deployment status, use the get trained model statistics API.
      Look for <code>&quot;state&quot;: &quot;fully_allocated&quot;</code> in the response and ensure that the <code>&quot;allocation_count&quot;</code> matches the <code>&quot;target_allocation_count&quot;</code>.
      Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.</p>


    `<https://www.elastic.co/guide/en/elasticsearch/reference/8.18/infer-service-openai.html>`_

    :param task_type: The type of the inference task that the model will perform.
        NOTE: The `chat_completion` task type only supports streaming and only through
        the _stream API.
    :param openai_inference_id: The unique identifier of the inference endpoint.
    :param service: The type of service supported for the specified task type. In
        this case, `openai`.
    :param service_settings: Settings used to install the inference model. These
        settings are specific to the `openai` service.
    :param chunking_settings: The chunking configuration object.
    :param task_settings: Settings to configure the inference task. These settings
        are specific to the task type you specified.
    """
    # Both path components are required; reject empty/placeholder values early.
    if task_type in SKIP_IN_PATH:
        raise ValueError("Empty value passed for parameter 'task_type'")
    if openai_inference_id in SKIP_IN_PATH:
        raise ValueError("Empty value passed for parameter 'openai_inference_id'")
    # `service` and `service_settings` must be supplied unless a raw `body` is.
    if service is None and body is None:
        raise ValueError("Empty value passed for parameter 'service'")
    if service_settings is None and body is None:
        raise ValueError("Empty value passed for parameter 'service_settings'")
    __path_parts: t.Dict[str, str] = {
        "task_type": _quote(task_type),
        "openai_inference_id": _quote(openai_inference_id),
    }
    __path = f'/_inference/{__path_parts["task_type"]}/{__path_parts["openai_inference_id"]}'
    # Collect only the query-string flags the caller actually provided.
    __query: t.Dict[str, t.Any] = {}
    for __param, __value in (
        ("error_trace", error_trace),
        ("filter_path", filter_path),
        ("human", human),
        ("pretty", pretty),
    ):
        if __value is not None:
            __query[__param] = __value
    # A caller-supplied raw `body` takes precedence; otherwise assemble one
    # from the individual body-field parameters.
    __body: t.Dict[str, t.Any] = body if body is not None else {}
    if not __body:
        for __field, __value in (
            ("service", service),
            ("service_settings", service_settings),
            ("chunking_settings", chunking_settings),
            ("task_settings", task_settings),
        ):
            if __value is not None:
                __body[__field] = __value
    if not __body:
        __body = None  # type: ignore[assignment]
    __headers = {"accept": "application/json"}
    if __body is not None:
        __headers["content-type"] = "application/json"
    return self.perform_request(  # type: ignore[return-value]
        "PUT",
        __path,
        params=__query,
        headers=__headers,
        body=__body,
        endpoint_id="inference.put_openai",
        path_parts=__path_parts,
    )
421+
324422
@_rewrite_parameters(
325423
body_fields=("service", "service_settings"),
326424
)
@@ -341,7 +439,7 @@ def put_watsonx(
341439
.. raw:: html
342440
343441
<p>Create a Watsonx inference endpoint.</p>
344-
<p>Creates an inference endpoint to perform an inference task with the <code>watsonxai</code> service.
442+
<p>Create an inference endpoint to perform an inference task with the <code>watsonxai</code> service.
345443
You need an IBM Cloud Databases for Elasticsearch deployment to use the <code>watsonxai</code> inference service.
346444
You can provision one through the IBM catalog, the Cloud Databases CLI plug-in, the Cloud Databases API, or Terraform.</p>
347445
<p>When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running.

0 commit comments

Comments
 (0)