Skip to content

Commit 0f30a25

Browse files
release: 0.4.0-alpha.10 (#303)
Automated Release PR --- ## 0.4.0-alpha.10 (2025-11-17) Full Changelog: [v0.4.0-alpha.9...v0.4.0-alpha.10](https://github.com/llamastack/llama-stack-client-python/compare/v0.4.0-alpha.9...v0.4.0-alpha.10) ### Bug Fixes * **openapi:** restore embedded request wrappers ([261e364](https://github.com/llamastack/llama-stack-client-python/commit/261e3640c942c60860af08cd4d205d8e402bb702)) --- This pull request is managed by Stainless's [GitHub App](https://github.com/apps/stainless-app). The [semver version number](https://semver.org/#semantic-versioning-specification-semver) is based on included [commit messages](https://www.conventionalcommits.org/en/v1.0.0/). Alternatively, you can manually set the version number in the title of this pull request. For a better experience, it is recommended to use either rebase-merge or squash-merge when merging this pull request. 🔗 Stainless [website](https://www.stainlessapi.com) 📚 Read the [docs](https://app.stainlessapi.com/docs) 🙋 [Reach out](mailto:[email protected]) for help or questions --------- Co-authored-by: stainless-app[bot] <142633134+stainless-app[bot]@users.noreply.github.com>
1 parent 28e6a3e commit 0f30a25

File tree

14 files changed

+410
-443
lines changed

14 files changed

+410
-443
lines changed

.release-please-manifest.json

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,3 +1,3 @@
11
{
2-
".": "0.4.0-alpha.9"
2+
".": "0.4.0-alpha.10"
33
}

.stats.yml

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,4 @@
11
configured_endpoints: 103
2-
openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/llamastack%2Fllama-stack-client-2b99a80543f8bc8fa164167693c214651ac8e710f4726fb5869183b4d6c71a03.yml
3-
openapi_spec_hash: a5632057f5e4d956a71c20a79c0d879c
2+
openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/llamastack%2Fllama-stack-client-aab1b331382f758fc255f765e73b62fedf463cf0748bc11b2b08974de9ac816a.yml
3+
openapi_spec_hash: f717a21f47419aa51e4d9298aa68cc45
44
config_hash: 0017f6c419cbbf7b949f9b2842917a79

CHANGELOG.md

Lines changed: 8 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1,5 +1,13 @@
11
# Changelog
22

3+
## 0.4.0-alpha.10 (2025-11-17)
4+
5+
Full Changelog: [v0.4.0-alpha.9...v0.4.0-alpha.10](https://github.com/llamastack/llama-stack-client-python/compare/v0.4.0-alpha.9...v0.4.0-alpha.10)
6+
7+
### Bug Fixes
8+
9+
* **openapi:** restore embedded request wrappers ([261e364](https://github.com/llamastack/llama-stack-client-python/commit/261e3640c942c60860af08cd4d205d8e402bb702))
10+
311
## 0.4.0-alpha.9 (2025-11-14)
412

513
Full Changelog: [v0.4.0-alpha.8...v0.4.0-alpha.9](https://github.com/llamastack/llama-stack-client-python/compare/v0.4.0-alpha.8...v0.4.0-alpha.9)

pyproject.toml

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,6 @@
11
[project]
22
name = "llama_stack_client"
3-
version = "0.4.0-alpha.9"
3+
version = "0.4.0-alpha.10"
44
description = "The official Python library for the llama-stack-client API"
55
dynamic = ["readme"]
66
license = "MIT"

src/llama_stack_client/resources/alpha/eval/eval.py

Lines changed: 14 additions & 68 deletions
Original file line numberDiff line numberDiff line change
@@ -8,7 +8,7 @@
88

99
from __future__ import annotations
1010

11-
from typing import Dict, Iterable, Optional
11+
from typing import Dict, Iterable
1212

1313
import httpx
1414

@@ -20,7 +20,7 @@
2020
JobsResourceWithStreamingResponse,
2121
AsyncJobsResourceWithStreamingResponse,
2222
)
23-
from ...._types import Body, Omit, Query, Headers, NotGiven, SequenceNotStr, omit, not_given
23+
from ...._types import Body, Query, Headers, NotGiven, SequenceNotStr, not_given
2424
from ...._utils import maybe_transform, async_maybe_transform
2525
from ...._compat import cached_property
2626
from ...._resource import SyncAPIResource, AsyncAPIResource
@@ -164,9 +164,7 @@ def run_eval(
164164
self,
165165
benchmark_id: str,
166166
*,
167-
eval_candidate: eval_run_eval_params.EvalCandidate,
168-
num_examples: Optional[int] | Omit = omit,
169-
scoring_params: Dict[str, eval_run_eval_params.ScoringParams] | Omit = omit,
167+
benchmark_config: BenchmarkConfigParam,
170168
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
171169
# The extra values given here take precedence over values defined on the client or passed to this method.
172170
extra_headers: Headers | None = None,
@@ -178,13 +176,7 @@ def run_eval(
178176
Run an evaluation on a benchmark.
179177
180178
Args:
181-
eval_candidate: A model candidate for evaluation.
182-
183-
num_examples: Number of examples to evaluate (useful for testing), if not provided, all
184-
examples in the dataset will be evaluated
185-
186-
scoring_params: Map between scoring function id and parameters for each scoring function you
187-
want to run
179+
benchmark_config: A benchmark configuration for evaluation.
188180
189181
extra_headers: Send extra headers
190182
@@ -198,14 +190,7 @@ def run_eval(
198190
raise ValueError(f"Expected a non-empty value for `benchmark_id` but received {benchmark_id!r}")
199191
return self._post(
200192
f"/v1alpha/eval/benchmarks/{benchmark_id}/jobs",
201-
body=maybe_transform(
202-
{
203-
"eval_candidate": eval_candidate,
204-
"num_examples": num_examples,
205-
"scoring_params": scoring_params,
206-
},
207-
eval_run_eval_params.EvalRunEvalParams,
208-
),
193+
body=maybe_transform({"benchmark_config": benchmark_config}, eval_run_eval_params.EvalRunEvalParams),
209194
options=make_request_options(
210195
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
211196
),
@@ -216,9 +201,7 @@ def run_eval_alpha(
216201
self,
217202
benchmark_id: str,
218203
*,
219-
eval_candidate: eval_run_eval_alpha_params.EvalCandidate,
220-
num_examples: Optional[int] | Omit = omit,
221-
scoring_params: Dict[str, eval_run_eval_alpha_params.ScoringParams] | Omit = omit,
204+
benchmark_config: BenchmarkConfigParam,
222205
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
223206
# The extra values given here take precedence over values defined on the client or passed to this method.
224207
extra_headers: Headers | None = None,
@@ -230,13 +213,7 @@ def run_eval_alpha(
230213
Run an evaluation on a benchmark.
231214
232215
Args:
233-
eval_candidate: A model candidate for evaluation.
234-
235-
num_examples: Number of examples to evaluate (useful for testing), if not provided, all
236-
examples in the dataset will be evaluated
237-
238-
scoring_params: Map between scoring function id and parameters for each scoring function you
239-
want to run
216+
benchmark_config: A benchmark configuration for evaluation.
240217
241218
extra_headers: Send extra headers
242219
@@ -251,12 +228,7 @@ def run_eval_alpha(
251228
return self._post(
252229
f"/v1alpha/eval/benchmarks/{benchmark_id}/jobs",
253230
body=maybe_transform(
254-
{
255-
"eval_candidate": eval_candidate,
256-
"num_examples": num_examples,
257-
"scoring_params": scoring_params,
258-
},
259-
eval_run_eval_alpha_params.EvalRunEvalAlphaParams,
231+
{"benchmark_config": benchmark_config}, eval_run_eval_alpha_params.EvalRunEvalAlphaParams
260232
),
261233
options=make_request_options(
262234
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
@@ -385,9 +357,7 @@ async def run_eval(
385357
self,
386358
benchmark_id: str,
387359
*,
388-
eval_candidate: eval_run_eval_params.EvalCandidate,
389-
num_examples: Optional[int] | Omit = omit,
390-
scoring_params: Dict[str, eval_run_eval_params.ScoringParams] | Omit = omit,
360+
benchmark_config: BenchmarkConfigParam,
391361
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
392362
# The extra values given here take precedence over values defined on the client or passed to this method.
393363
extra_headers: Headers | None = None,
@@ -399,13 +369,7 @@ async def run_eval(
399369
Run an evaluation on a benchmark.
400370
401371
Args:
402-
eval_candidate: A model candidate for evaluation.
403-
404-
num_examples: Number of examples to evaluate (useful for testing), if not provided, all
405-
examples in the dataset will be evaluated
406-
407-
scoring_params: Map between scoring function id and parameters for each scoring function you
408-
want to run
372+
benchmark_config: A benchmark configuration for evaluation.
409373
410374
extra_headers: Send extra headers
411375
@@ -420,12 +384,7 @@ async def run_eval(
420384
return await self._post(
421385
f"/v1alpha/eval/benchmarks/{benchmark_id}/jobs",
422386
body=await async_maybe_transform(
423-
{
424-
"eval_candidate": eval_candidate,
425-
"num_examples": num_examples,
426-
"scoring_params": scoring_params,
427-
},
428-
eval_run_eval_params.EvalRunEvalParams,
387+
{"benchmark_config": benchmark_config}, eval_run_eval_params.EvalRunEvalParams
429388
),
430389
options=make_request_options(
431390
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
@@ -437,9 +396,7 @@ async def run_eval_alpha(
437396
self,
438397
benchmark_id: str,
439398
*,
440-
eval_candidate: eval_run_eval_alpha_params.EvalCandidate,
441-
num_examples: Optional[int] | Omit = omit,
442-
scoring_params: Dict[str, eval_run_eval_alpha_params.ScoringParams] | Omit = omit,
399+
benchmark_config: BenchmarkConfigParam,
443400
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
444401
# The extra values given here take precedence over values defined on the client or passed to this method.
445402
extra_headers: Headers | None = None,
@@ -451,13 +408,7 @@ async def run_eval_alpha(
451408
Run an evaluation on a benchmark.
452409
453410
Args:
454-
eval_candidate: A model candidate for evaluation.
455-
456-
num_examples: Number of examples to evaluate (useful for testing), if not provided, all
457-
examples in the dataset will be evaluated
458-
459-
scoring_params: Map between scoring function id and parameters for each scoring function you
460-
want to run
411+
benchmark_config: A benchmark configuration for evaluation.
461412
462413
extra_headers: Send extra headers
463414
@@ -472,12 +423,7 @@ async def run_eval_alpha(
472423
return await self._post(
473424
f"/v1alpha/eval/benchmarks/{benchmark_id}/jobs",
474425
body=await async_maybe_transform(
475-
{
476-
"eval_candidate": eval_candidate,
477-
"num_examples": num_examples,
478-
"scoring_params": scoring_params,
479-
},
480-
eval_run_eval_alpha_params.EvalRunEvalAlphaParams,
426+
{"benchmark_config": benchmark_config}, eval_run_eval_alpha_params.EvalRunEvalAlphaParams
481427
),
482428
options=make_request_options(
483429
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout

src/llama_stack_client/resources/beta/datasets.py

Lines changed: 23 additions & 12 deletions
Original file line numberDiff line numberDiff line change
@@ -10,6 +10,7 @@
1010

1111
import typing_extensions
1212
from typing import Dict, Type, Iterable, Optional, cast
13+
from typing_extensions import Literal
1314

1415
import httpx
1516

@@ -205,21 +206,26 @@ def iterrows(
205206
def register(
206207
self,
207208
*,
208-
purpose: object,
209-
source: object,
210-
dataset_id: object | Omit = omit,
211-
metadata: object | Omit = omit,
209+
purpose: Literal["post-training/messages", "eval/question-answer", "eval/messages-answer"],
210+
source: dataset_register_params.Source,
211+
dataset_id: Optional[str] | Omit = omit,
212+
metadata: Optional[Dict[str, object]] | Omit = omit,
212213
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
213214
# The extra values given here take precedence over values defined on the client or passed to this method.
214215
extra_headers: Headers | None = None,
215216
extra_query: Query | None = None,
216217
extra_body: Body | None = None,
217218
timeout: float | httpx.Timeout | None | NotGiven = not_given,
218219
) -> DatasetRegisterResponse:
219-
"""
220-
Register a new dataset.
220+
"""Register a new dataset.
221221
222222
Args:
223+
purpose: Purpose of the dataset.
224+
225+
Each purpose has a required input data schema.
226+
227+
source: A dataset that can be obtained from a URI.
228+
223229
extra_headers: Send extra headers
224230
225231
extra_query: Add additional query parameters to the request
@@ -452,21 +458,26 @@ async def iterrows(
452458
async def register(
453459
self,
454460
*,
455-
purpose: object,
456-
source: object,
457-
dataset_id: object | Omit = omit,
458-
metadata: object | Omit = omit,
461+
purpose: Literal["post-training/messages", "eval/question-answer", "eval/messages-answer"],
462+
source: dataset_register_params.Source,
463+
dataset_id: Optional[str] | Omit = omit,
464+
metadata: Optional[Dict[str, object]] | Omit = omit,
459465
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
460466
# The extra values given here take precedence over values defined on the client or passed to this method.
461467
extra_headers: Headers | None = None,
462468
extra_query: Query | None = None,
463469
extra_body: Body | None = None,
464470
timeout: float | httpx.Timeout | None | NotGiven = not_given,
465471
) -> DatasetRegisterResponse:
466-
"""
467-
Register a new dataset.
472+
"""Register a new dataset.
468473
469474
Args:
475+
purpose: Purpose of the dataset.
476+
477+
Each purpose has a required input data schema.
478+
479+
source: A dataset that can be obtained from a URI.
480+
470481
extra_headers: Send extra headers
471482
472483
extra_query: Add additional query parameters to the request

src/llama_stack_client/resources/scoring_functions.py

Lines changed: 17 additions & 13 deletions
Original file line numberDiff line numberDiff line change
@@ -9,7 +9,7 @@
99
from __future__ import annotations
1010

1111
import typing_extensions
12-
from typing import Type, cast
12+
from typing import Type, Optional, cast
1313

1414
import httpx
1515

@@ -112,12 +112,12 @@ def list(
112112
def register(
113113
self,
114114
*,
115-
description: object,
116-
return_type: object,
117-
scoring_fn_id: object,
118-
params: object | Omit = omit,
119-
provider_id: object | Omit = omit,
120-
provider_scoring_fn_id: object | Omit = omit,
115+
description: str,
116+
return_type: scoring_function_register_params.ReturnType,
117+
scoring_fn_id: str,
118+
params: Optional[scoring_function_register_params.Params] | Omit = omit,
119+
provider_id: Optional[str] | Omit = omit,
120+
provider_scoring_fn_id: Optional[str] | Omit = omit,
121121
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
122122
# The extra values given here take precedence over values defined on the client or passed to this method.
123123
extra_headers: Headers | None = None,
@@ -129,6 +129,8 @@ def register(
129129
Register a scoring function.
130130
131131
Args:
132+
params: Parameters for LLM-as-judge scoring function configuration.
133+
132134
extra_headers: Send extra headers
133135
134136
extra_query: Add additional query parameters to the request
@@ -273,12 +275,12 @@ async def list(
273275
async def register(
274276
self,
275277
*,
276-
description: object,
277-
return_type: object,
278-
scoring_fn_id: object,
279-
params: object | Omit = omit,
280-
provider_id: object | Omit = omit,
281-
provider_scoring_fn_id: object | Omit = omit,
278+
description: str,
279+
return_type: scoring_function_register_params.ReturnType,
280+
scoring_fn_id: str,
281+
params: Optional[scoring_function_register_params.Params] | Omit = omit,
282+
provider_id: Optional[str] | Omit = omit,
283+
provider_scoring_fn_id: Optional[str] | Omit = omit,
282284
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
283285
# The extra values given here take precedence over values defined on the client or passed to this method.
284286
extra_headers: Headers | None = None,
@@ -290,6 +292,8 @@ async def register(
290292
Register a scoring function.
291293
292294
Args:
295+
params: Parameters for LLM-as-judge scoring function configuration.
296+
293297
extra_headers: Send extra headers
294298
295299
extra_query: Add additional query parameters to the request

0 commit comments

Comments (0)