Skip to content

Commit 74b98eb

Browse files
authored
sync stainless 0.1.3 cut (#144)
# What does this PR do? - as title ## Test Plan ``` pytest -v -s --nbval-lax ./docs/notebooks/Llama_Stack_Benchmark_Evals.ipynb ``` <img width="687" alt="image" src="https://github.com/user-attachments/assets/ef291dc2-a822-4ed5-a3fc-956703a3772f" /> [//]: # (## Documentation) [//]: # (- [ ] Added a Changelog entry if the change is significant)
1 parent 8652757 commit 74b98eb

Some content is hidden

Large commits have some content hidden by default. Use the search box below to find content that may be hidden.

53 files changed

+1996
-221
lines changed

src/llama_stack_client/_client.py

Lines changed: 9 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -39,6 +39,7 @@
3939
providers,
4040
telemetry,
4141
vector_io,
42+
benchmarks,
4243
eval_tasks,
4344
toolgroups,
4445
vector_dbs,
@@ -94,6 +95,7 @@ class LlamaStackClient(SyncAPIClient):
9495
scoring: scoring.ScoringResource
9596
scoring_functions: scoring_functions.ScoringFunctionsResource
9697
eval_tasks: eval_tasks.EvalTasksResource
98+
benchmarks: benchmarks.BenchmarksResource
9799
with_raw_response: LlamaStackClientWithRawResponse
98100
with_streaming_response: LlamaStackClientWithStreamedResponse
99101

@@ -176,6 +178,7 @@ def __init__(
176178
self.scoring = scoring.ScoringResource(self)
177179
self.scoring_functions = scoring_functions.ScoringFunctionsResource(self)
178180
self.eval_tasks = eval_tasks.EvalTasksResource(self)
181+
self.benchmarks = benchmarks.BenchmarksResource(self)
179182
self.with_raw_response = LlamaStackClientWithRawResponse(self)
180183
self.with_streaming_response = LlamaStackClientWithStreamedResponse(self)
181184

@@ -310,6 +313,7 @@ class AsyncLlamaStackClient(AsyncAPIClient):
310313
scoring: scoring.AsyncScoringResource
311314
scoring_functions: scoring_functions.AsyncScoringFunctionsResource
312315
eval_tasks: eval_tasks.AsyncEvalTasksResource
316+
benchmarks: benchmarks.AsyncBenchmarksResource
313317
with_raw_response: AsyncLlamaStackClientWithRawResponse
314318
with_streaming_response: AsyncLlamaStackClientWithStreamedResponse
315319

@@ -392,6 +396,7 @@ def __init__(
392396
self.scoring = scoring.AsyncScoringResource(self)
393397
self.scoring_functions = scoring_functions.AsyncScoringFunctionsResource(self)
394398
self.eval_tasks = eval_tasks.AsyncEvalTasksResource(self)
399+
self.benchmarks = benchmarks.AsyncBenchmarksResource(self)
395400
self.with_raw_response = AsyncLlamaStackClientWithRawResponse(self)
396401
self.with_streaming_response = AsyncLlamaStackClientWithStreamedResponse(self)
397402

@@ -529,6 +534,7 @@ def __init__(self, client: LlamaStackClient) -> None:
529534
self.scoring = scoring.ScoringResourceWithRawResponse(client.scoring)
530535
self.scoring_functions = scoring_functions.ScoringFunctionsResourceWithRawResponse(client.scoring_functions)
531536
self.eval_tasks = eval_tasks.EvalTasksResourceWithRawResponse(client.eval_tasks)
537+
self.benchmarks = benchmarks.BenchmarksResourceWithRawResponse(client.benchmarks)
532538

533539

534540
class AsyncLlamaStackClientWithRawResponse:
@@ -560,6 +566,7 @@ def __init__(self, client: AsyncLlamaStackClient) -> None:
560566
client.scoring_functions
561567
)
562568
self.eval_tasks = eval_tasks.AsyncEvalTasksResourceWithRawResponse(client.eval_tasks)
569+
self.benchmarks = benchmarks.AsyncBenchmarksResourceWithRawResponse(client.benchmarks)
563570

564571

565572
class LlamaStackClientWithStreamedResponse:
@@ -591,6 +598,7 @@ def __init__(self, client: LlamaStackClient) -> None:
591598
client.scoring_functions
592599
)
593600
self.eval_tasks = eval_tasks.EvalTasksResourceWithStreamingResponse(client.eval_tasks)
601+
self.benchmarks = benchmarks.BenchmarksResourceWithStreamingResponse(client.benchmarks)
594602

595603

596604
class AsyncLlamaStackClientWithStreamedResponse:
@@ -624,6 +632,7 @@ def __init__(self, client: AsyncLlamaStackClient) -> None:
624632
client.scoring_functions
625633
)
626634
self.eval_tasks = eval_tasks.AsyncEvalTasksResourceWithStreamingResponse(client.eval_tasks)
635+
self.benchmarks = benchmarks.AsyncBenchmarksResourceWithStreamingResponse(client.benchmarks)
627636

628637

629638
Client = LlamaStackClient

src/llama_stack_client/_decoders/jsonl.py

Lines changed: 26 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -17,18 +17,29 @@ class JSONLDecoder(Generic[_T]):
1717
into a given type.
1818
"""
1919

20-
http_response: httpx.Response | None
20+
http_response: httpx.Response
2121
"""The HTTP response this decoder was constructed from"""
2222

2323
def __init__(
24-
self, *, raw_iterator: Iterator[bytes], line_type: type[_T], http_response: httpx.Response | None
24+
self,
25+
*,
26+
raw_iterator: Iterator[bytes],
27+
line_type: type[_T],
28+
http_response: httpx.Response,
2529
) -> None:
2630
super().__init__()
2731
self.http_response = http_response
2832
self._raw_iterator = raw_iterator
2933
self._line_type = line_type
3034
self._iterator = self.__decode__()
3135

36+
def close(self) -> None:
37+
"""Close the response body stream.
38+
39+
This is called automatically if you consume the entire stream.
40+
"""
41+
self.http_response.close()
42+
3243
def __decode__(self) -> Iterator[_T]:
3344
buf = b""
3445
for chunk in self._raw_iterator:
@@ -63,17 +74,28 @@ class AsyncJSONLDecoder(Generic[_T]):
6374
into a given type.
6475
"""
6576

66-
http_response: httpx.Response | None
77+
http_response: httpx.Response
6778

6879
def __init__(
69-
self, *, raw_iterator: AsyncIterator[bytes], line_type: type[_T], http_response: httpx.Response | None
80+
self,
81+
*,
82+
raw_iterator: AsyncIterator[bytes],
83+
line_type: type[_T],
84+
http_response: httpx.Response,
7085
) -> None:
7186
super().__init__()
7287
self.http_response = http_response
7388
self._raw_iterator = raw_iterator
7489
self._line_type = line_type
7590
self._iterator = self.__decode__()
7691

92+
async def close(self) -> None:
93+
"""Close the response body stream.
94+
95+
This is called automatically if you consume the entire stream.
96+
"""
97+
await self.http_response.aclose()
98+
7799
async def __decode__(self) -> AsyncIterator[_T]:
78100
buf = b""
79101
async for chunk in self._raw_iterator:

src/llama_stack_client/_models.py

Lines changed: 7 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -426,10 +426,16 @@ def construct_type(*, value: object, type_: object) -> object:
426426
427427
If the given value does not match the expected type then it is returned as-is.
428428
"""
429+
430+
# store a reference to the original type we were given before we extract any inner
431+
# types so that we can properly resolve forward references in `TypeAliasType` annotations
432+
original_type = None
433+
429434
# we allow `object` as the input type because otherwise, passing things like
430435
# `Literal['value']` will be reported as a type error by type checkers
431436
type_ = cast("type[object]", type_)
432437
if is_type_alias_type(type_):
438+
original_type = type_ # type: ignore[unreachable]
433439
type_ = type_.__value__ # type: ignore[unreachable]
434440

435441
# unwrap `Annotated[T, ...]` -> `T`
@@ -446,7 +452,7 @@ def construct_type(*, value: object, type_: object) -> object:
446452

447453
if is_union(origin):
448454
try:
449-
return validate_type(type_=cast("type[object]", type_), value=value)
455+
return validate_type(type_=cast("type[object]", original_type or type_), value=value)
450456
except Exception:
451457
pass
452458

src/llama_stack_client/_response.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -144,7 +144,7 @@ def _parse(self, *, to: type[_T] | None = None) -> R | _T:
144144
return cast(
145145
R,
146146
cast("type[JSONLDecoder[Any]]", cast_to)(
147-
raw_iterator=self.http_response.iter_bytes(chunk_size=4096),
147+
raw_iterator=self.http_response.iter_bytes(chunk_size=64),
148148
line_type=extract_type_arg(cast_to, 0),
149149
http_response=self.http_response,
150150
),
@@ -154,7 +154,7 @@ def _parse(self, *, to: type[_T] | None = None) -> R | _T:
154154
return cast(
155155
R,
156156
cast("type[AsyncJSONLDecoder[Any]]", cast_to)(
157-
raw_iterator=self.http_response.aiter_bytes(chunk_size=4096),
157+
raw_iterator=self.http_response.aiter_bytes(chunk_size=64),
158158
line_type=extract_type_arg(cast_to, 0),
159159
http_response=self.http_response,
160160
),

src/llama_stack_client/_utils/_transform.py

Lines changed: 11 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -25,7 +25,7 @@
2525
is_annotated_type,
2626
strip_annotated_type,
2727
)
28-
from .._compat import model_dump, is_typeddict
28+
from .._compat import get_origin, model_dump, is_typeddict
2929

3030
_T = TypeVar("_T")
3131

@@ -164,9 +164,14 @@ def _transform_recursive(
164164
inner_type = annotation
165165

166166
stripped_type = strip_annotated_type(inner_type)
167+
origin = get_origin(stripped_type) or stripped_type
167168
if is_typeddict(stripped_type) and is_mapping(data):
168169
return _transform_typeddict(data, stripped_type)
169170

171+
if origin == dict and is_mapping(data):
172+
items_type = get_args(stripped_type)[1]
173+
return {key: _transform_recursive(value, annotation=items_type) for key, value in data.items()}
174+
170175
if (
171176
# List[T]
172177
(is_list_type(stripped_type) and is_list(data))
@@ -307,9 +312,14 @@ async def _async_transform_recursive(
307312
inner_type = annotation
308313

309314
stripped_type = strip_annotated_type(inner_type)
315+
origin = get_origin(stripped_type) or stripped_type
310316
if is_typeddict(stripped_type) and is_mapping(data):
311317
return await _async_transform_typeddict(data, stripped_type)
312318

319+
if origin == dict and is_mapping(data):
320+
items_type = get_args(stripped_type)[1]
321+
return {key: _transform_recursive(value, annotation=items_type) for key, value in data.items()}
322+
313323
if (
314324
# List[T]
315325
(is_list_type(stripped_type) and is_list(data))

src/llama_stack_client/resources/__init__.py

Lines changed: 14 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -120,6 +120,14 @@
120120
VectorIoResourceWithStreamingResponse,
121121
AsyncVectorIoResourceWithStreamingResponse,
122122
)
123+
from .benchmarks import (
124+
BenchmarksResource,
125+
AsyncBenchmarksResource,
126+
BenchmarksResourceWithRawResponse,
127+
AsyncBenchmarksResourceWithRawResponse,
128+
BenchmarksResourceWithStreamingResponse,
129+
AsyncBenchmarksResourceWithStreamingResponse,
130+
)
123131
from .eval_tasks import (
124132
EvalTasksResource,
125133
AsyncEvalTasksResource,
@@ -324,4 +332,10 @@
324332
"AsyncEvalTasksResourceWithRawResponse",
325333
"EvalTasksResourceWithStreamingResponse",
326334
"AsyncEvalTasksResourceWithStreamingResponse",
335+
"BenchmarksResource",
336+
"AsyncBenchmarksResource",
337+
"BenchmarksResourceWithRawResponse",
338+
"AsyncBenchmarksResourceWithRawResponse",
339+
"BenchmarksResourceWithStreamingResponse",
340+
"AsyncBenchmarksResourceWithStreamingResponse",
327341
]

0 commit comments

Comments (0)