BUG: clip(out=...) is broken #261

Merged (8 commits) on Mar 5, 2025
array_api_compat/common/_aliases.py (26 changes: 13 additions & 13 deletions)
```diff
@@ -12,7 +12,7 @@
 from typing import NamedTuple
 import inspect
 
-from ._helpers import array_namespace, _check_device, device, is_torch_array, is_cupy_namespace
+from ._helpers import array_namespace, _check_device, device, is_cupy_namespace
 
 # These functions are modified from the NumPy versions.
```
```diff
@@ -368,23 +368,23 @@ def _isscalar(a):
     if type(max) is int and max >= wrapped_xp.iinfo(x.dtype).max:
         max = None
 
+    dev = device(x)
     if out is None:
-        out = wrapped_xp.asarray(xp.broadcast_to(x, result_shape),
-                                 copy=True, device=device(x))
+        out = wrapped_xp.empty(result_shape, dtype=x.dtype, device=dev)
+    out[()] = x
```
Contributor Author, on the new allocation path: When out is None, I expect this to have exactly the same performance as before.
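
The bug itself is visible in the removed lines above: x was copied into a fresh array only when out was None, so a caller-supplied out= buffer was masked against the bounds while still holding its old contents. A minimal sketch of that failure mode in plain NumPy (clip_before_fix is an illustrative stand-in reconstructed from the removed lines, not the library function):

```python
import numpy as np

def clip_before_fix(x, min, max, out=None):
    # Old logic: x was copied into a fresh array only when out was None...
    if out is None:
        out = np.broadcast_to(x, x.shape).copy()
    # ...so a caller-supplied out= buffer still holds its old contents here.
    out[out < min] = min
    out[out > max] = max
    return out

x = np.asarray([10, 20, 30])
out = np.zeros_like(x)
clip_before_fix(x, 15, 25, out=out)
print(out)  # [15 15 15]: the zeros got clipped; x was never written into out

# The fix is the unconditional `out[()] = x` in the hunk above: copy x into
# out first, whether out was freshly allocated or supplied by the caller.
out[()] = x
print(np.clip(out, 15, 25))  # [15 20 25]
```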


```diff
     if min is not None:
-        if is_torch_array(x) and x.dtype == xp.float64 and _isscalar(min):
-            # Avoid loss of precision due to torch defaulting to float32
-            min = wrapped_xp.asarray(min, dtype=xp.float64)
-        a = xp.broadcast_to(wrapped_xp.asarray(min, device=device(x)), result_shape)
+        a = wrapped_xp.asarray(min, dtype=x.dtype, device=dev)
+        a = xp.broadcast_to(a, result_shape)
         ia = (out < a) | xp.isnan(a)
-        # torch requires an explicit cast here
-        out[ia] = wrapped_xp.astype(a[ia], out.dtype)
+        out[ia] = a[ia]
```

Contributor Author, on the removed "# torch requires an explicit cast here" comment: Could not reproduce.

Contributor Author, on out[ia] = a[ia]: Removed unnecessary deep copy.
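
On the removed deep copy: a is now created with dtype=x.dtype, and out has that same dtype, so astype(a[ia], out.dtype) cast nothing yet still allocated a copy (astype copies by default). A plain-NumPy sketch of the two variants (illustrative only; the compat namespace's astype behaves like ndarray.astype here):

```python
import numpy as np

x = np.asarray([10.0, 20.0, 30.0])
out = x.copy()  # out shares x's dtype by construction
a = np.broadcast_to(np.asarray(15.0, dtype=x.dtype), x.shape)
ia = (out < a) | np.isnan(a)

# Before: cast-and-copy, even though the dtypes already match.
out[ia] = a[ia].astype(out.dtype)

# After: a[ia] (boolean indexing) already materializes a fresh array of the
# right dtype, so direct assignment does the same work with one fewer copy.
out[ia] = a[ia]
```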

```diff
     if max is not None:
-        if is_torch_array(x) and x.dtype == xp.float64 and _isscalar(max):
-            max = wrapped_xp.asarray(max, dtype=xp.float64)
-        b = xp.broadcast_to(wrapped_xp.asarray(max, device=device(x)), result_shape)
+        b = wrapped_xp.asarray(max, dtype=x.dtype, device=dev)
+        b = xp.broadcast_to(b, result_shape)
         ib = (out > b) | xp.isnan(b)
-        out[ib] = wrapped_xp.astype(b[ib], out.dtype)
+        out[ib] = b[ib]
 
     # Return a scalar for 0-D
     return out[()]
```

tests/test_common.py (15 changes: 15 additions & 0 deletions)
```diff
@@ -367,3 +367,18 @@ def test_asarray_copy(library):
         assert all(b[0] == 1.0)
     else:
         assert all(b[0] == 0.0)
+
+
+@pytest.mark.parametrize("library", ["numpy", "cupy", "torch"])
+def test_clip_out(library):
+    """Test non-standard out= parameter for clip()
+
+    (see "Avoid Restricting Behavior that is Outside the Scope of the Standard"
+    in https://data-apis.org/array-api-compat/dev/special-considerations.html)
+    """
+    xp = import_(library, wrapper=True)
+    x = xp.asarray([10, 20, 30])
+    out = xp.zeros_like(x)
+    xp.clip(x, 15, 25, out=out)
+    expect = xp.asarray([15, 20, 25])
+    assert xp.all(out == expect)
```
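
For reference, what the new test exercises, shown as an interactive sketch against the wrapped NumPy namespace (assuming array_api_compat is installed; the test's import_(library, wrapper=True) helper yields the equivalent namespace):

```python
import array_api_compat.numpy as xp

x = xp.asarray([10, 20, 30])

# Standard call form: allocates and returns the clipped result.
print(xp.clip(x, 15, 25))  # [15 20 25]

# Non-standard NumPy-style out=: the result is written into out in place.
out = xp.zeros_like(x)
xp.clip(x, 15, 25, out=out)
print(out)  # [15 20 25] after this fix; wrong values before it
```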