
Commit 25147b8

Remove _asarray

1 parent 02cea48 commit 25147b8

29 files changed: +134 -216 lines

Diff for: pytensor/misc/safe_asarray.py

-57
This file was deleted.
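For reference, the deleted module's `_asarray` was a thin wrapper around `np.asarray` that papered over an old NumPy dtype-aliasing bug, which is why the call sites in the files below can switch to `np.asarray` directly. A from-memory sketch of the Theano-era helper, not the verbatim 57 deleted lines:

    import numpy as np

    def _asarray(a, dtype, order=None):
        # Convert `a` to an ndarray whose dtype is *exactly* the one requested.
        dtype = np.dtype(dtype)  # normalize strings/classes to a dtype object
        rval = np.asarray(a, dtype=dtype, order=order)
        # Old NumPy could hand back an equivalent but distinct dtype (e.g. a
        # platform alias of int32); reinterpret the buffer when that happens.
        if rval.dtype.num != dtype.num:
            return rval.view(dtype=dtype)
        return rval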

Diff for: pytensor/scalar/basic.py

+7-6
@@ -32,7 +32,6 @@
 from pytensor.graph.utils import MetaObject, MethodNotDefined
 from pytensor.link.c.op import COp
 from pytensor.link.c.type import CType
-from pytensor.misc.safe_asarray import _asarray
 from pytensor.printing import pprint
 from pytensor.utils import (
     apply_across_args,
@@ -150,7 +149,7 @@ def __call__(self, x):
             and rval.dtype in ("float64", "float32")
             and rval.dtype != config.floatX
         ):
-            rval = _asarray(rval, dtype=config.floatX)
+            rval = np.asarray(rval, dtype=config.floatX)
         return rval

         # The following is the original code, corresponding to the 'custom'
@@ -176,15 +175,15 @@ def __call__(self, x):
             and config.floatX in self.dtypes
             and config.floatX != "float64"
         ):
-            return _asarray(x, dtype=config.floatX)
+            return np.asarray(x, dtype=config.floatX)

         # Don't autocast to float16 unless config.floatX is float16
         try_dtypes = [
             d for d in self.dtypes if config.floatX == "float16" or d != "float16"
         ]

         for dtype in try_dtypes:
-            x_ = _asarray(x, dtype=dtype)
+            x_ = np.asarray(x).astype(dtype=dtype)
             if np.all(x == x_):
                 break
         # returns either an exact x_==x, or the last cast x_
@@ -245,7 +244,9 @@ def convert(x, dtype=None):

     if dtype is not None:
         # in this case, the semantics are that the caller is forcing the dtype
-        x_ = _asarray(x, dtype=dtype)
+        if dtype == "floatX":
+            dtype = config.floatX
+        x_ = np.asarray(x).astype(dtype)
     else:
         # In this case, this function should infer the dtype according to the
         # autocasting rules. See autocasting above.
@@ -256,7 +257,7 @@ def convert(x, dtype=None):
             except OverflowError:
                 # This is to imitate numpy behavior which tries to fit
                 # bigger numbers into a uint64.
-                x_ = _asarray(x, dtype="uint64")
+                x_ = np.asarray(x, dtype="uint64")
         elif isinstance(x, builtins.float):
             x_ = autocast_float(x)
         elif isinstance(x, np.ndarray):
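Note the two replacement idioms above: `np.asarray(x, dtype=d)` is a no-copy passthrough when the dtype already matches, while `np.asarray(x).astype(d)` always performs the cast, which is what the autocast loop and `convert` need. A standalone illustration in plain NumPy, not part of the diff:

    import numpy as np

    x = np.arange(3, dtype="float64")

    # asarray with a matching dtype returns the same object, no copy
    assert np.asarray(x, dtype="float64") is x

    # .astype() always casts (and copies by default), so the autocast loop
    # really tries each candidate dtype even for ndarray inputs
    assert np.asarray(x).astype("float32").dtype == np.float32

    # The OverflowError fallback mirrors NumPy itself: 2**63 does not fit
    # an int64, but does fit a uint64
    try:
        np.asarray(2**63, dtype="int64")
    except OverflowError:
        big = np.asarray(2**63, dtype="uint64")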

Diff for: pytensor/sparse/basic.py

+11-12
@@ -24,7 +24,6 @@
 from pytensor.graph.op import Op
 from pytensor.link.c.op import COp
 from pytensor.link.c.type import generic
-from pytensor.misc.safe_asarray import _asarray
 from pytensor.sparse.type import SparseTensorType, _is_sparse
 from pytensor.sparse.utils import hash_from_sparse
 from pytensor.tensor import basic as ptb
@@ -595,11 +594,11 @@ def perform(self, node, inputs, out):
         (csm,) = inputs
         out[0][0] = csm.data
         if str(csm.data.dtype) == "int32":
-            out[0][0] = _asarray(out[0][0], dtype="int32")
+            out[0][0] = np.asarray(out[0][0], dtype="int32")
         # backport
-        out[1][0] = _asarray(csm.indices, dtype="int32")
-        out[2][0] = _asarray(csm.indptr, dtype="int32")
-        out[3][0] = _asarray(csm.shape, dtype="int32")
+        out[1][0] = np.asarray(csm.indices, dtype="int32")
+        out[2][0] = np.asarray(csm.indptr, dtype="int32")
+        out[3][0] = np.asarray(csm.shape, dtype="int32")

     def grad(self, inputs, g):
         # g[1:] is all integers, so their Jacobian in this op
@@ -698,17 +697,17 @@ def make_node(self, data, indices, indptr, shape):

         if not isinstance(indices, Variable):
             indices_ = np.asarray(indices)
-            indices_32 = _asarray(indices, dtype="int32")
+            indices_32 = np.asarray(indices, dtype="int32")
             assert (indices_ == indices_32).all()
             indices = indices_32
         if not isinstance(indptr, Variable):
             indptr_ = np.asarray(indptr)
-            indptr_32 = _asarray(indptr, dtype="int32")
+            indptr_32 = np.asarray(indptr, dtype="int32")
             assert (indptr_ == indptr_32).all()
             indptr = indptr_32
         if not isinstance(shape, Variable):
             shape_ = np.asarray(shape)
-            shape_32 = _asarray(shape, dtype="int32")
+            shape_32 = np.asarray(shape, dtype="int32")
             assert (shape_ == shape_32).all()
             shape = shape_32

@@ -1461,7 +1460,7 @@ def perform(self, node, inputs, outputs):
         (x, ind1, ind2) = inputs
         (out,) = outputs
         assert _is_sparse(x)
-        out[0] = _asarray(x[ind1, ind2], x.dtype)
+        out[0] = np.asarray(x[ind1, ind2], x.dtype)


 get_item_scalar = GetItemScalar()
@@ -2142,7 +2141,7 @@ def perform(self, node, inputs, outputs):

         # The asarray is needed as in some case, this return a
         # numpy.matrixlib.defmatrix.matrix object and not an ndarray.
-        out[0] = _asarray(x + y, dtype=node.outputs[0].type.dtype)
+        out[0] = np.asarray(x + y, dtype=node.outputs[0].type.dtype)

     def grad(self, inputs, gout):
         (x, y) = inputs
@@ -3497,7 +3496,7 @@ def perform(self, node, inputs, outputs):

         # The cast is needed as otherwise we hit the bug mentioned into
         # _asarray function documentation.
-        out[0] = _asarray(variable, str(variable.dtype))
+        out[0] = np.asarray(variable, str(variable.dtype))

     def grad(self, inputs, gout):
         # a is sparse, b is dense, g_out is dense
@@ -4012,7 +4011,7 @@ def perform(self, node, inputs, out):
         if x_is_sparse and y_is_sparse:
             rval = rval.toarray()

-        out[0] = _asarray(rval, dtype=node.outputs[0].dtype)
+        out[0] = np.asarray(rval, dtype=node.outputs[0].dtype)

     def grad(self, inputs, gout):
         (x, y) = inputs
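The comment in the `@@ -2142` hunk refers to scipy returning `numpy.matrixlib.defmatrix.matrix` rather than `ndarray` from mixed sparse/dense arithmetic; `np.asarray` strips that subclass. A small illustration, assuming the legacy spmatrix classes used here:

    import numpy as np
    import scipy.sparse as sp

    x = sp.csr_matrix(np.eye(2))
    y = np.ones((2, 2))

    # sparse + dense yields np.matrix, not a plain ndarray
    res = x + y
    assert isinstance(res, np.matrix)

    # np.asarray drops the matrix subclass and enforces the output dtype
    arr = np.asarray(res, dtype="float64")
    assert type(arr) is np.ndarray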

Diff for: pytensor/sparse/rewriting.py

+2-2
@@ -1,3 +1,4 @@
+import numpy as np
 import scipy

 import pytensor
@@ -10,7 +11,6 @@
     node_rewriter,
 )
 from pytensor.link.c.op import COp, _NoPythonCOp
-from pytensor.misc.safe_asarray import _asarray
 from pytensor.sparse import basic as sparse
 from pytensor.sparse.basic import (
     CSC,
@@ -283,7 +283,7 @@ def perform(self, node, inputs, outputs):
             (a_val, a_ind, a_ptr), (a_nrows, b.shape[0]), copy=False
         )
         # out[0] = a.dot(b)
-        out[0] = _asarray(a * b, dtype=node.outputs[0].type.dtype)
+        out[0] = np.asarray(a * b, dtype=node.outputs[0].type.dtype)
         assert _is_dense(out[0])  # scipy 0.7 automatically converts to dense

     def c_code(self, node, name, inputs, outputs, sub):
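The `# out[0] = a.dot(b)` comment exists because, for scipy sparse matrices, `*` is matrix multiplication rather than elementwise multiplication, and the product against a dense operand comes back dense, hence the `_is_dense` assertion. A quick sanity check in plain scipy, not part of the diff:

    import numpy as np
    import scipy.sparse as sp

    a = sp.csr_matrix(np.eye(3))
    b = np.arange(9.0).reshape(3, 3)

    # for sparse matrices `*` means matmul, and sparse times dense is dense
    prod = np.asarray(a * b, dtype="float64")
    assert np.allclose(prod, a.dot(b))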

Diff for: pytensor/tensor/basic.py

+3-4
@@ -32,7 +32,6 @@
 from pytensor.graph.type import HasShape, Type
 from pytensor.link.c.op import COp
 from pytensor.link.c.params_type import ParamsType
-from pytensor.misc.safe_asarray import _asarray
 from pytensor.printing import Printer, min_informative_str, pprint, set_precedence
 from pytensor.raise_op import CheckAndRaise, assert_op
 from pytensor.scalar import int32
@@ -512,7 +511,7 @@ def get_underlying_scalar_constant_value(
             ret = v.owner.inputs[0].owner.inputs[idx]
             ret = get_underlying_scalar_constant_value(ret, max_recur=max_recur)
             # MakeVector can cast implicitly its input in some case.
-            return _asarray(ret, dtype=v.type.dtype)
+            return np.asarray(ret, dtype=v.type.dtype)

     # This is needed when we take the grad as the Shape op
     # are not already changed into MakeVector
@@ -1834,7 +1833,7 @@ def perform(self, node, inputs, out_):
         (out,) = out_
         # not calling pytensor._asarray as optimization
         if (out[0] is None) or (out[0].size != len(inputs)):
-            out[0] = _asarray(inputs, dtype=node.outputs[0].dtype)
+            out[0] = np.asarray(inputs, dtype=node.outputs[0].dtype)
         else:
             # assume that out has correct dtype. there is no cheap way to check
             out[0][...] = inputs
@@ -2537,7 +2536,7 @@ def perform(self, node, axis_and_tensors, out_):
                 f"Join axis {int(axis)} out of bounds [0, {int(ndim)})"
             )

-        out[0] = _asarray(
+        out[0] = np.asarray(
             np.concatenate(tens, axis=axis), dtype=node.outputs[0].type.dtype
         )

Diff for: pytensor/tensor/elemwise.py

+1-2
@@ -17,7 +17,6 @@
 from pytensor.link.c.op import COp, ExternalCOp, OpenMPOp
 from pytensor.link.c.params_type import ParamsType
 from pytensor.misc.frozendict import frozendict
-from pytensor.misc.safe_asarray import _asarray
 from pytensor.printing import Printer, pprint
 from pytensor.scalar import get_scalar_type
 from pytensor.scalar.basic import bool as scalar_bool
@@ -1412,7 +1411,7 @@ def perform(self, node, inp, out):

         out = self.ufunc.reduce(input, axis=axis, dtype=acc_dtype)

-        output[0] = _asarray(out, dtype=out_dtype)
+        output[0] = np.asarray(out, dtype=out_dtype)

     def infer_shape(self, fgraph, node, shapes):
         (ishape,) = shapes
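The trailing `np.asarray(out, dtype=out_dtype)` matters because `ufunc.reduce` accumulates in `acc_dtype` and, when every axis is reduced, returns a NumPy scalar rather than an ndarray; the cast restores a 0-d array of the declared output dtype. A standalone sketch:

    import numpy as np

    x = np.ones((2, 3), dtype="float32")

    # accumulate in float64 for precision, as the Op does via acc_dtype
    out = np.add.reduce(x, axis=None, dtype="float64")

    # reducing all axes yields a NumPy scalar; wrap it back into a 0-d
    # ndarray of the output dtype, which is what the output storage expects
    res = np.asarray(out, dtype="float32")
    assert res.ndim == 0 and res.dtype == np.float32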

Diff for: pytensor/tensor/extra_ops.py

+2-3
@@ -17,7 +17,6 @@
 from pytensor.link.c.op import COp
 from pytensor.link.c.params_type import ParamsType
 from pytensor.link.c.type import EnumList, Generic
-from pytensor.misc.safe_asarray import _asarray
 from pytensor.raise_op import Assert
 from pytensor.scalar import int32 as int_t
 from pytensor.scalar import upcast
@@ -1307,7 +1306,7 @@ def perform(self, node, inp, out):
         res = np.unravel_index(indices, dims, order=self.order)
         assert len(res) == len(out)
         for i in range(len(out)):
-            ret = _asarray(res[i], node.outputs[0].dtype)
+            ret = np.asarray(res[i], node.outputs[0].dtype)
             if ret.base is not None:
                 # NumPy will return a view when it can.
                 # But we don't want that.
@@ -1382,7 +1381,7 @@ def infer_shape(self, fgraph, node, input_shapes):
     def perform(self, node, inp, out):
         multi_index, dims = inp[:-1], inp[-1]
         res = np.ravel_multi_index(multi_index, dims, mode=self.mode, order=self.order)
-        out[0][0] = _asarray(res, node.outputs[0].dtype)
+        out[0][0] = np.asarray(res, node.outputs[0].dtype)


 def ravel_multi_index(multi_index, dims, mode="raise", order="C"):
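The `ret.base is not None` guard in the first hunk exists because `np.asarray` makes no copy when the dtype already matches, so the result may be a view into NumPy internals. Mirroring the guard outside the Op:

    import numpy as np

    res = np.unravel_index(np.array([0, 3, 5]), (2, 3))
    ret = np.asarray(res[0], "int64")
    # asarray may alias memory it does not own; copy before storing
    if ret.base is not None:
        ret = ret.copy()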

Diff for: pytensor/tensor/math.py

+15-16
@@ -14,7 +14,6 @@
 from pytensor.graph.replace import _vectorize_node
 from pytensor.link.c.op import COp
 from pytensor.link.c.params_type import ParamsType
-from pytensor.misc.safe_asarray import _asarray
 from pytensor.printing import pprint
 from pytensor.raise_op import Assert
 from pytensor.scalar.basic import BinaryScalarOp
@@ -202,7 +201,7 @@ def perform(self, node, inp, outs):
         new_shape = (*kept_shape, np.prod(reduced_shape, dtype="int64"))
         reshaped_x = transposed_x.reshape(new_shape)

-        max_idx[0] = _asarray(np.argmax(reshaped_x, axis=-1), dtype="int64")
+        max_idx[0] = np.asarray(np.argmax(reshaped_x, axis=-1), dtype="int64")

     def c_code(self, node, name, inp, out, sub):
         (x,) = inp
@@ -730,32 +729,32 @@ def isclose(a, b, rtol=1.0e-5, atol=1.0e-8, equal_nan=False):
     --------
     >>> import pytensor
     >>> import numpy as np
-    >>> a = _asarray([1e10, 1e-7], dtype="float64")
-    >>> b = _asarray([1.00001e10, 1e-8], dtype="float64")
+    >>> a = np.array([1e10, 1e-7], dtype="float64")
+    >>> b = np.array([1.00001e10, 1e-8], dtype="float64")
     >>> pytensor.tensor.isclose(a, b).eval()
     array([ True, False])
-    >>> a = _asarray([1e10, 1e-8], dtype="float64")
-    >>> b = _asarray([1.00001e10, 1e-9], dtype="float64")
+    >>> a = np.array([1e10, 1e-8], dtype="float64")
+    >>> b = np.array([1.00001e10, 1e-9], dtype="float64")
     >>> pytensor.tensor.isclose(a, b).eval()
     array([ True, True])
-    >>> a = _asarray([1e10, 1e-8], dtype="float64")
-    >>> b = _asarray([1.0001e10, 1e-9], dtype="float64")
+    >>> a = np.array([1e10, 1e-8], dtype="float64")
+    >>> b = np.array([1.0001e10, 1e-9], dtype="float64")
    >>> pytensor.tensor.isclose(a, b).eval()
     array([False, True])
-    >>> a = _asarray([1.0, np.nan], dtype="float64")
-    >>> b = _asarray([1.0, np.nan], dtype="float64")
+    >>> a = np.array([1.0, np.nan], dtype="float64")
+    >>> b = np.array([1.0, np.nan], dtype="float64")
     >>> pytensor.tensor.isclose(a, b).eval()
     array([ True, False])
-    >>> a = _asarray([1.0, np.nan], dtype="float64")
-    >>> b = _asarray([1.0, np.nan], dtype="float64")
+    >>> a = np.array([1.0, np.nan], dtype="float64")
+    >>> b = np.array([1.0, np.nan], dtype="float64")
     >>> pytensor.tensor.isclose(a, b, equal_nan=True).eval()
     array([ True, True])
-    >>> a = _asarray([1.0, np.inf], dtype="float64")
-    >>> b = _asarray([1.0, -np.inf], dtype="float64")
+    >>> a = np.array([1.0, np.inf], dtype="float64")
+    >>> b = np.array([1.0, -np.inf], dtype="float64")
     >>> pytensor.tensor.isclose(a, b).eval()
     array([ True, False])
-    >>> a = _asarray([1.0, np.inf], dtype="float64")
-    >>> b = _asarray([1.0, np.inf], dtype="float64")
+    >>> a = np.array([1.0, np.inf], dtype="float64")
+    >>> b = np.array([1.0, np.inf], dtype="float64")
     >>> pytensor.tensor.isclose(a, b).eval()
     array([ True, True])
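The first hunk sits inside the argmax reduction trick: move the reduced axes to the end, collapse them into one, and take a single `argmax(axis=-1)`. A standalone re-enactment with made-up shapes:

    import numpy as np

    x = np.arange(24).reshape(2, 3, 4)
    axes = (1, 2)  # axes being reduced
    kept = tuple(i for i in range(x.ndim) if i not in axes)

    # reduced axes last, then collapsed into a single trailing dimension
    transposed_x = x.transpose(*kept, *axes)
    new_shape = (
        *[x.shape[i] for i in kept],
        np.prod([x.shape[i] for i in axes], dtype="int64"),
    )
    reshaped_x = transposed_x.reshape(new_shape)

    max_idx = np.asarray(np.argmax(reshaped_x, axis=-1), dtype="int64")
    assert max_idx.shape == (2,)  # one flat index per kept position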

Diff for: pytensor/tensor/rewriting/math.py

+4-5
@@ -19,7 +19,6 @@
     node_rewriter,
 )
 from pytensor.graph.rewriting.utils import get_clients_at_depth
-from pytensor.misc.safe_asarray import _asarray
 from pytensor.raise_op import assert_op
 from pytensor.tensor.basic import (
     Alloc,
@@ -1205,7 +1204,7 @@ def mul_calculate(num, denum, aslist=False, out_type=None):
         out_dtype = ps.upcast(*[v.dtype for v in (num + denum)])
     else:
         out_dtype = out_type.dtype
-    one = _asarray(1, dtype=out_dtype)
+    one = np.asarray(1, dtype=out_dtype)

     v = reduce(np.multiply, num, one) / reduce(np.multiply, denum, one)
     if aslist:
@@ -1878,7 +1877,7 @@ def local_mul_zero(fgraph, node):
         # print 'MUL by value', value, node.inputs
         if value == 0:
             # print '... returning zeros'
-            return [broadcast_arrays(_asarray(0, dtype=otype.dtype), *node.inputs)[0]]
+            return [broadcast_arrays(np.asarray(0, dtype=otype.dtype), *node.inputs)[0]]


 # TODO: Add this to the canonicalization to reduce redundancy.
@@ -2353,8 +2352,8 @@ def add_calculate(num, denum, aslist=False, out_type=None):
     if out_type is None:
         zero = 0.0
     else:
-        zero = _asarray(0, dtype=out_type.dtype)
-        # zero = 0.0 if out_type is None else _asarray(0,
+        zero = np.asarray(0, dtype=out_type.dtype)
+        # zero = 0.0 if out_type is None else np.asarray(0,
         # dtype=out_type.dtype)
     if out_type and out_type.dtype == "bool":
         if len(denum) == 0:
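`np.asarray(1, dtype=out_dtype)` (and the analogous zero) builds a typed 0-d array used as the `reduce` initializer, which pins the result dtype even when `num` or `denum` is empty. A minimal sketch of the pattern:

    from functools import reduce

    import numpy as np

    num = [np.float32(2.0), np.float32(3.0)]
    denum = []  # nothing in the denominator
    one = np.asarray(1, dtype="float32")

    # the typed seed keeps the product in float32 and handles empty lists
    v = reduce(np.multiply, num, one) / reduce(np.multiply, denum, one)
    assert v.dtype == np.float32 and v == 6.0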
