@@ -148,7 +148,7 @@ def test_groupby_reduce(
     )
     # we use pd.Index(expected_groups).to_numpy() which is always int64
     # for the values in this tests
-    g_dtype = by.dtype if expected_groups is None else np.int64
+    g_dtype = by.dtype if expected_groups is None else np.intp
 
     assert_equal(groups, np.array([0, 1, 2], g_dtype))
     assert_equal(expected_result, result)
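Aside, not part of the diff: a minimal sketch of why the expected group-label dtype becomes np.intp. np.intp is NumPy's platform index type (int32 on 32-bit builds, int64 on 64-bit builds) and is the dtype NumPy-style factorization produces for codes, whereas pd.Index(...).to_numpy() on plain Python ints yields int64, as the comment in the hunk above notes. The labels array below is illustrative only.

import numpy as np
import pandas as pd

labels = np.array([20, 10, 10, 30, 20])

# Inverse indices from NumPy factorization come back as np.intp,
# the platform index dtype, not necessarily np.int64.
_, codes = np.unique(labels, return_inverse=True)
print(codes.dtype)  # intp

# Whether intp matches int64 depends on the platform.
print(np.dtype(np.intp) == np.dtype(np.int64))  # True on 64-bit builds, False on 32-bit

# A pandas Index built from small Python ints converts to int64,
# which is why the expected dtype in the test had to change to np.intp.
print(pd.Index([0, 1, 2]).to_numpy().dtype)  # int64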
@@ -389,12 +389,12 @@ def test_groupby_agg_dask(func, shape, array_chunks, group_chunks, add_nan, dtyp
     kwargs["expected_groups"] = [0, 2, 1]
     with raise_if_dask_computes():
         actual, groups = groupby_reduce(array, by, engine=engine, **kwargs, sort=False)
-    assert_equal(groups, np.array([0, 2, 1], dtype=np.int64))
+    assert_equal(groups, np.array([0, 2, 1], dtype=np.intp))
     assert_equal(expected, actual[..., [0, 2, 1]])
 
     with raise_if_dask_computes():
         actual, groups = groupby_reduce(array, by, engine=engine, **kwargs, sort=True)
-    assert_equal(groups, np.array([0, 1, 2], np.int64))
+    assert_equal(groups, np.array([0, 1, 2], np.intp))
     assert_equal(expected, actual)
 
 
@@ -784,10 +784,8 @@ def test_dtype_preservation(dtype, func, engine):
 
 
 @requires_dask
-@pytest.mark.parametrize("dtype", [np.int32, np.int64])
-@pytest.mark.parametrize(
-    "labels_dtype", [pytest.param(np.int32, marks=pytest.mark.xfail), np.int64]
-)
+@pytest.mark.parametrize("dtype", [np.float32, np.float64, np.int32, np.int64])
+@pytest.mark.parametrize("labels_dtype", [np.float32, np.float64, np.int32, np.int64])
 @pytest.mark.parametrize("method", ["map-reduce", "cohorts"])
 def test_cohorts_map_reduce_consistent_dtypes(method, dtype, labels_dtype):
     repeats = np.array([4, 4, 12, 2, 3, 4], dtype=np.int32)