Commit 291e5f3

Add core_tests/test_ufunc_methods.py
1 parent d3d5ab7 commit 291e5f3

File tree

1 file changed: core_tests/test_ufunc_methods.py

Lines changed: 246 additions & 0 deletions
@@ -0,0 +1,246 @@
import numpy
import pytest

import dpnp as cupy
from dpnp.tests.third_party.cupy import testing

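# Note: the testing.numpy_cupy_* decorators (vendored from CuPy's test suite,
# per the import path above) run each test body twice, once with xp=numpy and
# once with xp=dpnp, and assert that the two results agree.
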
class TestUfuncOuter:

    @testing.numpy_cupy_array_equal()
    def test_add_outer(self, xp):
        x = testing.shaped_random((2, 3), xp=xp, dtype=numpy.int32, seed=0)
        y = testing.shaped_random((4, 1, 5), xp=xp, dtype=numpy.int32, seed=1)
        return xp.add.outer(x, y)

    @pytest.mark.skip("Scalar input is not supported")
    @testing.numpy_cupy_array_equal()
    def test_add_outer_scalar(self, xp):
        return xp.add.outer(2, 3)

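# For reference (NumPy semantics): ufunc.outer applies the ufunc to all pairs
# of elements, producing shape x.shape + y.shape; test_add_outer therefore
# returns an array of shape (2, 3, 4, 1, 5).
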
@pytest.mark.skip("at() method is not supported")
23+
class TestUfuncAtAtomicOps:
24+
25+
@testing.for_dtypes("iIQefd")
26+
@testing.numpy_cupy_array_equal()
27+
def test_at_add(self, xp, dtype):
28+
if cupy.cuda.runtime.is_hip and dtype == numpy.float16:
29+
pytest.skip("atomicAdd does not support float16 in HIP")
30+
shape = (50,)
31+
x = testing.shaped_random(shape, xp=xp, dtype=dtype, seed=0)
32+
mask = testing.shaped_random(shape, xp=xp, dtype=bool, seed=1)
33+
indices = xp.nonzero(mask)[0]
34+
xp.add.at(x, indices, 3)
35+
return x
36+
37+
@testing.for_dtypes("iIQefd")
38+
@testing.numpy_cupy_array_equal()
39+
def test_at_add_duplicate_indices(self, xp, dtype):
40+
if cupy.cuda.runtime.is_hip and dtype == numpy.float16:
41+
pytest.skip("atomicAdd does not support float16 in HIP")
42+
shape = (50,)
43+
x = testing.shaped_random(shape, xp=xp, dtype=dtype, seed=0)
44+
indices = testing.shaped_random(
45+
shape, xp=xp, dtype=numpy.int32, scale=shape[0], seed=1
46+
)
47+
xp.add.at(x, indices, 3)
48+
return x
49+
50+
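    # Note (NumPy semantics): ufunc.at performs an unbuffered in-place update,
    # so duplicate indices accumulate: numpy.add.at(a, [0, 0], 3) adds 6 to
    # a[0], whereas a[[0, 0]] += 3 would add only 3.
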
    @testing.for_dtypes("iI")
    @testing.numpy_cupy_array_equal()
    def test_at_subtract(self, xp, dtype):
        shape = (50,)
        x = testing.shaped_random(shape, xp=xp, dtype=dtype, seed=0)
        mask = testing.shaped_random(shape, xp=xp, dtype=bool, seed=1)
        indices = xp.nonzero(mask)[0]
        xp.subtract.at(x, indices, 3)
        return x

    @testing.for_dtypes("iI")
    @testing.numpy_cupy_array_equal()
    def test_at_subtract_duplicate_indices(self, xp, dtype):
        shape = (50,)
        x = testing.shaped_random(shape, xp=xp, dtype=dtype, seed=0)
        indices = testing.shaped_random(
            shape, xp=xp, dtype=numpy.int32, scale=shape[0], seed=1
        )
        xp.subtract.at(x, indices, 3)
        return x

    @testing.for_dtypes("iIQfd")
    @testing.numpy_cupy_allclose()
    def test_at_min(self, xp, dtype):
        shape = (50,)
        x = testing.shaped_random(shape, xp=xp, dtype=dtype, seed=0)
        mask = testing.shaped_random(shape, xp=xp, dtype=bool, seed=1)
        indices = xp.nonzero(mask)[0]
        xp.minimum.at(x, indices, 3)
        return x

    @testing.for_dtypes("iIQfd")
    @testing.numpy_cupy_allclose()
    def test_at_min_duplicate_indices(self, xp, dtype):
        shape = (50,)
        x = testing.shaped_random(shape, xp=xp, dtype=dtype, seed=0)
        indices = testing.shaped_random(
            shape, xp=xp, dtype=numpy.int32, scale=shape[0], seed=1
        )
        values = testing.shaped_random(
            indices.shape, xp=xp, dtype=dtype, seed=2
        )
        xp.minimum.at(x, indices, values)
        return x

    @testing.for_dtypes("iIQfd")
    @testing.numpy_cupy_allclose()
    def test_at_max(self, xp, dtype):
        shape = (50,)
        x = testing.shaped_random(shape, xp=xp, dtype=dtype, seed=0)
        mask = testing.shaped_random(shape, xp=xp, dtype=bool, seed=1)
        indices = xp.nonzero(mask)[0]
        xp.maximum.at(x, indices, 3)
        return x

    @testing.for_dtypes("iIQfd")
    @testing.numpy_cupy_allclose()
    def test_at_max_duplicate_indices(self, xp, dtype):
        shape = (50,)
        x = testing.shaped_random(shape, xp=xp, dtype=dtype, seed=0)
        indices = testing.shaped_random(
            shape, xp=xp, dtype=numpy.int32, scale=shape[0], seed=1
        )
        values = testing.shaped_random(
            indices.shape, xp=xp, dtype=dtype, seed=2
        )
        xp.maximum.at(x, indices, values)
        return x

    @testing.for_dtypes("iIlLqQ")
    @testing.numpy_cupy_array_equal()
    def test_at_bitwise_and(self, xp, dtype):
        if cupy.cuda.runtime.is_hip and numpy.dtype(dtype).char in "lq":
            pytest.skip("atomicAnd does not support int64 in HIP")
        shape = (50,)
        x = testing.shaped_random(shape, xp=xp, dtype=dtype, seed=0)
        indices = testing.shaped_random(
            shape, xp=xp, dtype=numpy.int32, scale=shape[0], seed=1
        )
        values = testing.shaped_random(
            indices.shape, xp=xp, dtype=dtype, seed=2
        )
        xp.bitwise_and.at(x, indices, values)
        return x

    @testing.for_dtypes("iIlLqQ")
    @testing.numpy_cupy_array_equal()
    def test_at_bitwise_or(self, xp, dtype):
        if cupy.cuda.runtime.is_hip and numpy.dtype(dtype).char in "lq":
            pytest.skip("atomicOr does not support int64 in HIP")
        shape = (50,)
        x = testing.shaped_random(shape, xp=xp, dtype=dtype, seed=0)
        indices = testing.shaped_random(
            shape, xp=xp, dtype=numpy.int32, scale=shape[0], seed=1
        )
        values = testing.shaped_random(
            indices.shape, xp=xp, dtype=dtype, seed=2
        )
        xp.bitwise_or.at(x, indices, values)
        return x

    @testing.for_dtypes("iIlLqQ")
    @testing.numpy_cupy_array_equal()
    def test_at_bitwise_xor(self, xp, dtype):
        if cupy.cuda.runtime.is_hip and numpy.dtype(dtype).char in "lq":
            pytest.skip("atomicXor does not support int64 in HIP")
        shape = (50,)
        x = testing.shaped_random(shape, xp=xp, dtype=dtype, seed=0)
        indices = testing.shaped_random(
            shape, xp=xp, dtype=numpy.int32, scale=shape[0], seed=1
        )
        values = testing.shaped_random(
            indices.shape, xp=xp, dtype=dtype, seed=2
        )
        xp.bitwise_xor.at(x, indices, values)
        return x

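    # Note: the HIP-specific skips in this class reflect the tests' CuPy
    # origin, where ufunc.at() updates are implemented with hardware atomics
    # (hence the class name); they never trigger here while the whole class
    # is skipped.
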
    @testing.for_dtypes("iIQefd")
    @testing.numpy_cupy_array_equal()
    def test_at_boolean_mask(self, xp, dtype):
        shape = (50,)
        x = testing.shaped_random(shape, xp=xp, dtype=dtype, seed=0)
        mask = testing.shaped_random(shape, xp=xp, dtype=bool, seed=1)
        xp.add.at(x, mask, 3)
        return x

    @testing.for_dtypes("iIQefd")
    @testing.numpy_cupy_array_equal()
    def test_at_array_values(self, xp, dtype):
        if cupy.cuda.runtime.is_hip and dtype == numpy.float16:
            pytest.skip("atomicAdd does not support float16 in HIP")
        shape = (50,)
        x = testing.shaped_random(shape, xp=xp, dtype=dtype, seed=0)
        mask = testing.shaped_random(shape, xp=xp, dtype=bool, seed=1)
        indices = xp.nonzero(mask)[0]
        values = testing.shaped_random(
            indices.shape, xp=xp, dtype=numpy.int32, seed=2
        )
        xp.add.at(x, indices, values)
        return x

    @testing.for_dtypes("iIQefd")
    @testing.numpy_cupy_array_equal()
    def test_at_multi_dimensional(self, xp, dtype):
        if cupy.cuda.runtime.is_hip and dtype == numpy.float16:
            pytest.skip("atomicAdd does not support float16 in HIP")
        shape = (20, 30)
        x = testing.shaped_random(shape, xp=xp, dtype=dtype, seed=0)
        mask = testing.shaped_random(shape, xp=xp, dtype=bool, seed=1)
        indices = xp.nonzero(mask)
        xp.add.at(x, indices, 3)
        return x

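# For reference (NumPy semantics): with a boolean mask, add.at(x, mask, 3) is
# equivalent to x[mask] += 3, since a boolean index selects each True position
# exactly once.
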
@pytest.mark.skip("reduce() method is not supported")
205+
class TestUfuncReduce:
206+
207+
@testing.for_all_dtypes()
208+
@testing.numpy_cupy_allclose(rtol={numpy.float16: 1e-3, "default": 1e-6})
209+
def test_reduce_add(self, xp, dtype):
210+
x = testing.shaped_random((3, 4), xp=xp, dtype=dtype, seed=0)
211+
return xp.add.reduce(x, axis=-1)
212+
213+
@testing.for_all_dtypes()
214+
@testing.numpy_cupy_allclose(rtol={numpy.float16: 1e-3, "default": 1e-6})
215+
def test_multiply_add(self, xp, dtype):
216+
x = testing.shaped_random((3, 4), xp=xp, dtype=dtype, seed=0)
217+
return xp.multiply.reduce(x, axis=-1)
218+
219+
220+
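# For reference (NumPy semantics): add.reduce(x, axis=-1) is equivalent to
# x.sum(axis=-1), and multiply.reduce(x, axis=-1) to x.prod(axis=-1).
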
@pytest.mark.skip("accumulate() method is not supported")
221+
class TestUfuncAccumulate:
222+
223+
@testing.for_all_dtypes()
224+
@testing.numpy_cupy_allclose(rtol={numpy.float16: 1e-3, "default": 1e-6})
225+
def test_reduce_add(self, xp, dtype):
226+
x = testing.shaped_random((3, 4), xp=xp, dtype=dtype, seed=0)
227+
return xp.add.accumulate(x, axis=-1)
228+
229+
@testing.for_all_dtypes()
230+
@testing.numpy_cupy_allclose(rtol={numpy.float16: 1e-3, "default": 1e-6})
231+
def test_multiply_add(self, xp, dtype):
232+
x = testing.shaped_random((3, 4), xp=xp, dtype=dtype, seed=0)
233+
return xp.multiply.accumulate(x, axis=-1)
234+
235+
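# For reference (NumPy semantics): add.accumulate is equivalent to cumsum and
# multiply.accumulate to cumprod along the given axis.
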
@pytest.mark.skip("reduceat() method is not supported")
class TestUfuncReduceAt:

    @testing.for_all_dtypes(no_float16=True)
    @testing.numpy_cupy_allclose(rtol=1e-6)
    def test_reduceat_add(self, xp, dtype):
        x = testing.shaped_random((3, 4, 5), xp=xp, dtype=dtype, seed=0)
        indices = testing.shaped_random(
            (20,), xp=xp, dtype=numpy.int32, scale=4, seed=1
        )
        return xp.add.reduceat(x, indices, axis=1)
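
# For reference (NumPy semantics): for each i, add.reduceat(x, indices, axis=1)
# sums x over the slice indices[i]:indices[i+1] along axis 1 (to the end of the
# axis for the last index); where indices[i] >= indices[i+1], the single
# element x[:, indices[i], :] is taken as-is.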
