# Owner(s): ["module: codegen"]
import torch
from torch.testing._internal.common_utils import TestCase, run_tests
from torch.testing._internal.logging_tensor import LoggingTensor, LoggingTensorReentrant, capture_logs, log_input
from torch.utils._pytree import tree_map
import logging
def are_aliased(x, y):
if x._base is None and y._base is None:
return False
if x._base is not None and y._base is None:
return x._base is y
if x._base is None and y._base is not None:
return y._base is x
return x._base is y._base
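# A quick sketch of how are_aliased behaves (illustrative helper only, not used by the
# tests below; the name _are_aliased_example is arbitrary):
def _are_aliased_example():
    base = torch.ones(4)
    # two views of the same base share ._base, so they are reported as aliased
    assert are_aliased(base.view(2, 2), base[0])
    # a view is also aliased with its base directly
    assert are_aliased(base, base.view(2, 2))
    # independently allocated tensors have no ._base and are not aliased
    assert not are_aliased(torch.ones(2), torch.ones(2))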
# Just for testing: a logging tensor that also transforms out-of-place ops into inplace ops.
# That way even if the outer wrapper is functionalized, the inner wrapper will also need functionalization.
class InplaceLoggingTensor(LoggingTensorReentrant):
@staticmethod
def __new__(cls, e):
r = torch.Tensor._make_wrapper_subclass(cls, e.shape, dtype=e.dtype, requires_grad=False)
r.elem = e
return r
__torch_function__ = torch._C._disabled_torch_function_impl
def __str__(self):
return f'InplaceLoggingTensor({self.elem})'
@classmethod
def __torch_dispatch__(cls, func, types, args=(), kwargs=None):
def unwrap(e):
if isinstance(e, InplaceLoggingTensor):
return e.elem
else:
return e
def wrap(e):
if isinstance(e, torch.Tensor):
return InplaceLoggingTensor(e)
else:
return e
f = func
# this subclass converts all `add()` ops into `add_()` ops
if f is torch.ops.aten.add.Tensor:
f = torch.ops.aten.add_.Tensor
with cls.context():
rs = tree_map(wrap, f(*tree_map(unwrap, args), **tree_map(unwrap, kwargs)))
# after running the (potentially transformed) op,
# log the original op that we saw.
logging.getLogger("LoggingTensor").info(f"{func.__module__}.{func.__name__}", args, kwargs, rs)
return rs
class TestFunctionalization(TestCase):
def get_logs(self, func, inpt, *, reapply_views=False):
input_clone_logging = LoggingTensor(inpt.clone())
input_functional_logging = torch._to_functional_tensor(input_clone_logging)
with capture_logs() as logs:
log_input("input", input_clone_logging)
torch._enable_functionalization(reapply_views=reapply_views)
try:
func(input_functional_logging)
finally:
torch._disable_functionalization()
return logs
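# The logs returned here are strings of the form
# "$1 = torch._ops.aten.view_copy.default($0, [4, 2])", one per dispatched op;
# the tests below compare them against assertExpectedInline templates.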
def assert_functionalization(self, func, inpt, *, reapply_views=False):
input_clone = inpt.clone()
input_clone2 = inpt.clone()
input_functional = torch._to_functional_tensor(input_clone2)
# Compare outputs (and mutated inputs), with and without functionalization.
out_ref = func(inpt)
torch._enable_functionalization(reapply_views=reapply_views)
try:
out_functional = func(input_functional)
finally:
torch._disable_functionalization()
# We need to sync the input tensors first, in case there are any queued mutations left.
torch._sync(input_functional)
self.assertEqual(inpt, torch._from_functional_tensor(input_functional)) # input mutations should still occur
# Handle tests with multi-tensor outputs
if isinstance(out_ref, tuple) and isinstance(out_functional, tuple):
out_refs, out_functionals = list(out_ref), list(out_functional)
else:
out_refs, out_functionals = [out_ref], [out_functional]
for out_ref_, out_functional_ in zip(out_refs, out_functionals):
self.assertEqual(out_ref_.size(), out_functional_.size())
torch._sync(out_functional_)
out_functional_unwrapped = torch._from_functional_tensor(out_functional_)
self.assertEqual(out_ref_, out_functional_unwrapped)
def test_multiple_views_of_same_base(self):
def f(x):
y = x.view(-1)
z = x.view(-1)
x.add_(1)
# y should have been updated.
y2 = y + 1
# z should have been updated too.
z2 = z + 1
return z2
self.assert_functionalization(f, torch.ones(4))
def test_simple(self):
def f(x):
# simple test: 1 view op, 1 inplace op
tmp = torch.ones(4, 2)
y = x.view(4, 2)
y.add_(tmp)
z = x * x
return y
self.assert_functionalization(f, torch.ones(4, 2))
logs = self.get_logs(f, torch.ones(4, 2))
self.assertExpectedInline('\n'.join(logs), """\
$0 = input('input')
$1 = torch._ops.aten.view_copy.default($0, [4, 2])
$2 = torch._ops.aten.add.Tensor($1, tensor([[1., 1.],
[1., 1.],
[1., 1.],
[1., 1.]]))
$3 = torch._ops.aten.view_copy.default($2, [4, 2])
$4 = torch._ops.aten.mul.Tensor($3, $3)""")
def test_simple_out(self):
def f(x):
tmp = torch.ones(4, 2)
y = x.view(4, 2)
# the out= tensor will get resized, since it starts out as a 0-dim tensor.
z = torch.empty(())
torch.add(y, tmp, out=z)
w = z * z
return w
self.assert_functionalization(f, torch.ones(4, 2))
logs = self.get_logs(f, torch.ones(4, 2))
self.assertExpectedInline('\n'.join(logs), """\
$0 = input('input')
$1 = torch._ops.aten.view_copy.default($0, [4, 2])
$2 = torch._ops.aten.add.Tensor($1, tensor([[1., 1.],
[1., 1.],
[1., 1.],
[1., 1.]]))
$3 = torch._ops.aten.mul.Tensor($2, $2)""")
def test_multi_out(self):
def f(x):
# aminmax.out returns a tuple of tensors.
# functionalization should properly handle the tuple.
out_min = torch.empty(4)
out_max = torch.empty(4)
torch.aminmax(x, dim=0, out=(out_max, out_min))
return out_max
self.assert_functionalization(f, torch.arange(8, dtype=torch.float32))
logs = self.get_logs(f, torch.arange(8, dtype=torch.float32))
self.assertExpectedInline('\n'.join(logs), """\
$0 = input('input')
$1, $2 = torch._ops.aten.aminmax.default($0, dim=0)""")
def test_tensor_ctr(self):
def f(x):
y = torch.tensor((1, 2, 3))
z = y.view(-1)
z.add_(1)
return y
self.assert_functionalization(f, torch.arange(3, dtype=torch.float32))
def test_inplace_on_non_view(self):
def f(x):
# test for the case where we functionalize an inplace op directly on the base tensor (not on a view of it).
# This is worth checking because the tensor will have an empty ViewMeta stack, which needs to be special cased.
tmp = torch.ones(4, 2)
y = x.view(4, 2)
x.add_(tmp)
return y
self.assert_functionalization(f, torch.ones(4, 2))
logs = self.get_logs(f, torch.ones(4, 2))
self.assertExpectedInline('\n'.join(logs), """\
$0 = input('input')
$1 = torch._ops.aten.view_copy.default($0, [4, 2])
$2 = torch._ops.aten.add.Tensor($0, tensor([[1., 1.],
[1., 1.],
[1., 1.],
[1., 1.]]))""")
# Some ops that are mutable are neither inplace nor out= ops.
# They also need special handling.
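# Functionalization re-dispatches such ops to a purely functional variant that returns
# the updated values instead of mutating its inputs; in the expected trace below,
# _fused_moving_avg_obs_fq_helper is replaced by _fused_moving_avg_obs_fq_helper.functional.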
def test_mutable_op_not_inplace_or_other(self):
def f(x):
return torch._fused_moving_avg_obs_fq_helper(x, x, x, x, x, x, x, 1.0, 0, 1, 0)
logs = self.get_logs(f, torch.ones(1))
self.assertExpectedInline('\n'.join(logs), """\
$0 = input('input')
$1, $2, $3, $4, $5, $6 = torch._ops.aten._fused_moving_avg_obs_fq_helper.functional($0, $0, $0, $0, $0, $0, $0, 1.0, 0, 1, 0)""")
def test_as_strided(self):
def f(x):
y = x.as_strided((2,), (2,), 1)
y.add_(1)
return x
self.assert_functionalization(f, torch.ones(9))
logs = self.get_logs(f, torch.ones(9))
self.assertExpectedInline('\n'.join(logs), """\
$0 = input('input')
$1 = torch._ops.aten.as_strided_copy.default($0, [2], [2], 1)
$2 = torch._ops.aten.add.Tensor($1, 1)""")
def test_tensor_list_composite(self):
def f(x):
# Test an op with TensorList input
y = torch.block_diag(x, x)
return y
self.assert_functionalization(f, torch.ones(2, 2))
logs = self.get_logs(f, torch.ones(2, 2))
self.assertExpectedInline('\n'.join(logs), """\
$0 = input('input')
$1 = torch._ops.aten.block_diag.default([LoggingTensor(tensor([[1., 1.],
[1., 1.]])), LoggingTensor(tensor([[1., 1.],
[1., 1.]]))])""")
def test_cat(self):
def f(x):
out = torch.empty(0)
torch.cat((x,), out=out)
return out
self.assert_functionalization(f, torch.ones(2, 2))
logs = self.get_logs(f, torch.ones(2, 2))
self.assertExpectedInline('\n'.join(logs), """\
$0 = input('input')
$1 = torch._ops.aten.cat.default([LoggingTensor(tensor([[1., 1.],
[1., 1.]]))])""")
def test_diagonal(self):
def f(x):
# test: view ops that take a subset of the original tensor (select/diagonal)
tmp = torch.ones(2)
y = x.diagonal()
y.add_(tmp)
z = x * x
return z
self.assert_functionalization(f, torch.ones(2, 2))
logs = self.get_logs(f, torch.ones(2, 2))
self.assertExpectedInline('\n'.join(logs), """\
$0 = input('input')
$1 = torch._ops.aten.diagonal_copy.default($0)
$2 = torch._ops.aten.add.Tensor($1, tensor([1., 1.]))
$3 = torch._ops.aten.diagonal_scatter.default($0, $2)
$4 = torch._ops.aten.mul.Tensor($3, $3)""")
def test_diagonal_mutated_input(self):
def f(x):
# simple test: there are pending updates afterwards, which the test syncs manually
tmp = torch.ones(2)
y = x.diagonal()
y.add_(tmp)
return x
x = torch.ones(2, 2)
self.assert_functionalization(f, x)
def test_split(self):
def f(x):
# test: view ops that return multiple tensors (split)
tmp = torch.ones(2)
y1, y2 = x.split(2)
y3 = y2.diagonal()
y3.add_(tmp)
z = x * x
return y3
self.assert_functionalization(f, torch.ones(4, 2))
logs = self.get_logs(f, torch.ones(4, 2))
self.assertExpectedInline('\n'.join(logs), """\
$0 = input('input')
$1, $2 = torch._ops.aten.split_copy.Tensor($0, 2)
$3 = torch._ops.aten.diagonal_copy.default($2)
$4 = torch._ops.aten.add.Tensor($3, tensor([1., 1.]))
$5, $6 = torch._ops.aten.split_copy.Tensor($0, 2)
$7 = torch._ops.aten.diagonal_scatter.default($6, $4)
$8 = torch._ops.aten.slice_scatter.default($0, $7, 0, 2, 4)
$9 = torch._ops.aten.mul.Tensor($8, $8)""")
def test_view_inplace(self):
def f(x):
# test: view + inplace op (transpose_)
tmp = torch.ones(4)
x.transpose_(1, 0)
y = x[0]
y.add_(tmp)
return x
self.assert_functionalization(f, torch.ones(4, 2))
logs = self.get_logs(f, torch.ones(4, 2))
self.assertExpectedInline('\n'.join(logs), """\
$0 = input('input')
$1 = torch._ops.aten.transpose_copy.int($0, 1, 0)
$2 = torch._ops.aten.select_copy.int($1, 0, 0)
$3 = torch._ops.aten.add.Tensor($2, tensor([1., 1., 1., 1.]))""")
def test_optional_tensor_list(self):
def f(x):
# test: an operator that takes in a List[Optional[Tensor]] argument
# (index_put)
y = x.view(8)
indices = torch.arange(4)
values = torch.arange(4, dtype=y.dtype)
y.index_put_((indices,), values, accumulate=False)
return y
self.assert_functionalization(f, torch.ones(4, 2))
logs = self.get_logs(f, torch.ones(4, 2))
self.assertExpectedInline('\n'.join(logs), """\
$0 = input('input')
$1 = torch._ops.aten.view_copy.default($0, [8])
$2 = torch._ops.aten.index_put.default($1, [tensor([0, 1, 2, 3])], tensor([0., 1., 2., 3.]))""")
def test_scalars(self):
def f(x):
# test: the pass can handle scalar inputs properly
tmp = torch.ones(4, 2)
y = x.view(4, 2)
y.add_(1)
z = 2 * y
z.div_(1)
return z
self.assert_functionalization(f, torch.ones(4, 2))
logs = self.get_logs(f, torch.ones(4, 2))
self.assertExpectedInline('\n'.join(logs), """\
$0 = input('input')
$1 = torch._ops.aten.view_copy.default($0, [4, 2])
$2 = torch._ops.aten.add.Tensor($1, 1)
$3 = torch._ops.aten.mul.Tensor($2, 2)
$4 = torch._ops.aten.div.Tensor($3, 1)""")
def test_metadata_change(self):
def f(x):
# ops like ge_() are allowed to change the dtype of the input.
# functionalization should pick up on that.
return x.ge_(0)
self.assert_functionalization(f, torch.ones(4, 2))
logs = self.get_logs(f, torch.ones(4, 2))
self.assertExpectedInline('\n'.join(logs), """\
$0 = input('input')
$1 = torch._ops.aten.ge.Scalar($0, 0)
$2 = torch._ops.aten._to_copy.default($1, dtype=torch.float32, layout=torch.strided)""")
def test_only_one_view(self):
def f(x):
# This tests that we don't have any unnecessary views in the trace.
# If the input wasn't mutated, we don't need to regenerate it,
# so there should be a total of 1 op in the output trace.
return x.view(4, 2)
logs = self.get_logs(f, torch.ones(4, 2))
self.assertExpectedInline('\n'.join(logs), """\
$0 = input('input')
$1 = torch._ops.aten.view_copy.default($0, [4, 2])""")
def test_everything(self):
def f(x):
# test: everything
tmp = torch.ones(2, 2)
x2 = x + x
y = x2.view(8)
z0 = y.reshape(2, 4)
z1 = z0.transpose(1, 0)
z1.unsqueeze_(0)
z1.squeeze_()
z2, z3 = z1.split(2)
z2.add_(tmp)
z4 = z0[0] + z2.reshape(4)
return z2
self.assert_functionalization(f, torch.ones(4, 2))
logs = self.get_logs(f, torch.ones(4, 2))
self.assertExpectedInline('\n'.join(logs), """\
$0 = input('input')
$1 = torch._ops.aten.add.Tensor($0, $0)
$2 = torch._ops.aten.view_copy.default($1, [8])
$3 = torch._ops.aten._reshape_alias_copy.default($2, [2, 4], [4, 1])
$4 = torch._ops.aten.transpose_copy.int($3, 1, 0)
$5 = torch._ops.aten.unsqueeze_copy.default($4, 0)
$6 = torch._ops.aten.squeeze_copy.default($5)
$7, $8 = torch._ops.aten.split_copy.Tensor($6, 2)
$9 = torch._ops.aten.add.Tensor($7, tensor([[1., 1.],
[1., 1.]]))
$10 = torch._ops.aten.select_copy.int($3, 0, 0)
$11 = torch._ops.aten.clone.default($9, memory_format=torch.contiguous_format)
$12 = torch._ops.aten._unsafe_view.default($11, [4])
$13 = torch._ops.aten.view_copy.default($1, [8])
$14 = torch._ops.aten._reshape_alias_copy.default($13, [2, 4], [4, 1])
$15 = torch._ops.aten.transpose_copy.int($14, 1, 0)
$16 = torch._ops.aten.unsqueeze_copy.default($15, 0)
$17 = torch._ops.aten.squeeze_copy.default($16)
$18 = torch._ops.aten.slice_scatter.default($17, $9, 0, 0, 2)
$19 = torch._ops.aten.unsqueeze_copy.default($18, 0)
$20 = torch._ops.aten.squeeze_copy.dim($19, 0)
$21 = torch._ops.aten.transpose_copy.int($20, 1, 0)
$22 = torch._ops.aten._reshape_alias_copy.default($21, [8], [1])
$23 = torch._ops.aten.view_copy.default($22, [4, 2])
$24 = torch._ops.aten.view_copy.default($23, [8])
$25 = torch._ops.aten._reshape_alias_copy.default($24, [2, 4], [4, 1])
$26 = torch._ops.aten.select_copy.int($25, 0, 0)
$27 = torch._ops.aten.add.Tensor($26, $12)""")
def test_reapply_views_simple(self):
def f(x):
tmp = torch.ones(4, 2)
y = x.view(4, 2)
y.add_(tmp)
z = x * x
return y
self.assert_functionalization(f, torch.ones(4, 2), reapply_views=True)
logs = self.get_logs(f, torch.ones(4, 2), reapply_views=True)
self.assertExpectedInline('\n'.join(logs), """\
$0 = input('input')
$1 = torch._ops.aten.view.default($0, [4, 2])
$2 = torch._ops.aten.add.Tensor($1, tensor([[1., 1.],
[1., 1.],
[1., 1.],
[1., 1.]]))
$3 = torch._ops.aten.view.default($2, [4, 2])
$4 = torch._ops.aten.mul.Tensor($3, $3)""")
def test_aliases_maintained_after_pass_when_reapplying_views(self):
def f(x):
tmp = torch.ones(4, 2)
y = x.view(4, 2)
z = x.view(4, 2)
y.add_(tmp)
return y, z
input_functional = torch._to_functional_tensor(torch.ones(4, 2))
torch._enable_functionalization(reapply_views=True)
try:
y, z = f(input_functional)
torch._sync(y)
torch._sync(z)
finally:
torch._disable_functionalization()
# y and z are aliases inside of the function, and that aliasing relationship should be maintained.
_y = torch._from_functional_tensor(y)
_z = torch._from_functional_tensor(z)
self.assertTrue(are_aliased(_y, _z))
# copy_() gets its own test, because it is special cased in functionalization.
# self.copy_(src) decomposes into src.to(self).expand_as(self).
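# A rough sketch of that decomposition (illustrative only; the real kernel also handles
# non_blocking and broadcasting details):
#   def copy_decomposition(dst, src):
#       # functional equivalent of dst.copy_(src): match dst's dtype/device, then
#       # broadcast the result to dst's shape
#       return src.to(dst).expand_as(dst)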
def test_copy_(self):
def f(x):
tmp = torch.zeros(2, 2)
# NOTE: LoggingTensor isn't a mode, which means that the diagonal call
# will not be logged. This is fine for testing.
tmp_slice = tmp.diagonal()
y = tmp_slice.copy_(x)
z = y.add_(x)
return z
# Test 1: copy_() with same dtype and shape
# to() is a composite op that noops when the dtype/shape match, so nothing gets logged.
# self.assert_functionalization(f, torch.ones(2))
logs = self.get_logs(f, torch.ones(2))
self.assertExpectedInline('\n'.join(logs), """\
$0 = input('input')
$1 = torch._ops.aten.copy.default(tensor([0., 0.]), $0)
$2 = torch._ops.aten.add.Tensor($1, $0)""")
# Test 2: copy_() with same dtype, different shape
self.assert_functionalization(f, torch.ones(1))
logs = self.get_logs(f, torch.ones(1))
self.assertExpectedInline('\n'.join(logs), """\
$0 = input('input')
$1 = torch._ops.aten.copy.default(tensor([0., 0.]), $0)
$2 = torch._ops.aten.add.Tensor($1, $0)""")
# Test 3: copy_() with different dtype, same shape
self.assert_functionalization(f, torch.ones(2, dtype=torch.long))
logs = self.get_logs(f, torch.ones(2, dtype=torch.long))
self.assertExpectedInline('\n'.join(logs), """\
$0 = input('input')
$1 = torch._ops.aten.copy.default(tensor([0., 0.]), $0)
$2 = torch._ops.aten.add.Tensor($1, $0)""")
# Test 4: copy_() with different dtype, different shape
self.assert_functionalization(f, torch.ones(1, dtype=torch.long))
logs = self.get_logs(f, torch.ones(1, dtype=torch.long))
self.assertExpectedInline('\n'.join(logs), """\
$0 = input('input')
$1 = torch._ops.aten.copy.default(tensor([0., 0.]), $0)
$2 = torch._ops.aten.add.Tensor($1, $0)""")
def test_fill_(self):
def f(x):
y = x + x
z = y.diagonal()
z.fill_(0)
return y
self.assert_functionalization(f, torch.ones(2, 2))
logs = self.get_logs(f, torch.ones(2, 2))
self.assertExpectedInline('\n'.join(logs), """\
$0 = input('input')
$1 = torch._ops.aten.add.Tensor($0, $0)
$2 = torch._ops.aten.diagonal_copy.default($1)
$3 = torch._ops.aten.fill.Scalar($2, 0)""")
def test_resize_smaller(self):
def f(w):
# Resizing to a smaller size doesn't affect storage
x = w + 1
y = x.view(4, 4)
y.resize_(3, 3)
y2 = y.view(-1)
y2.add_(1)
z = y + 1
return z
self.assert_functionalization(f, torch.ones(8, 2))
logs = self.get_logs(f, torch.ones(8, 2))
self.assertExpectedInline('\n'.join(logs), """\
$0 = input('input')
$1 = torch._ops.aten.add.Tensor($0, 1)
$2 = torch._ops.aten.view_copy.default($1, [4, 4])
$3 = torch._ops.aten.resize.functional($2, [3, 3])
$4 = torch._ops.aten.as_strided_copy.default($2, [3, 3], [3, 1])
$5 = torch._ops.aten.view_copy.default($4, [-1])
$6 = torch._ops.aten.add.Tensor($5, 1)
$7 = torch._ops.aten.view_copy.default($1, [4, 4])
$8 = torch._ops.aten.as_strided_copy.default($7, [3, 3], [3, 1])
$9 = torch._ops.aten.view_copy.default($6, [3, 3])
$10 = torch._ops.aten.as_strided_scatter.default($7, $9, [3, 3], [3, 1])
$11 = torch._ops.aten.view_copy.default($10, [8, 2])
$12 = torch._ops.aten.view_copy.default($11, [4, 4])
$13 = torch._ops.aten.as_strided_copy.default($12, [3, 3], [3, 1])
$14 = torch._ops.aten.add.Tensor($13, 1)""")
def test_resize_larger_valid(self):
def f(x):
y = x + 1
# resizing a tensor to a larger size is only currently allowed
# if the tensor-to-resize is not a view / has no outstanding views.
# See Note [resize_() in functionalization pass]
y.resize_(5, 5)
y2 = y.view(25)
# Do a mutation to ensure that aliases of the output of resize_()
# propagate mutations correctly.
# I'm using fill_ specifically because I want to guarantee that
# none of the output has uninitialized memory at the end
# (since these tests compare the data output against a reference impl)
y2.fill_(1)
out = y + 1
return y, out
self.assert_functionalization(f, torch.ones(8, 2))
logs = self.get_logs(f, torch.ones(8, 2))
self.assertExpectedInline('\n'.join(logs), """\
$0 = input('input')
$1 = torch._ops.aten.add.Tensor($0, 1)
$2 = torch._ops.aten.resize.functional($1, [5, 5])
$3 = torch._ops.aten.view_copy.default($2, [25])
$4 = torch._ops.aten.fill.Scalar($3, 1)
$5 = torch._ops.aten.view_copy.default($4, [5, 5])
$6 = torch._ops.aten.add.Tensor($5, 1)""")
def test_resize_larger_invalid(self):
def f(x):
y = x + 1
z = y.view(4, 4)
# resizing a tensor to a larger size is only currently allowed
# if the tensor-to-resize is not a view / has no outstanding views.
# See Note [resize_() in functionalization pass]
# This should fail
z.resize_(5, 5)
z2 = z.view(25)
z2.fill_(1)
out = z + 1
return y, out
with self.assertRaisesRegex(
RuntimeError,
r'Attempted to resize a view tensor to a larger size. This is not allowed in the functionalization pass'):
self.assert_functionalization(f, torch.ones(8, 2))
def test_nested_functions_propagate_updates(self):
def g(x):
# Create a view of x
y = x[0]
y.add_(1)
# The view, y, gets deallocated at the end of this function
def f(x):
# Calling g(x) should mutate x
g(x)
# We expect x to be synced here, even though the alias created in g() has been deallocated!
y = x + x
return y
self.assert_functionalization(f, torch.ones(2, 2))
def test_mixed_wrappers_valid(self):
def f(x, y):
z = x + y
z.add_(1)
return z
x1_not_functional = LoggingTensor(torch.ones(4))
x2_functional = torch._to_functional_tensor(LoggingTensor(torch.ones(4)))
with capture_logs() as logs:
y = f(x1_not_functional, x2_functional)
# Make sure that functionalization ran the "+" kernel
# with a functional + non-functional tensor, and wrapped the output appropriately.
self.assertExpectedInline('\n'.join(logs), """\
$2 = torch._ops.aten.add.Tensor($0, $1)
$3 = torch._ops.aten.add.Tensor($2, 1)""")
def test_mixed_wrappers_invalid(self):
x1_not_functional = torch.ones(4)
x2_functional = torch._to_functional_tensor(torch.ones(4))
# When dealing with mixed functional + non functional tensors,
# normal_tensor.add_(functional_tensor) is not valid
# because normal_tensor would need to be "promoted" to a functional tensor.
with self.assertRaises(RuntimeError):
x1_not_functional.add_(x2_functional)
# This tests the behavior of functionalization with multiple layers of wrapped tensor subclasses.
def test_multiple_levels_of_wrapping(self):
def f(x):
# call an inplace op and have it get logged twice (by the outer + inner wrapper)
x.add_(1)
# Test 1: both the inner and outer wrapper are "functionalized"
x_inner_and_outer_functional = torch._to_functional_tensor(
InplaceLoggingTensor(torch._to_functional_tensor(LoggingTensor(torch.ones(4)))))
with capture_logs() as logs:
f(x_inner_and_outer_functional)
# Since both wrappers were functionalized, they both log the functional "add" op
self.assertExpectedInline('\n'.join(logs), """\
$1 = torch._ops.aten.add.Tensor($0, 1)
$3 = torch._ops.aten.add.Tensor($2, 1)""")
# Test 2: only the inner wrapper is "functionalized"
x_only_inner_functional = InplaceLoggingTensor(torch._to_functional_tensor(LoggingTensor(torch.ones(4))))
with capture_logs() as logs:
f(x_only_inner_functional)
# Since only the inner wrapper is functionalized, only the inner (first) logged op is the functional add
self.assertExpectedInline('\n'.join(logs), """\
$1 = torch._ops.aten.add.Tensor($0, 1)
$3 = torch._ops.aten.add_.Tensor($2, 1)""")
# Test 3: only the outer wrapper is "functionalized"
x_only_outer_functional = torch._to_functional_tensor(InplaceLoggingTensor(LoggingTensor(torch.ones(4))))
with capture_logs() as logs:
f(x_only_outer_functional)
# Since only the outer wrapper is functionalized, only the outer (second) logged op is the functional add;
# the inner wrapper still records the inplace add_
self.assertExpectedInline('\n'.join(logs), """\
$1 = torch._ops.aten.add_.Tensor($0, 1)
$3 = torch._ops.aten.add.Tensor($2, 1)""")
if __name__ == '__main__':
run_tests()