from pytensor.link.pytorch.dispatch.basic import pytorch_funcify
from pytensor.tensor.subtensor import (
    AdvancedIncSubtensor,
    AdvancedIncSubtensor1,
    AdvancedSubtensor,
    AdvancedSubtensor1,
    IncSubtensor,
    Subtensor,
    indices_from_subtensor,
)
from pytensor.tensor.type_other import MakeSlice, SliceType


def check_negative_steps(indices):
    # PyTorch does not support negative slice steps, so reject them eagerly
    for index in indices:
        if isinstance(index, slice):
            if index.step is not None and index.step < 0:
                raise NotImplementedError(
                    "Negative step sizes are not supported in PyTorch"
                )


@pytorch_funcify.register(Subtensor)
def pytorch_funcify_Subtensor(op, node, **kwargs):
    idx_list = op.idx_list

    def subtensor(x, *flattened_indices):
        indices = indices_from_subtensor(flattened_indices, idx_list)
        check_negative_steps(indices)
        return x[indices]

    return subtensor
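
# A minimal usage sketch, not part of the module itself: it assumes the
# "PYTORCH" compile mode is registered with PyTensor; names and values are
# illustrative only:
#
#   import numpy as np
#   import pytensor
#   import pytensor.tensor as pt
#
#   x = pt.vector("x")
#   f = pytensor.function([x], x[1:3], mode="PYTORCH")
#   f(np.arange(5.0))  # basic slicing runs through `subtensor` above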


@pytorch_funcify.register(MakeSlice)
def pytorch_funcify_makeslice(op, **kwargs):
    def makeslice(*x):
        # Unpack the flattened (start, stop, step) entries; `slice(x)` would
        # wrongly build a slice whose stop is the whole argument tuple
        return slice(*x)

    return makeslice


@pytorch_funcify.register(AdvancedSubtensor1)
@pytorch_funcify.register(AdvancedSubtensor)
def pytorch_funcify_AdvSubtensor(op, node, **kwargs):
    def advsubtensor(x, *indices):
        check_negative_steps(indices)
        return x[indices]

    return advsubtensor
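
# Hedged sketch of the advanced-indexing path (same "PYTORCH" mode assumption
# as above): integer-array indices dispatch here rather than to Subtensor:
#
#   x = pt.matrix("x")
#   idx = pt.lvector("idx")
#   g = pytensor.function([x, idx], x[idx], mode="PYTORCH")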


@pytorch_funcify.register(IncSubtensor)
def pytorch_funcify_IncSubtensor(op, node, **kwargs):
    idx_list = op.idx_list
    inplace = op.inplace
    if op.set_instead_of_inc:

        def set_subtensor(x, y, *flattened_indices):
            indices = indices_from_subtensor(flattened_indices, idx_list)
            check_negative_steps(indices)
            if not inplace:
                x = x.clone()
            x[indices] = y
            return x

        return set_subtensor

    else:

        def inc_subtensor(x, y, *flattened_indices):
            indices = indices_from_subtensor(flattened_indices, idx_list)
            check_negative_steps(indices)
            if not inplace:
                x = x.clone()
            x[indices] += y
            return x

        return inc_subtensor
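
# Illustrative sketch (assumptions as above; set_subtensor/inc_subtensor are
# the PyTensor graph builders that produce the IncSubtensor op handled here):
#
#   from pytensor.tensor.subtensor import inc_subtensor, set_subtensor
#
#   x = pt.vector("x")
#   out_set = set_subtensor(x[1:3], 0.0)  # lowers to `set_subtensor` above
#   out_inc = inc_subtensor(x[1:3], 1.0)  # lowers to `inc_subtensor` above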


@pytorch_funcify.register(AdvancedIncSubtensor)
@pytorch_funcify.register(AdvancedIncSubtensor1)
def pytorch_funcify_AdvancedIncSubtensor(op, node, **kwargs):
    inplace = op.inplace
    ignore_duplicates = getattr(op, "ignore_duplicates", False)

    if op.set_instead_of_inc:

        def adv_set_subtensor(x, y, *indices):
            check_negative_steps(indices)
            if not inplace:
                x = x.clone()
            x[indices] = y.type_as(x)
            return x

        return adv_set_subtensor

    elif ignore_duplicates:

        def adv_inc_subtensor_no_duplicates(x, y, *indices):
            check_negative_steps(indices)
            if not inplace:
                x = x.clone()
            x[indices] += y.type_as(x)
            return x

        return adv_inc_subtensor_no_duplicates

    else:
        if any(isinstance(idx.type, SliceType) for idx in node.inputs[2:]):
            raise NotImplementedError(
                "AdvancedIncSubtensor with potentially duplicate indices and slice indexing is not implemented in PyTorch"
            )

        def adv_inc_subtensor(x, y, *indices):
            # check_negative_steps is not needed: slice indices were rejected above
            if not inplace:
                x = x.clone()
            x.index_put_(indices, y.type_as(x), accumulate=True)
            return x

        return adv_inc_subtensor
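
# Sketch of the duplicate-accumulation path (same assumptions as the earlier
# examples): repeated indices each contribute, matching index_put_ with
# accumulate=True:
#
#   out = inc_subtensor(pt.zeros(3)[[0, 0, 1]], 1.0)
#   # compiled with the PyTorch backend, out evaluates to [2., 1., 0.]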