#define TORCH_ASSERT_ONLY_METHOD_OPERATORS
// @generated by torchgen/gen.py from RegisterFunctionalization.cpp

#include <ATen/core/LegacyTypeDispatch.h>
#include <ATen/EmptyTensor.h>
#include <ATen/FunctionalTensorWrapper.h>
#include <ATen/FunctionalInverses.h>
#include <torch/library.h>

#ifndef AT_PER_OPERATOR_HEADERS
#include <ATen/Operators.h>
#include <ATen/NativeFunctions.h>
#else
// needed for the meta tensor calls to get stride info in functionalization
#include <ATen/ops/empty_strided_native.h>
// needed for special handling of copy_().
// See Note [functionalizating copy_() and not preserving strides]
#include <ATen/ops/to_ops.h>
#include <ATen/ops/expand_copy_ops.h>

#include <ATen/ops/_new_zeros_with_same_feature_meta_native.h>
#include <ATen/ops/_new_zeros_with_same_feature_meta_ops.h>
#include <ATen/ops/_new_zeros_with_same_feature_meta_native.h>
#include <ATen/ops/_new_zeros_with_same_feature_meta_ops.h>
#include <ATen/ops/_cudnn_init_dropout_state_native.h>
#include <ATen/ops/_cudnn_init_dropout_state_ops.h>
#include <ATen/ops/_cudnn_init_dropout_state_native.h>
#include <ATen/ops/_cudnn_init_dropout_state_ops.h>
#include <ATen/ops/angle_native.h>
#include <ATen/ops/angle_ops.h>
#include <ATen/ops/angle_native.h>
#include <ATen/ops/angle_ops.h>
#include <ATen/ops/sgn_native.h>
#include <ATen/ops/sgn_ops.h>
#include <ATen/ops/sgn_native.h>
#include <ATen/ops/sgn_ops.h>
#include <ATen/ops/sgn_native.h>
#include <ATen/ops/sgn_ops.h>
#include <ATen/ops/_add_relu_native.h>
#include <ATen/ops/_add_relu_ops.h>
#include <ATen/ops/_add_relu_native.h>
#include <ATen/ops/_add_relu_ops.h>
#include <ATen/ops/_add_relu_native.h>
#include <ATen/ops/_add_relu_ops.h>
#include <ATen/ops/_add_relu_native.h>
#include <ATen/ops/_add_relu_ops.h>
#include <ATen/ops/_add_relu_native.h>
#include <ATen/ops/_add_relu_ops.h>
#include <ATen/ops/_add_relu_native.h>
#include <ATen/ops/_add_relu_ops.h>
#include <ATen/ops/any_native.h>
#include <ATen/ops/any_ops.h>
#include <ATen/ops/any_native.h>
#include <ATen/ops/any_ops.h>
#include <ATen/ops/any_native.h>
#include <ATen/ops/any_ops.h>
#include <ATen/ops/any_native.h>
#include <ATen/ops/any_ops.h>
#include <ATen/ops/argmin_native.h>
#include <ATen/ops/argmin_ops.h>
#include <ATen/ops/argmin_native.h>
#include <ATen/ops/argmin_ops.h>
#include <ATen/ops/acosh_native.h>
#include <ATen/ops/acosh_ops.h>
#include <ATen/ops/acosh_native.h>
#include <ATen/ops/acosh_ops.h>
#include <ATen/ops/acosh_native.h>
#include <ATen/ops/acosh_ops.h>
#include <ATen/ops/arcsin_native.h>
#include <ATen/ops/arcsin_ops.h>
#include <ATen/ops/arcsin_native.h>
#include <ATen/ops/arcsin_ops.h>
#include <ATen/ops/arcsin_native.h>
#include <ATen/ops/arcsin_ops.h>
#include <ATen/ops/arctan_native.h>
#include <ATen/ops/arctan_ops.h>
#include <ATen/ops/arctan_native.h>
#include <ATen/ops/arctan_ops.h>
#include <ATen/ops/arctan_native.h>
#include <ATen/ops/arctan_ops.h>
#include <ATen/ops/bincount_native.h>
#include <ATen/ops/bincount_ops.h>
#include <ATen/ops/bincount_native.h>
#include <ATen/ops/bincount_ops.h>
#include <ATen/ops/copysign_native.h>
#include <ATen/ops/copysign_ops.h>
#include <ATen/ops/copysign_native.h>
#include <ATen/ops/copysign_ops.h>
#include <ATen/ops/copysign_native.h>
#include <ATen/ops/copysign_ops.h>
#include <ATen/ops/copysign_native.h>
#include <ATen/ops/copysign_ops.h>
#include <ATen/ops/copysign_native.h>
#include <ATen/ops/copysign_ops.h>
#include <ATen/ops/copysign_native.h>
#include <ATen/ops/copysign_ops.h>
#include <ATen/ops/logical_or_native.h>
#include <ATen/ops/logical_or_ops.h>
#include <ATen/ops/logical_or_native.h>
#include <ATen/ops/logical_or_ops.h>
#include <ATen/ops/logical_or_native.h>
#include <ATen/ops/logical_or_ops.h>
#include <ATen/ops/cat_native.h>
#include <ATen/ops/cat_ops.h>
#include <ATen/ops/cat_native.h>
#include <ATen/ops/cat_ops.h>
#include <ATen/ops/cat_native.h>
#include <ATen/ops/cat_ops.h>
#include <ATen/ops/cat_native.h>
#include <ATen/ops/cat_ops.h>
#include <ATen/ops/concat_native.h>
#include <ATen/ops/concat_ops.h>
#include <ATen/ops/concat_native.h>
#include <ATen/ops/concat_ops.h>
#include <ATen/ops/concat_native.h>
#include <ATen/ops/concat_ops.h>
#include <ATen/ops/concat_native.h>
#include <ATen/ops/concat_ops.h>
#include <ATen/ops/ceil_native.h>
#include <ATen/ops/ceil_ops.h>
#include <ATen/ops/ceil_native.h>
#include <ATen/ops/ceil_ops.h>
#include <ATen/ops/ceil_native.h>
#include <ATen/ops/ceil_ops.h>
#include <ATen/ops/polar_native.h>
#include <ATen/ops/polar_ops.h>
#include <ATen/ops/polar_native.h>
#include <ATen/ops/polar_ops.h>
#include <ATen/ops/convolution_native.h>
#include <ATen/ops/convolution_ops.h>
#include <ATen/ops/convolution_native.h>
#include <ATen/ops/convolution_ops.h>
#include <ATen/ops/convolution_overrideable_native.h>
#include <ATen/ops/convolution_overrideable_ops.h>
#include <ATen/ops/convolution_overrideable_native.h>
#include <ATen/ops/convolution_overrideable_ops.h>
#include <ATen/ops/convolution_backward_overrideable_native.h>
#include <ATen/ops/convolution_backward_overrideable_ops.h>
#include <ATen/ops/convolution_backward_overrideable_native.h>
#include <ATen/ops/convolution_backward_overrideable_ops.h>
#include <ATen/ops/cudnn_affine_grid_generator_native.h>
#include <ATen/ops/cudnn_affine_grid_generator_ops.h>
#include <ATen/ops/cudnn_affine_grid_generator_native.h>
#include <ATen/ops/cudnn_affine_grid_generator_ops.h>
#include <ATen/ops/cudnn_batch_norm_backward_native.h>
#include <ATen/ops/cudnn_batch_norm_backward_ops.h>
#include <ATen/ops/cudnn_batch_norm_backward_native.h>
#include <ATen/ops/cudnn_batch_norm_backward_ops.h>
#include <ATen/ops/cudnn_convolution_relu_native.h>
#include <ATen/ops/cudnn_convolution_relu_ops.h>
#include <ATen/ops/cudnn_convolution_relu_native.h>
#include <ATen/ops/cudnn_convolution_relu_ops.h>
#include <ATen/ops/cudnn_convolution_add_relu_native.h>
#include <ATen/ops/cudnn_convolution_add_relu_ops.h>
#include <ATen/ops/cudnn_convolution_add_relu_native.h>
#include <ATen/ops/cudnn_convolution_add_relu_ops.h>
#include <ATen/ops/cummax_native.h>
#include <ATen/ops/cummax_ops.h>
#include <ATen/ops/cummax_native.h>
#include <ATen/ops/cummax_ops.h>
#include <ATen/ops/cummax_native.h>
#include <ATen/ops/cummax_ops.h>
#include <ATen/ops/cummax_native.h>
#include <ATen/ops/cummax_ops.h>
#include <ATen/ops/diff_native.h>
#include <ATen/ops/diff_ops.h>
#include <ATen/ops/diff_native.h>
#include <ATen/ops/diff_ops.h>
#include <ATen/ops/embedding_renorm_native.h>
#include <ATen/ops/embedding_renorm_ops.h>
#include <ATen/ops/embedding_renorm_native.h>
#include <ATen/ops/embedding_renorm_ops.h>
#include <ATen/ops/embedding_renorm_native.h>
#include <ATen/ops/embedding_renorm_ops.h>
#include <ATen/ops/_embedding_bag_forward_only_native.h>
#include <ATen/ops/_embedding_bag_forward_only_ops.h>
#include <ATen/ops/_embedding_bag_forward_only_native.h>
#include <ATen/ops/_embedding_bag_forward_only_ops.h>
#include <ATen/ops/_embedding_bag_dense_backward_native.h>
#include <ATen/ops/_embedding_bag_dense_backward_ops.h>
#include <ATen/ops/_embedding_bag_dense_backward_native.h>
#include <ATen/ops/_embedding_bag_dense_backward_ops.h>
#include <ATen/ops/empty_native.h>
#include <ATen/ops/empty_ops.h>
#include <ATen/ops/empty_native.h>
#include <ATen/ops/empty_ops.h>
#include <ATen/ops/empty_native.h>
#include <ATen/ops/empty_ops.h>
#include <ATen/ops/empty_native.h>
#include <ATen/ops/empty_ops.h>
#include <ATen/ops/empty_like_native.h>
#include <ATen/ops/empty_like_ops.h>
#include <ATen/ops/empty_like_native.h>
#include <ATen/ops/empty_like_ops.h>
#include <ATen/ops/erf_native.h>
#include <ATen/ops/erf_ops.h>
#include <ATen/ops/erf_native.h>
#include <ATen/ops/erf_ops.h>
#include <ATen/ops/erf_native.h>
#include <ATen/ops/erf_ops.h>
#include <ATen/ops/erfc_native.h>
#include <ATen/ops/erfc_ops.h>
#include <ATen/ops/erfc_native.h>
#include <ATen/ops/erfc_ops.h>
#include <ATen/ops/erfc_native.h>
#include <ATen/ops/erfc_ops.h>
#include <ATen/ops/eye_native.h>
#include <ATen/ops/eye_ops.h>
#include <ATen/ops/eye_native.h>
#include <ATen/ops/eye_ops.h>
#include <ATen/ops/eye_native.h>
#include <ATen/ops/eye_ops.h>
#include <ATen/ops/eye_native.h>
#include <ATen/ops/eye_ops.h>
#include <ATen/ops/gcd_native.h>
#include <ATen/ops/gcd_ops.h>
#include <ATen/ops/gcd_native.h>
#include <ATen/ops/gcd_ops.h>
#include <ATen/ops/gcd_native.h>
#include <ATen/ops/gcd_ops.h>
#include <ATen/ops/grid_sampler_2d_backward_native.h>
#include <ATen/ops/grid_sampler_2d_backward_ops.h>
#include <ATen/ops/grid_sampler_2d_backward_native.h>
#include <ATen/ops/grid_sampler_2d_backward_ops.h>
#include <ATen/ops/native_group_norm_backward_native.h>
#include <ATen/ops/native_group_norm_backward_ops.h>
#include <ATen/ops/native_group_norm_backward_native.h>
#include <ATen/ops/native_group_norm_backward_ops.h>
#include <ATen/ops/_fft_r2c_native.h>
#include <ATen/ops/_fft_r2c_ops.h>
#include <ATen/ops/_fft_r2c_native.h>
#include <ATen/ops/_fft_r2c_ops.h>
#include <ATen/ops/index_native.h>
#include <ATen/ops/index_ops.h>
#include <ATen/ops/index_native.h>
#include <ATen/ops/index_ops.h>
#include <ATen/ops/index_put_native.h>
#include <ATen/ops/index_put_ops.h>
#include <ATen/ops/index_put_native.h>
#include <ATen/ops/index_put_ops.h>
#include <ATen/ops/index_put_native.h>
#include <ATen/ops/index_put_ops.h>
#include <ATen/ops/linear_backward_native.h>
#include <ATen/ops/linear_backward_ops.h>
#include <ATen/ops/linear_backward_native.h>
#include <ATen/ops/linear_backward_ops.h>
#include <ATen/ops/mkldnn_linear_backward_weights_native.h>
#include <ATen/ops/mkldnn_linear_backward_weights_ops.h>
#include <ATen/ops/mkldnn_linear_backward_weights_native.h>
#include <ATen/ops/mkldnn_linear_backward_weights_ops.h>
#include <ATen/ops/logaddexp_native.h>
#include <ATen/ops/logaddexp_ops.h>
#include <ATen/ops/logaddexp_native.h>
#include <ATen/ops/logaddexp_ops.h>
#include <ATen/ops/_logcumsumexp_native.h>
#include <ATen/ops/_logcumsumexp_ops.h>
#include <ATen/ops/_logcumsumexp_native.h>
#include <ATen/ops/_logcumsumexp_ops.h>
#include <ATen/ops/logcumsumexp_native.h>
#include <ATen/ops/logcumsumexp_ops.h>
#include <ATen/ops/logcumsumexp_native.h>
#include <ATen/ops/logcumsumexp_ops.h>
#include <ATen/ops/logcumsumexp_native.h>
#include <ATen/ops/logcumsumexp_ops.h>
#include <ATen/ops/logcumsumexp_native.h>
#include <ATen/ops/logcumsumexp_ops.h>
#include <ATen/ops/matmul_backward_native.h>
#include <ATen/ops/matmul_backward_ops.h>
#include <ATen/ops/matmul_backward_native.h>
#include <ATen/ops/matmul_backward_ops.h>
#include <ATen/ops/mps_max_pool2d_backward_native.h>
#include <ATen/ops/mps_max_pool2d_backward_ops.h>
#include <ATen/ops/mps_max_pool2d_backward_native.h>
#include <ATen/ops/mps_max_pool2d_backward_ops.h>
#include <ATen/ops/median_native.h>
#include <ATen/ops/median_ops.h>
#include <ATen/ops/median_native.h>
#include <ATen/ops/median_ops.h>
#include <ATen/ops/median_native.h>
#include <ATen/ops/median_ops.h>
#include <ATen/ops/median_native.h>
#include <ATen/ops/median_ops.h>
#include <ATen/ops/median_native.h>
#include <ATen/ops/median_ops.h>
#include <ATen/ops/median_native.h>
#include <ATen/ops/median_ops.h>
#include <ATen/ops/amin_native.h>
#include <ATen/ops/amin_ops.h>
#include <ATen/ops/amin_native.h>
#include <ATen/ops/amin_ops.h>
#include <ATen/ops/mkldnn_rnn_layer_backward_native.h>
#include <ATen/ops/mkldnn_rnn_layer_backward_ops.h>
#include <ATen/ops/mkldnn_rnn_layer_backward_native.h>
#include <ATen/ops/mkldnn_rnn_layer_backward_ops.h>
#include <ATen/ops/miopen_convolution_native.h>
#include <ATen/ops/miopen_convolution_ops.h>
#include <ATen/ops/miopen_convolution_native.h>
#include <ATen/ops/miopen_convolution_ops.h>
#include <ATen/ops/miopen_depthwise_convolution_native.h>
#include <ATen/ops/miopen_depthwise_convolution_ops.h>
#include <ATen/ops/miopen_depthwise_convolution_native.h>
#include <ATen/ops/miopen_depthwise_convolution_ops.h>
#include <ATen/ops/miopen_rnn_backward_native.h>
#include <ATen/ops/miopen_rnn_backward_ops.h>
#include <ATen/ops/miopen_rnn_backward_native.h>
#include <ATen/ops/miopen_rnn_backward_ops.h>
#include <ATen/ops/native_batch_norm_native.h>
#include <ATen/ops/native_batch_norm_ops.h>
#include <ATen/ops/native_batch_norm_native.h>
#include <ATen/ops/native_batch_norm_ops.h>
#include <ATen/ops/batch_norm_elemt_native.h>
#include <ATen/ops/batch_norm_elemt_ops.h>
#include <ATen/ops/batch_norm_elemt_native.h>
#include <ATen/ops/batch_norm_elemt_ops.h>
#include <ATen/ops/batch_norm_update_stats_native.h>
#include <ATen/ops/batch_norm_update_stats_ops.h>
#include <ATen/ops/batch_norm_update_stats_native.h>
#include <ATen/ops/batch_norm_update_stats_ops.h>
#include <ATen/ops/_nnpack_spatial_convolution_native.h>
#include <ATen/ops/_nnpack_spatial_convolution_ops.h>
#include <ATen/ops/_nnpack_spatial_convolution_native.h>
#include <ATen/ops/_nnpack_spatial_convolution_ops.h>
#include <ATen/ops/ones_like_native.h>
#include <ATen/ops/ones_like_ops.h>
#include <ATen/ops/ones_like_native.h>
#include <ATen/ops/ones_like_ops.h>
#include <ATen/ops/channel_shuffle_native.h>
#include <ATen/ops/channel_shuffle_ops.h>
#include <ATen/ops/channel_shuffle_native.h>
#include <ATen/ops/channel_shuffle_ops.h>
#include <ATen/ops/randint_native.h>
#include <ATen/ops/randint_ops.h>
#include <ATen/ops/randint_native.h>
#include <ATen/ops/randint_ops.h>
#include <ATen/ops/randint_native.h>
#include <ATen/ops/randint_ops.h>
#include <ATen/ops/randint_native.h>
#include <ATen/ops/randint_ops.h>
#include <ATen/ops/randint_native.h>
#include <ATen/ops/randint_ops.h>
#include <ATen/ops/randint_native.h>
#include <ATen/ops/randint_ops.h>
#include <ATen/ops/randint_native.h>
#include <ATen/ops/randint_ops.h>
#include <ATen/ops/randint_native.h>
#include <ATen/ops/randint_ops.h>
#include <ATen/ops/randperm_native.h>
#include <ATen/ops/randperm_ops.h>
#include <ATen/ops/randperm_native.h>
#include <ATen/ops/randperm_ops.h>
#include <ATen/ops/randperm_native.h>
#include <ATen/ops/randperm_ops.h>
#include <ATen/ops/randperm_native.h>
#include <ATen/ops/randperm_ops.h>
#include <ATen/ops/range_native.h>
#include <ATen/ops/range_ops.h>
#include <ATen/ops/range_native.h>
#include <ATen/ops/range_ops.h>
#include <ATen/ops/range_native.h>
#include <ATen/ops/range_ops.h>
#include <ATen/ops/range_native.h>
#include <ATen/ops/range_ops.h>
#include <ATen/ops/reciprocal_native.h>
#include <ATen/ops/reciprocal_ops.h>
#include <ATen/ops/reciprocal_native.h>
#include <ATen/ops/reciprocal_ops.h>
#include <ATen/ops/reciprocal_native.h>
#include <ATen/ops/reciprocal_ops.h>
#include <ATen/ops/gelu_backward_native.h>
#include <ATen/ops/gelu_backward_ops.h>
#include <ATen/ops/gelu_backward_native.h>
#include <ATen/ops/gelu_backward_ops.h>
#include <ATen/ops/hardshrink_native.h>
#include <ATen/ops/hardshrink_ops.h>
#include <ATen/ops/hardshrink_native.h>
#include <ATen/ops/hardshrink_ops.h>
#include <ATen/ops/hardshrink_backward_native.h>
#include <ATen/ops/hardshrink_backward_ops.h>
#include <ATen/ops/hardshrink_backward_native.h>
#include <ATen/ops/hardshrink_backward_ops.h>
#include <ATen/ops/silu_native.h>
#include <ATen/ops/silu_ops.h>
#include <ATen/ops/silu_native.h>
#include <ATen/ops/silu_ops.h>
#include <ATen/ops/silu_native.h>
#include <ATen/ops/silu_ops.h>
#include <ATen/ops/silu_backward_native.h>
#include <ATen/ops/silu_backward_ops.h>
#include <ATen/ops/silu_backward_native.h>
#include <ATen/ops/silu_backward_ops.h>
#include <ATen/ops/sin_native.h>
#include <ATen/ops/sin_ops.h>
#include <ATen/ops/sin_native.h>
#include <ATen/ops/sin_ops.h>
#include <ATen/ops/sin_native.h>
#include <ATen/ops/sin_ops.h>
#include <ATen/ops/_softmax_native.h>
#include <ATen/ops/_softmax_ops.h>
#include <ATen/ops/_softmax_native.h>
#include <ATen/ops/_softmax_ops.h>
#include <ATen/ops/sspaddmm_native.h>
#include <ATen/ops/sspaddmm_ops.h>
#include <ATen/ops/sspaddmm_native.h>
#include <ATen/ops/sspaddmm_ops.h>
#include <ATen/ops/_stack_native.h>
#include <ATen/ops/_stack_ops.h>
#include <ATen/ops/_stack_native.h>
#include <ATen/ops/_stack_ops.h>
#include <ATen/ops/hstack_native.h>
#include <ATen/ops/hstack_ops.h>
#include <ATen/ops/hstack_native.h>
#include <ATen/ops/hstack_ops.h>
#include <ATen/ops/dstack_native.h>
#include <ATen/ops/dstack_ops.h>
#include <ATen/ops/dstack_native.h>
#include <ATen/ops/dstack_ops.h>
#include <ATen/ops/sum_native.h>
#include <ATen/ops/sum_ops.h>
#include <ATen/ops/sum_native.h>
#include <ATen/ops/sum_ops.h>
#include <ATen/ops/sum_native.h>
#include <ATen/ops/sum_ops.h>
#include <ATen/ops/sum_native.h>
#include <ATen/ops/sum_ops.h>
#include <ATen/ops/sum_native.h>
#include <ATen/ops/sum_ops.h>
#include <ATen/ops/sum_native.h>
#include <ATen/ops/sum_ops.h>
#include <ATen/ops/std_native.h>
#include <ATen/ops/std_ops.h>
#include <ATen/ops/std_native.h>
#include <ATen/ops/std_ops.h>
#include <ATen/ops/std_native.h>
#include <ATen/ops/std_ops.h>
#include <ATen/ops/std_native.h>
#include <ATen/ops/std_ops.h>
#include <ATen/ops/std_native.h>
#include <ATen/ops/std_ops.h>
#include <ATen/ops/std_native.h>
#include <ATen/ops/std_ops.h>
#include <ATen/ops/std_native.h>
#include <ATen/ops/std_ops.h>
#include <ATen/ops/std_native.h>
#include <ATen/ops/std_ops.h>
#include <ATen/ops/tan_native.h>
#include <ATen/ops/tan_ops.h>
#include <ATen/ops/tan_native.h>
#include <ATen/ops/tan_ops.h>
#include <ATen/ops/tan_native.h>
#include <ATen/ops/tan_ops.h>
#include <ATen/ops/tensordot_native.h>
#include <ATen/ops/tensordot_ops.h>
#include <ATen/ops/tensordot_native.h>
#include <ATen/ops/tensordot_ops.h>
#include <ATen/ops/threshold_native.h>
#include <ATen/ops/threshold_ops.h>
#include <ATen/ops/threshold_native.h>
#include <ATen/ops/threshold_ops.h>
#include <ATen/ops/threshold_native.h>
#include <ATen/ops/threshold_ops.h>
#include <ATen/ops/_nested_tensor_strides_native.h>
#include <ATen/ops/_nested_tensor_strides_ops.h>
#include <ATen/ops/_nested_tensor_strides_native.h>
#include <ATen/ops/_nested_tensor_strides_ops.h>
#include <ATen/ops/fix_native.h>
#include <ATen/ops/fix_ops.h>
#include <ATen/ops/fix_native.h>
#include <ATen/ops/fix_ops.h>
#include <ATen/ops/fix_native.h>
#include <ATen/ops/fix_ops.h>
#include <ATen/ops/unique_consecutive_native.h>
#include <ATen/ops/unique_consecutive_ops.h>
#include <ATen/ops/unique_consecutive_native.h>
#include <ATen/ops/unique_consecutive_ops.h>
#include <ATen/ops/var_native.h>
#include <ATen/ops/var_ops.h>
#include <ATen/ops/var_native.h>
#include <ATen/ops/var_ops.h>
#include <ATen/ops/var_native.h>
#include <ATen/ops/var_ops.h>
#include <ATen/ops/var_native.h>
#include <ATen/ops/var_ops.h>
#include <ATen/ops/var_native.h>
#include <ATen/ops/var_ops.h>
#include <ATen/ops/var_native.h>
#include <ATen/ops/var_ops.h>
#include <ATen/ops/var_native.h>
#include <ATen/ops/var_ops.h>
#include <ATen/ops/var_native.h>
#include <ATen/ops/var_ops.h>
#include <ATen/ops/_standard_gamma_grad_native.h>
#include <ATen/ops/_standard_gamma_grad_ops.h>
#include <ATen/ops/_standard_gamma_grad_native.h>
#include <ATen/ops/_standard_gamma_grad_ops.h>
#include <ATen/ops/poisson_native.h>
#include <ATen/ops/poisson_ops.h>
#include <ATen/ops/poisson_native.h>
#include <ATen/ops/poisson_ops.h>
#include <ATen/ops/_sparse_csr_sum_native.h>
#include <ATen/ops/_sparse_csr_sum_ops.h>
#include <ATen/ops/_sparse_csr_sum_native.h>
#include <ATen/ops/_sparse_csr_sum_ops.h>
#include <ATen/ops/_sparse_softmax_backward_data_native.h>
#include <ATen/ops/_sparse_softmax_backward_data_ops.h>
#include <ATen/ops/_sparse_softmax_backward_data_native.h>
#include <ATen/ops/_sparse_softmax_backward_data_ops.h>
#include <ATen/ops/_sparse_log_softmax_native.h>
#include <ATen/ops/_sparse_log_softmax_ops.h>
#include <ATen/ops/_sparse_log_softmax_native.h>
#include <ATen/ops/_sparse_log_softmax_ops.h>
#include <ATen/ops/_sparse_log_softmax_backward_data_native.h>
#include <ATen/ops/_sparse_log_softmax_backward_data_ops.h>
#include <ATen/ops/_sparse_log_softmax_backward_data_native.h>
#include <ATen/ops/_sparse_log_softmax_backward_data_ops.h>
#include <ATen/ops/resize_as_sparse_native.h>
#include <ATen/ops/resize_as_sparse_ops.h>
#include <ATen/ops/resize_as_sparse_native.h>
#include <ATen/ops/resize_as_sparse_ops.h>
#include <ATen/ops/resize_as_sparse_native.h>
#include <ATen/ops/resize_as_sparse_ops.h>
#include <ATen/ops/sub_native.h>
#include <ATen/ops/sub_ops.h>
#include <ATen/ops/sub_native.h>
#include <ATen/ops/sub_ops.h>
#include <ATen/ops/sub_native.h>
#include <ATen/ops/sub_ops.h>
#include <ATen/ops/sub_native.h>
#include <ATen/ops/sub_ops.h>
#include <ATen/ops/sub_native.h>
#include <ATen/ops/sub_ops.h>
#include <ATen/ops/sub_native.h>
#include <ATen/ops/sub_ops.h>
#include <ATen/ops/copy_sparse_to_sparse_native.h>
#include <ATen/ops/copy_sparse_to_sparse_ops.h>
#include <ATen/ops/copy_sparse_to_sparse_native.h>
#include <ATen/ops/copy_sparse_to_sparse_ops.h>
#include <ATen/ops/copy_sparse_to_sparse_native.h>
#include <ATen/ops/copy_sparse_to_sparse_ops.h>
#include <ATen/ops/mkldnn_reorder_conv2d_weight_native.h>
#include <ATen/ops/mkldnn_reorder_conv2d_weight_ops.h>
#include <ATen/ops/mkldnn_reorder_conv2d_weight_native.h>
#include <ATen/ops/mkldnn_reorder_conv2d_weight_ops.h>
#include <ATen/ops/_lstm_mps_native.h>
#include <ATen/ops/_lstm_mps_ops.h>
#include <ATen/ops/_lstm_mps_native.h>
#include <ATen/ops/_lstm_mps_ops.h>
#include <ATen/ops/_thnn_fused_gru_cell_backward_native.h>
#include <ATen/ops/_thnn_fused_gru_cell_backward_ops.h>
#include <ATen/ops/_thnn_fused_gru_cell_backward_native.h>
#include <ATen/ops/_thnn_fused_gru_cell_backward_ops.h>
#include <ATen/ops/set_native.h>
#include <ATen/ops/set_ops.h>
#include <ATen/ops/set_native.h>
#include <ATen/ops/set_ops.h>
#include <ATen/ops/set_native.h>
#include <ATen/ops/set_ops.h>
#include <ATen/ops/set_native.h>
#include <ATen/ops/set_ops.h>
#include <ATen/ops/set_native.h>
#include <ATen/ops/set_ops.h>
#include <ATen/ops/set_native.h>
#include <ATen/ops/set_ops.h>
#include <ATen/ops/set_native.h>
#include <ATen/ops/set_ops.h>
#include <ATen/ops/set_native.h>
#include <ATen/ops/set_ops.h>
#include <ATen/ops/set_native.h>
#include <ATen/ops/set_ops.h>
#include <ATen/ops/set_native.h>
#include <ATen/ops/set_ops.h>
#include <ATen/ops/set_native.h>
#include <ATen/ops/set_ops.h>
#include <ATen/ops/set_native.h>
#include <ATen/ops/set_ops.h>
#include <ATen/ops/lift_native.h>
#include <ATen/ops/lift_ops.h>
#include <ATen/ops/lift_native.h>
#include <ATen/ops/lift_ops.h>
#include <ATen/ops/masked_scatter_native.h>
#include <ATen/ops/masked_scatter_ops.h>
#include <ATen/ops/masked_scatter_native.h>
#include <ATen/ops/masked_scatter_ops.h>
#include <ATen/ops/masked_scatter_native.h>
#include <ATen/ops/masked_scatter_ops.h>
#include <ATen/ops/index_reduce_native.h>
#include <ATen/ops/index_reduce_ops.h>
#include <ATen/ops/index_reduce_native.h>
#include <ATen/ops/index_reduce_ops.h>
#include <ATen/ops/index_reduce_native.h>
#include <ATen/ops/index_reduce_ops.h>
#include <ATen/ops/eq_native.h>
#include <ATen/ops/eq_ops.h>
#include <ATen/ops/eq_native.h>
#include <ATen/ops/eq_ops.h>
#include <ATen/ops/eq_native.h>
#include <ATen/ops/eq_ops.h>
#include <ATen/ops/eq_native.h>
#include <ATen/ops/eq_ops.h>
#include <ATen/ops/eq_native.h>
#include <ATen/ops/eq_ops.h>
#include <ATen/ops/eq_native.h>
#include <ATen/ops/eq_ops.h>
#include <ATen/ops/bitwise_and_native.h>
#include <ATen/ops/bitwise_and_ops.h>
#include <ATen/ops/bitwise_and_native.h>
#include <ATen/ops/bitwise_and_ops.h>
#include <ATen/ops/bitwise_and_native.h>
#include <ATen/ops/bitwise_and_ops.h>
#include <ATen/ops/bitwise_and_native.h>
#include <ATen/ops/bitwise_and_ops.h>
#include <ATen/ops/bitwise_and_native.h>
#include <ATen/ops/bitwise_and_ops.h>
#include <ATen/ops/bitwise_and_native.h>
#include <ATen/ops/bitwise_and_ops.h>
#include <ATen/ops/bitwise_and_native.h>
#include <ATen/ops/bitwise_and_ops.h>
#include <ATen/ops/bitwise_and_native.h>
#include <ATen/ops/bitwise_and_ops.h>
#include <ATen/ops/bitwise_or_native.h>
#include <ATen/ops/bitwise_or_ops.h>
#include <ATen/ops/bitwise_or_native.h>
#include <ATen/ops/bitwise_or_ops.h>
#include <ATen/ops/bitwise_or_native.h>
#include <ATen/ops/bitwise_or_ops.h>
#include <ATen/ops/bitwise_or_native.h>
#include <ATen/ops/bitwise_or_ops.h>
#include <ATen/ops/bitwise_or_native.h>
#include <ATen/ops/bitwise_or_ops.h>
#include <ATen/ops/bitwise_or_native.h>
#include <ATen/ops/bitwise_or_ops.h>
#include <ATen/ops/bitwise_or_native.h>
#include <ATen/ops/bitwise_or_ops.h>
#include <ATen/ops/bitwise_or_native.h>
#include <ATen/ops/bitwise_or_ops.h>
#include <ATen/ops/bitwise_left_shift_native.h>
#include <ATen/ops/bitwise_left_shift_ops.h>
#include <ATen/ops/bitwise_left_shift_native.h>
#include <ATen/ops/bitwise_left_shift_ops.h>
#include <ATen/ops/bitwise_left_shift_native.h>
#include <ATen/ops/bitwise_left_shift_ops.h>
#include <ATen/ops/bitwise_left_shift_native.h>
#include <ATen/ops/bitwise_left_shift_ops.h>
#include <ATen/ops/bitwise_left_shift_native.h>
#include <ATen/ops/bitwise_left_shift_ops.h>
#include <ATen/ops/bitwise_left_shift_native.h>
#include <ATen/ops/bitwise_left_shift_ops.h>
#include <ATen/ops/bitwise_left_shift_native.h>
#include <ATen/ops/bitwise_left_shift_ops.h>
#include <ATen/ops/bitwise_left_shift_native.h>
#include <ATen/ops/bitwise_left_shift_ops.h>
#include <ATen/ops/rshift_native.h>
#include <ATen/ops/rshift_ops.h>
#include <ATen/ops/rshift_native.h>
#include <ATen/ops/rshift_ops.h>
#include <ATen/ops/rshift_native.h>
#include <ATen/ops/rshift_ops.h>
#include <ATen/ops/rshift_native.h>
#include <ATen/ops/rshift_ops.h>
#include <ATen/ops/rshift_native.h>
#include <ATen/ops/rshift_ops.h>
#include <ATen/ops/rshift_native.h>
#include <ATen/ops/rshift_ops.h>
#include <ATen/ops/tril_native.h>
#include <ATen/ops/tril_ops.h>
#include <ATen/ops/tril_native.h>
#include <ATen/ops/tril_ops.h>
#include <ATen/ops/tril_native.h>
#include <ATen/ops/tril_ops.h>
#include <ATen/ops/triu_native.h>
#include <ATen/ops/triu_ops.h>
#include <ATen/ops/triu_native.h>
#include <ATen/ops/triu_ops.h>
#include <ATen/ops/triu_native.h>
#include <ATen/ops/triu_ops.h>
#include <ATen/ops/digamma_native.h>
#include <ATen/ops/digamma_ops.h>
#include <ATen/ops/digamma_native.h>
#include <ATen/ops/digamma_ops.h>
#include <ATen/ops/digamma_native.h>
#include <ATen/ops/digamma_ops.h>
#include <ATen/ops/lerp_native.h>
#include <ATen/ops/lerp_ops.h>
#include <ATen/ops/lerp_native.h>
#include <ATen/ops/lerp_ops.h>
#include <ATen/ops/lerp_native.h>
#include <ATen/ops/lerp_ops.h>
#include <ATen/ops/lerp_native.h>
#include <ATen/ops/lerp_ops.h>
#include <ATen/ops/lerp_native.h>
#include <ATen/ops/lerp_ops.h>
#include <ATen/ops/lerp_native.h>
#include <ATen/ops/lerp_ops.h>
#include <ATen/ops/uniform_native.h>
#include <ATen/ops/uniform_ops.h>
#include <ATen/ops/uniform_native.h>
#include <ATen/ops/uniform_ops.h>
#include <ATen/ops/uniform_native.h>
#include <ATen/ops/uniform_ops.h>
#include <ATen/ops/tril_indices_native.h>
#include <ATen/ops/tril_indices_ops.h>
#include <ATen/ops/tril_indices_native.h>
#include <ATen/ops/tril_indices_ops.h>
#include <ATen/ops/triu_indices_native.h>
#include <ATen/ops/triu_indices_ops.h>
#include <ATen/ops/triu_indices_native.h>
#include <ATen/ops/triu_indices_ops.h>
#include <ATen/ops/ge_native.h>
#include <ATen/ops/ge_ops.h>
#include <ATen/ops/ge_native.h>
#include <ATen/ops/ge_ops.h>
#include <ATen/ops/ge_native.h>
#include <ATen/ops/ge_ops.h>
#include <ATen/ops/ge_native.h>
#include <ATen/ops/ge_ops.h>
#include <ATen/ops/ge_native.h>
#include <ATen/ops/ge_ops.h>
#include <ATen/ops/ge_native.h>
#include <ATen/ops/ge_ops.h>
#include <ATen/ops/le_native.h>
#include <ATen/ops/le_ops.h>
#include <ATen/ops/le_native.h>
#include <ATen/ops/le_ops.h>
#include <ATen/ops/le_native.h>
#include <ATen/ops/le_ops.h>
#include <ATen/ops/le_native.h>
#include <ATen/ops/le_ops.h>
#include <ATen/ops/le_native.h>
#include <ATen/ops/le_ops.h>
#include <ATen/ops/le_native.h>
#include <ATen/ops/le_ops.h>
#include <ATen/ops/gt_native.h>
#include <ATen/ops/gt_ops.h>
#include <ATen/ops/gt_native.h>
#include <ATen/ops/gt_ops.h>
#include <ATen/ops/gt_native.h>
#include <ATen/ops/gt_ops.h>
#include <ATen/ops/gt_native.h>
#include <ATen/ops/gt_ops.h>
#include <ATen/ops/gt_native.h>
#include <ATen/ops/gt_ops.h>
#include <ATen/ops/gt_native.h>
#include <ATen/ops/gt_ops.h>
#include <ATen/ops/nonzero_native.h>
#include <ATen/ops/nonzero_ops.h>
#include <ATen/ops/nonzero_native.h>
#include <ATen/ops/nonzero_ops.h>
#include <ATen/ops/addcdiv_native.h>
#include <ATen/ops/addcdiv_ops.h>
#include <ATen/ops/addcdiv_native.h>
#include <ATen/ops/addcdiv_ops.h>
#include <ATen/ops/addcdiv_native.h>
#include <ATen/ops/addcdiv_ops.h>
#include <ATen/ops/triangular_solve_native.h>
#include <ATen/ops/triangular_solve_ops.h>
#include <ATen/ops/triangular_solve_native.h>
#include <ATen/ops/triangular_solve_ops.h>
#include <ATen/ops/cholesky_solve_native.h>
#include <ATen/ops/cholesky_solve_ops.h>
#include <ATen/ops/cholesky_solve_native.h>
#include <ATen/ops/cholesky_solve_ops.h>
#include <ATen/ops/cholesky_inverse_native.h>
#include <ATen/ops/cholesky_inverse_ops.h>
#include <ATen/ops/cholesky_inverse_native.h>
#include <ATen/ops/cholesky_inverse_ops.h>
#include <ATen/ops/qr_native.h>
#include <ATen/ops/qr_ops.h>
#include <ATen/ops/qr_native.h>
#include <ATen/ops/qr_ops.h>
#include <ATen/ops/geqrf_native.h>
#include <ATen/ops/geqrf_ops.h>
#include <ATen/ops/geqrf_native.h>
#include <ATen/ops/geqrf_ops.h>
#include <ATen/ops/orgqr_native.h>
#include <ATen/ops/orgqr_ops.h>
#include <ATen/ops/orgqr_native.h>
#include <ATen/ops/orgqr_ops.h>
#include <ATen/ops/lu_solve_native.h>
#include <ATen/ops/lu_solve_ops.h>
#include <ATen/ops/lu_solve_native.h>
#include <ATen/ops/lu_solve_ops.h>
#include <ATen/ops/lgamma_native.h>
#include <ATen/ops/lgamma_ops.h>
#include <ATen/ops/lgamma_native.h>
#include <ATen/ops/lgamma_ops.h>
#include <ATen/ops/lgamma_native.h>
#include <ATen/ops/lgamma_ops.h>
#include <ATen/ops/erfinv_native.h>
#include <ATen/ops/erfinv_ops.h>
#include <ATen/ops/erfinv_native.h>
#include <ATen/ops/erfinv_ops.h>
#include <ATen/ops/erfinv_native.h>
#include <ATen/ops/erfinv_ops.h>
#include <ATen/ops/i0_native.h>
#include <ATen/ops/i0_ops.h>
#include <ATen/ops/i0_native.h>
#include <ATen/ops/i0_ops.h>
#include <ATen/ops/i0_native.h>
#include <ATen/ops/i0_ops.h>
#include <ATen/ops/sign_native.h>
#include <ATen/ops/sign_ops.h>
#include <ATen/ops/sign_native.h>
#include <ATen/ops/sign_ops.h>
#include <ATen/ops/sign_native.h>
#include <ATen/ops/sign_ops.h>
#include <ATen/ops/signbit_native.h>
#include <ATen/ops/signbit_ops.h>
#include <ATen/ops/signbit_native.h>
#include <ATen/ops/signbit_ops.h>
#include <ATen/ops/atan2_native.h>
#include <ATen/ops/atan2_ops.h>
#include <ATen/ops/atan2_native.h>
#include <ATen/ops/atan2_ops.h>
#include <ATen/ops/atan2_native.h>
#include <ATen/ops/atan2_ops.h>
#include <ATen/ops/_histogramdd_bin_edges_native.h>
#include <ATen/ops/_histogramdd_bin_edges_ops.h>
#include <ATen/ops/_histogramdd_bin_edges_native.h>
#include <ATen/ops/_histogramdd_bin_edges_ops.h>
#include <ATen/ops/_histogramdd_from_bin_tensors_native.h>
#include <ATen/ops/_histogramdd_from_bin_tensors_ops.h>
#include <ATen/ops/_histogramdd_from_bin_tensors_native.h>
#include <ATen/ops/_histogramdd_from_bin_tensors_ops.h>
#include <ATen/ops/fmod_native.h>
#include <ATen/ops/fmod_ops.h>
#include <ATen/ops/fmod_native.h>
#include <ATen/ops/fmod_ops.h>
#include <ATen/ops/fmod_native.h>
#include <ATen/ops/fmod_ops.h>
#include <ATen/ops/fmod_native.h>
#include <ATen/ops/fmod_ops.h>
#include <ATen/ops/fmod_native.h>
#include <ATen/ops/fmod_ops.h>
#include <ATen/ops/fmod_native.h>
#include <ATen/ops/fmod_ops.h>
#include <ATen/ops/nextafter_native.h>
#include <ATen/ops/nextafter_ops.h>
#include <ATen/ops/nextafter_native.h>
#include <ATen/ops/nextafter_ops.h>
#include <ATen/ops/nextafter_native.h>
#include <ATen/ops/nextafter_ops.h>
#include <ATen/ops/minimum_native.h>
#include <ATen/ops/minimum_ops.h>
#include <ATen/ops/minimum_native.h>
#include <ATen/ops/minimum_ops.h>
#include <ATen/ops/topk_native.h>
#include <ATen/ops/topk_ops.h>
#include <ATen/ops/topk_native.h>
#include <ATen/ops/topk_ops.h>
#include <ATen/ops/any_native.h>
#include <ATen/ops/any_ops.h>
#include <ATen/ops/any_native.h>
#include <ATen/ops/any_ops.h>
#include <ATen/ops/_foreach_mul_native.h>
#include <ATen/ops/_foreach_mul_ops.h>
#include <ATen/ops/_foreach_mul_native.h>
#include <ATen/ops/_foreach_mul_ops.h>
#include <ATen/ops/_foreach_mul_native.h>
#include <ATen/ops/_foreach_mul_ops.h>
#include <ATen/ops/_foreach_div_native.h>
#include <ATen/ops/_foreach_div_ops.h>
#include <ATen/ops/_foreach_div_native.h>
#include <ATen/ops/_foreach_div_ops.h>
#include <ATen/ops/_foreach_div_native.h>
#include <ATen/ops/_foreach_div_ops.h>
#include <ATen/ops/_foreach_clamp_max_native.h>
#include <ATen/ops/_foreach_clamp_max_ops.h>
#include <ATen/ops/_foreach_clamp_max_native.h>
#include <ATen/ops/_foreach_clamp_max_ops.h>
#include <ATen/ops/_foreach_clamp_max_native.h>
#include <ATen/ops/_foreach_clamp_max_ops.h>
#include <ATen/ops/_foreach_minimum_native.h>
#include <ATen/ops/_foreach_minimum_ops.h>
#include <ATen/ops/_foreach_minimum_native.h>
#include <ATen/ops/_foreach_minimum_ops.h>
#include <ATen/ops/_foreach_minimum_native.h>
#include <ATen/ops/_foreach_minimum_ops.h>
#include <ATen/ops/_foreach_mul_native.h>
#include <ATen/ops/_foreach_mul_ops.h>
#include <ATen/ops/_foreach_mul_native.h>
#include <ATen/ops/_foreach_mul_ops.h>
#include <ATen/ops/_foreach_mul_native.h>
#include <ATen/ops/_foreach_mul_ops.h>
#include <ATen/ops/_foreach_div_native.h>
#include <ATen/ops/_foreach_div_ops.h>
#include <ATen/ops/_foreach_div_native.h>
#include <ATen/ops/_foreach_div_ops.h>
#include <ATen/ops/_foreach_div_native.h>
#include <ATen/ops/_foreach_div_ops.h>
#include <ATen/ops/_foreach_clamp_max_native.h>
#include <ATen/ops/_foreach_clamp_max_ops.h>
#include <ATen/ops/_foreach_clamp_max_native.h>
#include <ATen/ops/_foreach_clamp_max_ops.h>
#include <ATen/ops/_foreach_clamp_max_native.h>
#include <ATen/ops/_foreach_clamp_max_ops.h>
#include <ATen/ops/_foreach_minimum_native.h>
#include <ATen/ops/_foreach_minimum_ops.h>
#include <ATen/ops/_foreach_minimum_native.h>
#include <ATen/ops/_foreach_minimum_ops.h>
#include <ATen/ops/_foreach_minimum_native.h>
#include <ATen/ops/_foreach_minimum_ops.h>
#include <ATen/ops/_foreach_div_native.h>
#include <ATen/ops/_foreach_div_ops.h>
#include <ATen/ops/_foreach_div_native.h>
#include <ATen/ops/_foreach_div_ops.h>
#include <ATen/ops/_foreach_div_native.h>
#include <ATen/ops/_foreach_div_ops.h>
#include <ATen/ops/_foreach_mul_native.h>
#include <ATen/ops/_foreach_mul_ops.h>
#include <ATen/ops/_foreach_mul_native.h>
#include <ATen/ops/_foreach_mul_ops.h>
#include <ATen/ops/_foreach_mul_native.h>
#include <ATen/ops/_foreach_mul_ops.h>
#include <ATen/ops/_foreach_clamp_max_native.h>
#include <ATen/ops/_foreach_clamp_max_ops.h>
#include <ATen/ops/_foreach_clamp_max_native.h>
#include <ATen/ops/_foreach_clamp_max_ops.h>
#include <ATen/ops/_foreach_clamp_max_native.h>
#include <ATen/ops/_foreach_clamp_max_ops.h>
#include <ATen/ops/_foreach_minimum_native.h>
#include <ATen/ops/_foreach_minimum_ops.h>
#include <ATen/ops/_foreach_minimum_native.h>
#include <ATen/ops/_foreach_minimum_ops.h>
#include <ATen/ops/_foreach_minimum_native.h>
#include <ATen/ops/_foreach_minimum_ops.h>
#include <ATen/ops/_foreach_expm1_native.h>
#include <ATen/ops/_foreach_expm1_ops.h>
#include <ATen/ops/_foreach_expm1_native.h>
#include <ATen/ops/_foreach_expm1_ops.h>
#include <ATen/ops/_foreach_expm1_native.h>
#include <ATen/ops/_foreach_expm1_ops.h>
#include <ATen/ops/_foreach_tanh_native.h>
#include <ATen/ops/_foreach_tanh_ops.h>
#include <ATen/ops/_foreach_tanh_native.h>
#include <ATen/ops/_foreach_tanh_ops.h>
#include <ATen/ops/_foreach_tanh_native.h>
#include <ATen/ops/_foreach_tanh_ops.h>
#include <ATen/ops/_foreach_sin_native.h>
#include <ATen/ops/_foreach_sin_ops.h>
#include <ATen/ops/_foreach_sin_native.h>
#include <ATen/ops/_foreach_sin_ops.h>
#include <ATen/ops/_foreach_sin_native.h>
#include <ATen/ops/_foreach_sin_ops.h>
#include <ATen/ops/_foreach_frac_native.h>
#include <ATen/ops/_foreach_frac_ops.h>
#include <ATen/ops/_foreach_frac_native.h>
#include <ATen/ops/_foreach_frac_ops.h>
#include <ATen/ops/_foreach_frac_native.h>
#include <ATen/ops/_foreach_frac_ops.h>
#include <ATen/ops/_foreach_reciprocal_native.h>
#include <ATen/ops/_foreach_reciprocal_ops.h>
#include <ATen/ops/_foreach_reciprocal_native.h>
#include <ATen/ops/_foreach_reciprocal_ops.h>
#include <ATen/ops/_foreach_reciprocal_native.h>
#include <ATen/ops/_foreach_reciprocal_ops.h>
#include <ATen/ops/_foreach_trunc_native.h>
#include <ATen/ops/_foreach_trunc_ops.h>
#include <ATen/ops/_foreach_trunc_native.h>
#include <ATen/ops/_foreach_trunc_ops.h>
#include <ATen/ops/_foreach_trunc_native.h>
#include <ATen/ops/_foreach_trunc_ops.h>
#include <ATen/ops/_convert_indices_from_csr_to_coo_native.h>
#include <ATen/ops/_convert_indices_from_csr_to_coo_ops.h>
#include <ATen/ops/_convert_indices_from_csr_to_coo_native.h>
#include <ATen/ops/_convert_indices_from_csr_to_coo_ops.h>
#include <ATen/ops/multilabel_margin_loss_backward_native.h>
#include <ATen/ops/multilabel_margin_loss_backward_ops.h>
#include <ATen/ops/multilabel_margin_loss_backward_native.h>
#include <ATen/ops/multilabel_margin_loss_backward_ops.h>
#include <ATen/ops/nll_loss_native.h>
#include <ATen/ops/nll_loss_ops.h>
#include <ATen/ops/nll_loss_native.h>
#include <ATen/ops/nll_loss_ops.h>
#include <ATen/ops/nll_loss_backward_native.h>
#include <ATen/ops/nll_loss_backward_ops.h>
#include <ATen/ops/nll_loss_backward_native.h>
#include <ATen/ops/nll_loss_backward_ops.h>
#include <ATen/ops/nll_loss2d_native.h>
#include <ATen/ops/nll_loss2d_ops.h>
#include <ATen/ops/nll_loss2d_native.h>
#include <ATen/ops/nll_loss2d_ops.h>
#include <ATen/ops/nll_loss2d_backward_native.h>
#include <ATen/ops/nll_loss2d_backward_ops.h>
#include <ATen/ops/nll_loss2d_backward_native.h>
#include <ATen/ops/nll_loss2d_backward_ops.h>
#include <ATen/ops/smooth_l1_loss_native.h>
#include <ATen/ops/smooth_l1_loss_ops.h>
#include <ATen/ops/smooth_l1_loss_native.h>
#include <ATen/ops/smooth_l1_loss_ops.h>
#include <ATen/ops/huber_loss_native.h>
#include <ATen/ops/huber_loss_ops.h>
#include <ATen/ops/huber_loss_native.h>
#include <ATen/ops/huber_loss_ops.h>
#include <ATen/ops/soft_margin_loss_backward_native.h>
#include <ATen/ops/soft_margin_loss_backward_ops.h>
#include <ATen/ops/soft_margin_loss_backward_native.h>
#include <ATen/ops/soft_margin_loss_backward_ops.h>
#include <ATen/ops/elu_native.h>
#include <ATen/ops/elu_ops.h>
#include <ATen/ops/elu_native.h>
#include <ATen/ops/elu_ops.h>
#include <ATen/ops/elu_native.h>
#include <ATen/ops/elu_ops.h>
#include <ATen/ops/glu_native.h>
#include <ATen/ops/glu_ops.h>
#include <ATen/ops/glu_native.h>
#include <ATen/ops/glu_ops.h>
#include <ATen/ops/hardsigmoid_native.h>
#include <ATen/ops/hardsigmoid_ops.h>
#include <ATen/ops/hardsigmoid_native.h>
#include <ATen/ops/hardsigmoid_ops.h>
#include <ATen/ops/hardsigmoid_native.h>
#include <ATen/ops/hardsigmoid_ops.h>
#include <ATen/ops/leaky_relu_backward_native.h>
#include <ATen/ops/leaky_relu_backward_ops.h>
#include <ATen/ops/leaky_relu_backward_native.h>
#include <ATen/ops/leaky_relu_backward_ops.h>
#include <ATen/ops/softshrink_native.h>
#include <ATen/ops/softshrink_ops.h>
#include <ATen/ops/softshrink_native.h>
#include <ATen/ops/softshrink_ops.h>
#include <ATen/ops/_adaptive_avg_pool2d_native.h>
#include <ATen/ops/_adaptive_avg_pool2d_ops.h>
#include <ATen/ops/_adaptive_avg_pool2d_native.h>
#include <ATen/ops/_adaptive_avg_pool2d_ops.h>
#include <ATen/ops/_adaptive_avg_pool3d_native.h>
#include <ATen/ops/_adaptive_avg_pool3d_ops.h>
#include <ATen/ops/_adaptive_avg_pool3d_native.h>
#include <ATen/ops/_adaptive_avg_pool3d_ops.h>
#include <ATen/ops/avg_pool3d_native.h>
#include <ATen/ops/avg_pool3d_ops.h>
#include <ATen/ops/avg_pool3d_native.h>
#include <ATen/ops/avg_pool3d_ops.h>
#include <ATen/ops/fractional_max_pool3d_native.h>
#include <ATen/ops/fractional_max_pool3d_ops.h>
#include <ATen/ops/fractional_max_pool3d_native.h>
#include <ATen/ops/fractional_max_pool3d_ops.h>
#include <ATen/ops/max_pool2d_with_indices_backward_native.h>
#include <ATen/ops/max_pool2d_with_indices_backward_ops.h>
#include <ATen/ops/max_pool2d_with_indices_backward_native.h>
#include <ATen/ops/max_pool2d_with_indices_backward_ops.h>
#include <ATen/ops/max_unpool3d_native.h>
#include <ATen/ops/max_unpool3d_ops.h>
#include <ATen/ops/max_unpool3d_native.h>
#include <ATen/ops/max_unpool3d_ops.h>
#include <ATen/ops/reflection_pad1d_native.h>
#include <ATen/ops/reflection_pad1d_ops.h>
#include <ATen/ops/reflection_pad1d_native.h>
#include <ATen/ops/reflection_pad1d_ops.h>
#include <ATen/ops/reflection_pad1d_backward_native.h>
#include <ATen/ops/reflection_pad1d_backward_ops.h>
#include <ATen/ops/reflection_pad1d_backward_native.h>
#include <ATen/ops/reflection_pad1d_backward_ops.h>
#include <ATen/ops/reflection_pad2d_native.h>
#include <ATen/ops/reflection_pad2d_ops.h>
#include <ATen/ops/reflection_pad2d_native.h>
#include <ATen/ops/reflection_pad2d_ops.h>
#include <ATen/ops/reflection_pad3d_native.h>
#include <ATen/ops/reflection_pad3d_ops.h>
#include <ATen/ops/reflection_pad3d_native.h>
#include <ATen/ops/reflection_pad3d_ops.h>
#include <ATen/ops/replication_pad1d_native.h>
#include <ATen/ops/replication_pad1d_ops.h>
#include <ATen/ops/replication_pad1d_native.h>
#include <ATen/ops/replication_pad1d_ops.h>
#include <ATen/ops/replication_pad1d_backward_native.h>
#include <ATen/ops/replication_pad1d_backward_ops.h>
#include <ATen/ops/replication_pad1d_backward_native.h>
#include <ATen/ops/replication_pad1d_backward_ops.h>
#include <ATen/ops/replication_pad2d_backward_native.h>
#include <ATen/ops/replication_pad2d_backward_ops.h>
#include <ATen/ops/replication_pad2d_backward_native.h>
#include <ATen/ops/replication_pad2d_backward_ops.h>
#include <ATen/ops/replication_pad3d_native.h>
#include <ATen/ops/replication_pad3d_ops.h>
#include <ATen/ops/replication_pad3d_native.h>
#include <ATen/ops/replication_pad3d_ops.h>
#include <ATen/ops/replication_pad3d_backward_native.h>
#include <ATen/ops/replication_pad3d_backward_ops.h>
#include <ATen/ops/replication_pad3d_backward_native.h>
#include <ATen/ops/replication_pad3d_backward_ops.h>
#include <ATen/ops/upsample_nearest2d_native.h>
#include <ATen/ops/upsample_nearest2d_ops.h>
#include <ATen/ops/upsample_nearest2d_native.h>
#include <ATen/ops/upsample_nearest2d_ops.h>
#include <ATen/ops/_upsample_nearest_exact2d_native.h>
#include <ATen/ops/_upsample_nearest_exact2d_ops.h>
#include <ATen/ops/_upsample_nearest_exact2d_native.h>
#include <ATen/ops/_upsample_nearest_exact2d_ops.h>
#include <ATen/ops/_upsample_nearest_exact2d_backward_native.h>
#include <ATen/ops/_upsample_nearest_exact2d_backward_ops.h>
#include <ATen/ops/_upsample_nearest_exact2d_backward_native.h>
#include <ATen/ops/_upsample_nearest_exact2d_backward_ops.h>
#include <ATen/ops/_upsample_nearest_exact3d_backward_native.h>
#include <ATen/ops/_upsample_nearest_exact3d_backward_ops.h>
#include <ATen/ops/_upsample_nearest_exact3d_backward_native.h>
#include <ATen/ops/_upsample_nearest_exact3d_backward_ops.h>
#include <ATen/ops/sigmoid_backward_native.h>
#include <ATen/ops/sigmoid_backward_ops.h>
#include <ATen/ops/sigmoid_backward_native.h>
#include <ATen/ops/sigmoid_backward_ops.h>
#include <ATen/ops/slow_conv_transpose2d_native.h>
#include <ATen/ops/slow_conv_transpose2d_ops.h>
#include <ATen/ops/slow_conv_transpose2d_native.h>
#include <ATen/ops/slow_conv_transpose2d_ops.h>
#include <ATen/ops/_slow_conv2d_forward_native.h>
#include <ATen/ops/_slow_conv2d_forward_ops.h>
#include <ATen/ops/_slow_conv2d_forward_native.h>
#include <ATen/ops/_slow_conv2d_forward_ops.h>
#include <ATen/ops/conv_depthwise3d_native.h>
#include <ATen/ops/conv_depthwise3d_ops.h>
#include <ATen/ops/conv_depthwise3d_native.h>
#include <ATen/ops/conv_depthwise3d_ops.h>
#include <ATen/ops/slow_conv_dilated2d_native.h>
#include <ATen/ops/slow_conv_dilated2d_ops.h>
#include <ATen/ops/slow_conv_dilated2d_native.h>
#include <ATen/ops/slow_conv_dilated2d_ops.h>
#include <ATen/ops/special_ndtri_native.h>
#include <ATen/ops/special_ndtri_ops.h>
#include <ATen/ops/special_ndtri_native.h>
#include <ATen/ops/special_ndtri_ops.h>
#include <ATen/ops/special_erfc_native.h>
#include <ATen/ops/special_erfc_ops.h>
#include <ATen/ops/special_erfc_native.h>
#include <ATen/ops/special_erfc_ops.h>
#include <ATen/ops/special_logit_native.h>
#include <ATen/ops/special_logit_ops.h>
#include <ATen/ops/special_logit_native.h>
#include <ATen/ops/special_logit_ops.h>
#include <ATen/ops/special_polygamma_native.h>
#include <ATen/ops/special_polygamma_ops.h>
#include <ATen/ops/special_polygamma_native.h>
#include <ATen/ops/special_polygamma_ops.h>
#include <ATen/ops/special_sinc_native.h>
#include <ATen/ops/special_sinc_ops.h>
#include <ATen/ops/special_sinc_native.h>
#include <ATen/ops/special_sinc_ops.h>
#include <ATen/ops/fft_ifft_native.h>
#include <ATen/ops/fft_ifft_ops.h>
#include <ATen/ops/fft_ifft_native.h>
#include <ATen/ops/fft_ifft_ops.h>
#include <ATen/ops/fft_ihfft_native.h>
#include <ATen/ops/fft_ihfft_ops.h>
#include <ATen/ops/fft_ihfft_native.h>
#include <ATen/ops/fft_ihfft_ops.h>
#include <ATen/ops/fft_ifft2_native.h>
#include <ATen/ops/fft_ifft2_ops.h>
#include <ATen/ops/fft_ifft2_native.h>
#include <ATen/ops/fft_ifft2_ops.h>
#include <ATen/ops/fft_ihfftn_native.h>
#include <ATen/ops/fft_ihfftn_ops.h>
#include <ATen/ops/fft_ihfftn_native.h>
#include <ATen/ops/fft_ihfftn_ops.h>
#include <ATen/ops/linalg_cholesky_ex_native.h>
#include <ATen/ops/linalg_cholesky_ex_ops.h>
#include <ATen/ops/linalg_cholesky_ex_native.h>
#include <ATen/ops/linalg_cholesky_ex_ops.h>
#include <ATen/ops/linalg_cross_native.h>
#include <ATen/ops/linalg_cross_ops.h>
#include <ATen/ops/linalg_cross_native.h>
#include <ATen/ops/linalg_cross_ops.h>
#include <ATen/ops/linalg_lu_solve_native.h>
#include <ATen/ops/linalg_lu_solve_ops.h>
#include <ATen/ops/linalg_lu_solve_native.h>
#include <ATen/ops/linalg_lu_solve_ops.h>
#include <ATen/ops/linalg_matmul_native.h>
#include <ATen/ops/linalg_matmul_ops.h>
#include <ATen/ops/linalg_matmul_native.h>
#include <ATen/ops/linalg_matmul_ops.h>
#include <ATen/ops/linalg_vecdot_native.h>
#include <ATen/ops/linalg_vecdot_ops.h>
#include <ATen/ops/linalg_vecdot_native.h>
#include <ATen/ops/linalg_vecdot_ops.h>
#include <ATen/ops/linalg_eigvalsh_native.h>
#include <ATen/ops/linalg_eigvalsh_ops.h>
#include <ATen/ops/linalg_eigvalsh_native.h>
#include <ATen/ops/linalg_eigvalsh_ops.h>
#include <ATen/ops/linalg_inv_ex_native.h>
#include <ATen/ops/linalg_inv_ex_ops.h>
#include <ATen/ops/linalg_inv_ex_native.h>
#include <ATen/ops/linalg_inv_ex_ops.h>
#include <ATen/ops/inverse_native.h>
#include <ATen/ops/inverse_ops.h>
#include <ATen/ops/inverse_native.h>
#include <ATen/ops/inverse_ops.h>
#include <ATen/ops/outer_native.h>
#include <ATen/ops/outer_ops.h>
#include <ATen/ops/outer_native.h>
#include <ATen/ops/outer_ops.h>
#include <ATen/ops/linalg_svd_native.h>
#include <ATen/ops/linalg_svd_ops.h>
#include <ATen/ops/linalg_svd_native.h>
#include <ATen/ops/linalg_svd_ops.h>
#include <ATen/ops/linalg_cond_native.h>
#include <ATen/ops/linalg_cond_ops.h>
#include <ATen/ops/linalg_cond_native.h>
#include <ATen/ops/linalg_cond_ops.h>
#include <ATen/ops/linalg_cond_native.h>
#include <ATen/ops/linalg_cond_ops.h>
#include <ATen/ops/linalg_cond_native.h>
#include <ATen/ops/linalg_cond_ops.h>
#include <ATen/ops/linalg_solve_ex_native.h>
#include <ATen/ops/linalg_solve_ex_ops.h>
#include <ATen/ops/linalg_solve_ex_native.h>
#include <ATen/ops/linalg_solve_ex_ops.h>
#include <ATen/ops/linalg_tensorsolve_native.h>
#include <ATen/ops/linalg_tensorsolve_ops.h>
#include <ATen/ops/linalg_tensorsolve_native.h>
#include <ATen/ops/linalg_tensorsolve_ops.h>
#include <ATen/ops/linalg_matrix_rank_native.h>
#include <ATen/ops/linalg_matrix_rank_ops.h>
#include <ATen/ops/linalg_matrix_rank_native.h>
#include <ATen/ops/linalg_matrix_rank_ops.h>
#include <ATen/ops/linalg_matrix_rank_native.h>
#include <ATen/ops/linalg_matrix_rank_ops.h>
#include <ATen/ops/linalg_matrix_rank_native.h>
#include <ATen/ops/linalg_matrix_rank_ops.h>
#include <ATen/ops/linalg_matrix_rank_native.h>
#include <ATen/ops/linalg_matrix_rank_ops.h>
#include <ATen/ops/linalg_matrix_rank_native.h>
#include <ATen/ops/linalg_matrix_rank_ops.h>
#include <ATen/ops/linalg_matrix_rank_native.h>
#include <ATen/ops/linalg_matrix_rank_ops.h>
#include <ATen/ops/linalg_matrix_rank_native.h>
#include <ATen/ops/linalg_matrix_rank_ops.h>
#include <ATen/ops/_test_optional_floatlist_native.h>
#include <ATen/ops/_test_optional_floatlist_ops.h>
#include <ATen/ops/_test_optional_floatlist_native.h>
#include <ATen/ops/_test_optional_floatlist_ops.h>
#include <ATen/ops/_test_warn_in_autograd_native.h>
#include <ATen/ops/_test_warn_in_autograd_ops.h>
#include <ATen/ops/_test_warn_in_autograd_native.h>
#include <ATen/ops/_test_warn_in_autograd_ops.h>
#include <ATen/ops/_segment_reduce_backward_native.h>
#include <ATen/ops/_segment_reduce_backward_ops.h>
#include <ATen/ops/_segment_reduce_backward_native.h>
#include <ATen/ops/_segment_reduce_backward_ops.h>
#include <ATen/ops/_sparse_broadcast_to_copy_native.h>
#include <ATen/ops/_sparse_broadcast_to_copy_ops.h>
#include <ATen/ops/_sparse_broadcast_to_copy_native.h>
#include <ATen/ops/_sparse_broadcast_to_copy_ops.h>
#include <ATen/ops/unsqueeze_copy_native.h>
#include <ATen/ops/unsqueeze_copy_ops.h>
#include <ATen/ops/unsqueeze_copy_native.h>
#include <ATen/ops/unsqueeze_copy_ops.h>
#include <ATen/ops/values_copy_native.h>
#include <ATen/ops/values_copy_ops.h>
#include <ATen/ops/values_copy_native.h>
#include <ATen/ops/values_copy_ops.h>
#include <ATen/ops/to_padded_tensor_native.h>
#include <ATen/ops/to_padded_tensor_ops.h>
#include <ATen/ops/to_padded_tensor_native.h>
#include <ATen/ops/to_padded_tensor_ops.h>
#include <ATen/ops/_triton_scaled_dot_attention_native.h>
#include <ATen/ops/_triton_scaled_dot_attention_ops.h>
1255 | #include <ATen/ops/_triton_scaled_dot_attention_native.h> |
1256 | #include <ATen/ops/_triton_scaled_dot_attention_ops.h> |
1257 | #include <ATen/ops/special_bessel_y0_native.h> |
1258 | #include <ATen/ops/special_bessel_y0_ops.h> |
1259 | #include <ATen/ops/special_bessel_y0_native.h> |
1260 | #include <ATen/ops/special_bessel_y0_ops.h> |
1261 | #include <ATen/ops/special_chebyshev_polynomial_t_native.h> |
1262 | #include <ATen/ops/special_chebyshev_polynomial_t_ops.h> |
1263 | #include <ATen/ops/special_chebyshev_polynomial_t_native.h> |
1264 | #include <ATen/ops/special_chebyshev_polynomial_t_ops.h> |
1265 | #include <ATen/ops/special_chebyshev_polynomial_t_native.h> |
1266 | #include <ATen/ops/special_chebyshev_polynomial_t_ops.h> |
1267 | #include <ATen/ops/special_chebyshev_polynomial_t_native.h> |
1268 | #include <ATen/ops/special_chebyshev_polynomial_t_ops.h> |
1269 | #include <ATen/ops/special_chebyshev_polynomial_t_native.h> |
1270 | #include <ATen/ops/special_chebyshev_polynomial_t_ops.h> |
1271 | #include <ATen/ops/special_chebyshev_polynomial_t_native.h> |
1272 | #include <ATen/ops/special_chebyshev_polynomial_t_ops.h> |
1273 | #include <ATen/ops/special_chebyshev_polynomial_u_native.h> |
1274 | #include <ATen/ops/special_chebyshev_polynomial_u_ops.h> |
1275 | #include <ATen/ops/special_chebyshev_polynomial_u_native.h> |
1276 | #include <ATen/ops/special_chebyshev_polynomial_u_ops.h> |
1277 | #include <ATen/ops/special_chebyshev_polynomial_u_native.h> |
1278 | #include <ATen/ops/special_chebyshev_polynomial_u_ops.h> |
1279 | #include <ATen/ops/special_chebyshev_polynomial_u_native.h> |
1280 | #include <ATen/ops/special_chebyshev_polynomial_u_ops.h> |
1281 | #include <ATen/ops/special_chebyshev_polynomial_u_native.h> |
1282 | #include <ATen/ops/special_chebyshev_polynomial_u_ops.h> |
1283 | #include <ATen/ops/special_chebyshev_polynomial_u_native.h> |
1284 | #include <ATen/ops/special_chebyshev_polynomial_u_ops.h> |
1285 | #include <ATen/ops/special_hermite_polynomial_h_native.h> |
1286 | #include <ATen/ops/special_hermite_polynomial_h_ops.h> |
1287 | #include <ATen/ops/special_hermite_polynomial_h_native.h> |
1288 | #include <ATen/ops/special_hermite_polynomial_h_ops.h> |
1289 | #include <ATen/ops/special_hermite_polynomial_h_native.h> |
1290 | #include <ATen/ops/special_hermite_polynomial_h_ops.h> |
1291 | #include <ATen/ops/special_hermite_polynomial_h_native.h> |
1292 | #include <ATen/ops/special_hermite_polynomial_h_ops.h> |
1293 | #include <ATen/ops/special_hermite_polynomial_h_native.h> |
1294 | #include <ATen/ops/special_hermite_polynomial_h_ops.h> |
1295 | #include <ATen/ops/special_hermite_polynomial_h_native.h> |
1296 | #include <ATen/ops/special_hermite_polynomial_h_ops.h> |
1297 | #include <ATen/ops/special_modified_bessel_k1_native.h> |
1298 | #include <ATen/ops/special_modified_bessel_k1_ops.h> |
1299 | #include <ATen/ops/special_modified_bessel_k1_native.h> |
1300 | #include <ATen/ops/special_modified_bessel_k1_ops.h> |
1301 | #include <ATen/ops/special_scaled_modified_bessel_k0_native.h> |
1302 | #include <ATen/ops/special_scaled_modified_bessel_k0_ops.h> |
1303 | #include <ATen/ops/special_scaled_modified_bessel_k0_native.h> |
1304 | #include <ATen/ops/special_scaled_modified_bessel_k0_ops.h> |
1305 | #include <ATen/ops/special_scaled_modified_bessel_k1_native.h> |
1306 | #include <ATen/ops/special_scaled_modified_bessel_k1_ops.h> |
1307 | #include <ATen/ops/special_scaled_modified_bessel_k1_native.h> |
1308 | #include <ATen/ops/special_scaled_modified_bessel_k1_ops.h> |
1309 | #include <ATen/ops/special_spherical_bessel_j0_native.h> |
1310 | #include <ATen/ops/special_spherical_bessel_j0_ops.h> |
1311 | #include <ATen/ops/special_spherical_bessel_j0_native.h> |
1312 | #include <ATen/ops/special_spherical_bessel_j0_ops.h> |
1313 | #include <ATen/ops/refine_names_native.h> |
1314 | #include <ATen/ops/refine_names_ops.h> |
1315 | #include <ATen/ops/real_native.h> |
1316 | #include <ATen/ops/real_ops.h> |
1317 | #include <ATen/ops/_neg_view_native.h> |
1318 | #include <ATen/ops/_neg_view_ops.h> |
1319 | #include <ATen/ops/_neg_view_copy_native.h> |
1320 | #include <ATen/ops/_neg_view_copy_ops.h> |
1321 | #include <ATen/ops/diagonal_native.h> |
1322 | #include <ATen/ops/diagonal_ops.h> |
1323 | #include <ATen/ops/diagonal_copy_native.h> |
1324 | #include <ATen/ops/diagonal_copy_ops.h> |
1325 | #include <ATen/ops/diagonal_native.h> |
1326 | #include <ATen/ops/diagonal_ops.h> |
1327 | #include <ATen/ops/narrow_native.h> |
1328 | #include <ATen/ops/narrow_ops.h> |
1329 | #include <ATen/ops/narrow_copy_native.h> |
1330 | #include <ATen/ops/narrow_copy_ops.h> |
1331 | #include <ATen/ops/narrow_native.h> |
1332 | #include <ATen/ops/narrow_ops.h> |
1333 | #include <ATen/ops/numpy_T_native.h> |
1334 | #include <ATen/ops/numpy_T_ops.h> |
1335 | #include <ATen/ops/select_native.h> |
1336 | #include <ATen/ops/select_ops.h> |
1337 | #include <ATen/ops/select_native.h> |
1338 | #include <ATen/ops/select_ops.h> |
1339 | #include <ATen/ops/select_copy_native.h> |
1340 | #include <ATen/ops/select_copy_ops.h> |
1341 | #include <ATen/ops/split_with_sizes_native.h> |
1342 | #include <ATen/ops/split_with_sizes_ops.h> |
1343 | #include <ATen/ops/split_with_sizes_copy_native.h> |
1344 | #include <ATen/ops/split_with_sizes_copy_ops.h> |
1345 | #include <ATen/ops/vsplit_native.h> |
1346 | #include <ATen/ops/vsplit_ops.h> |
1347 | #include <ATen/ops/vsplit_native.h> |
1348 | #include <ATen/ops/vsplit_ops.h> |
1349 | #include <ATen/ops/squeeze_native.h> |
1350 | #include <ATen/ops/squeeze_ops.h> |
1351 | #include <ATen/ops/squeeze_copy_native.h> |
1352 | #include <ATen/ops/squeeze_copy_ops.h> |
1353 | #include <ATen/ops/squeeze_native.h> |
1354 | #include <ATen/ops/squeeze_ops.h> |
1355 | #include <ATen/ops/squeeze_copy_native.h> |
1356 | #include <ATen/ops/squeeze_copy_ops.h> |
1357 | #include <ATen/ops/squeeze_native.h> |
1358 | #include <ATen/ops/squeeze_ops.h> |
1359 | #include <ATen/ops/squeeze_native.h> |
1360 | #include <ATen/ops/squeeze_ops.h> |
1361 | #include <ATen/ops/squeeze_copy_native.h> |
1362 | #include <ATen/ops/squeeze_copy_ops.h> |
1363 | #include <ATen/ops/view_as_native.h> |
1364 | #include <ATen/ops/view_as_ops.h> |
1365 | #include <ATen/ops/unbind_native.h> |
1366 | #include <ATen/ops/unbind_ops.h> |
1367 | #include <ATen/ops/unbind_copy_native.h> |
1368 | #include <ATen/ops/unbind_copy_ops.h> |
1369 | #include <ATen/ops/unbind_native.h> |
1370 | #include <ATen/ops/unbind_ops.h> |
1371 | #include <ATen/ops/alias_native.h> |
1372 | #include <ATen/ops/alias_ops.h> |
1373 | #include <ATen/ops/alias_copy_native.h> |
1374 | #include <ATen/ops/alias_copy_ops.h> |
1375 | #include <ATen/ops/_cast_Char_native.h> |
1376 | #include <ATen/ops/_cast_Char_ops.h> |
1377 | #include <ATen/ops/_cast_Half_native.h> |
1378 | #include <ATen/ops/_cast_Half_ops.h> |
1379 | #include <ATen/ops/_backward_native.h> |
1380 | #include <ATen/ops/_backward_ops.h> |
1381 | #include <ATen/ops/set_data_native.h> |
1382 | #include <ATen/ops/set_data_ops.h> |
1383 | #include <ATen/ops/data_native.h> |
1384 | #include <ATen/ops/data_ops.h> |
1385 | #include <ATen/ops/requires_grad_native.h> |
1386 | #include <ATen/ops/requires_grad_ops.h> |
1387 | #include <ATen/ops/align_as_native.h> |
1388 | #include <ATen/ops/align_as_ops.h> |
1389 | #include <ATen/ops/_sobol_engine_ff_native.h> |
1390 | #include <ATen/ops/_sobol_engine_ff_ops.h> |
1391 | #include <ATen/ops/feature_alpha_dropout_native.h> |
1392 | #include <ATen/ops/feature_alpha_dropout_ops.h> |
1393 | #include <ATen/ops/feature_alpha_dropout_native.h> |
1394 | #include <ATen/ops/feature_alpha_dropout_ops.h> |
1395 | #include <ATen/ops/_is_all_true_native.h> |
1396 | #include <ATen/ops/_is_all_true_ops.h> |
1397 | #include <ATen/ops/allclose_native.h> |
1398 | #include <ATen/ops/allclose_ops.h> |
1399 | #include <ATen/ops/atleast_1d_native.h> |
1400 | #include <ATen/ops/atleast_1d_ops.h> |
1401 | #include <ATen/ops/atleast_1d_native.h> |
1402 | #include <ATen/ops/atleast_1d_ops.h> |
1403 | #include <ATen/ops/_batch_norm_impl_index_native.h> |
1404 | #include <ATen/ops/_batch_norm_impl_index_ops.h> |
1405 | #include <ATen/ops/bilinear_native.h> |
1406 | #include <ATen/ops/bilinear_ops.h> |
1407 | #include <ATen/ops/unsafe_chunk_native.h> |
1408 | #include <ATen/ops/unsafe_chunk_ops.h> |
1409 | #include <ATen/ops/conv_transpose1d_native.h> |
1410 | #include <ATen/ops/conv_transpose1d_ops.h> |
1411 | #include <ATen/ops/conv_transpose2d_native.h> |
1412 | #include <ATen/ops/conv_transpose2d_ops.h> |
1413 | #include <ATen/ops/conv_transpose3d_native.h> |
1414 | #include <ATen/ops/conv_transpose3d_ops.h> |
1415 | #include <ATen/ops/cosine_embedding_loss_native.h> |
1416 | #include <ATen/ops/cosine_embedding_loss_ops.h> |
1417 | #include <ATen/ops/cumulative_trapezoid_native.h> |
1418 | #include <ATen/ops/cumulative_trapezoid_ops.h> |
1419 | #include <ATen/ops/cumulative_trapezoid_native.h> |
1420 | #include <ATen/ops/cumulative_trapezoid_ops.h> |
1421 | #include <ATen/ops/ctc_loss_native.h> |
1422 | #include <ATen/ops/ctc_loss_ops.h> |
1423 | #include <ATen/ops/ctc_loss_native.h> |
1424 | #include <ATen/ops/ctc_loss_ops.h> |
1425 | #include <ATen/ops/gradient_native.h> |
1426 | #include <ATen/ops/gradient_ops.h> |
1427 | #include <ATen/ops/gradient_native.h> |
1428 | #include <ATen/ops/gradient_ops.h> |
1429 | #include <ATen/ops/gradient_native.h> |
1430 | #include <ATen/ops/gradient_ops.h> |
1431 | #include <ATen/ops/gradient_native.h> |
1432 | #include <ATen/ops/gradient_ops.h> |
1433 | #include <ATen/ops/gradient_native.h> |
1434 | #include <ATen/ops/gradient_ops.h> |
1435 | #include <ATen/ops/gradient_native.h> |
1436 | #include <ATen/ops/gradient_ops.h> |
1437 | #include <ATen/ops/gradient_native.h> |
1438 | #include <ATen/ops/gradient_ops.h> |
1439 | #include <ATen/ops/einsum_native.h> |
1440 | #include <ATen/ops/einsum_ops.h> |
1441 | #include <ATen/ops/grid_sampler_native.h> |
1442 | #include <ATen/ops/grid_sampler_ops.h> |
1443 | #include <ATen/ops/_grid_sampler_2d_cpu_fallback_backward_native.h> |
1444 | #include <ATen/ops/_grid_sampler_2d_cpu_fallback_backward_ops.h> |
1445 | #include <ATen/ops/hinge_embedding_loss_native.h> |
1446 | #include <ATen/ops/hinge_embedding_loss_ops.h> |
1447 | #include <ATen/ops/_cufft_set_plan_cache_max_size_native.h> |
1448 | #include <ATen/ops/_cufft_set_plan_cache_max_size_ops.h> |
1449 | #include <ATen/ops/_cufft_clear_plan_cache_native.h> |
1450 | #include <ATen/ops/_cufft_clear_plan_cache_ops.h> |
1451 | #include <ATen/ops/isclose_native.h> |
1452 | #include <ATen/ops/isclose_ops.h> |
1453 | #include <ATen/ops/is_floating_point_native.h> |
1454 | #include <ATen/ops/is_floating_point_ops.h> |
1455 | #include <ATen/ops/is_conj_native.h> |
1456 | #include <ATen/ops/is_conj_ops.h> |
1457 | #include <ATen/ops/isreal_native.h> |
1458 | #include <ATen/ops/isreal_ops.h> |
1459 | #include <ATen/ops/is_nonzero_native.h> |
1460 | #include <ATen/ops/is_nonzero_ops.h> |
1461 | #include <ATen/ops/layer_norm_native.h> |
1462 | #include <ATen/ops/layer_norm_ops.h> |
1463 | #include <ATen/ops/fbgemm_pack_gemm_matrix_fp16_native.h> |
1464 | #include <ATen/ops/fbgemm_pack_gemm_matrix_fp16_ops.h> |
1465 | #include <ATen/ops/fbgemm_pack_quantized_matrix_native.h> |
1466 | #include <ATen/ops/fbgemm_pack_quantized_matrix_ops.h> |
1467 | #include <ATen/ops/fbgemm_pack_quantized_matrix_native.h> |
1468 | #include <ATen/ops/fbgemm_pack_quantized_matrix_ops.h> |
1469 | #include <ATen/ops/value_selecting_reduction_backward_native.h> |
1470 | #include <ATen/ops/value_selecting_reduction_backward_ops.h> |
1471 | #include <ATen/ops/max_pool1d_native.h> |
1472 | #include <ATen/ops/max_pool1d_ops.h> |
1473 | #include <ATen/ops/pairwise_distance_native.h> |
1474 | #include <ATen/ops/pairwise_distance_ops.h> |
1475 | #include <ATen/ops/pdist_native.h> |
1476 | #include <ATen/ops/pdist_ops.h> |
1477 | #include <ATen/ops/_nested_select_backward_native.h> |
1478 | #include <ATen/ops/_nested_select_backward_ops.h> |
1479 | #include <ATen/ops/size_native.h> |
1480 | #include <ATen/ops/size_ops.h> |
1481 | #include <ATen/ops/size_native.h> |
1482 | #include <ATen/ops/size_ops.h> |
1483 | #include <ATen/ops/smm_native.h> |
1484 | #include <ATen/ops/smm_ops.h> |
1485 | #include <ATen/ops/stride_native.h> |
1486 | #include <ATen/ops/stride_ops.h> |
1487 | #include <ATen/ops/stride_native.h> |
1488 | #include <ATen/ops/stride_ops.h> |
1489 | #include <ATen/ops/sum_to_size_native.h> |
1490 | #include <ATen/ops/sum_to_size_ops.h> |
1491 | #include <ATen/ops/std_native.h> |
1492 | #include <ATen/ops/std_ops.h> |
1493 | #include <ATen/ops/one_hot_native.h> |
1494 | #include <ATen/ops/one_hot_ops.h> |
1495 | #include <ATen/ops/fliplr_native.h> |
1496 | #include <ATen/ops/fliplr_ops.h> |
1497 | #include <ATen/ops/triplet_margin_loss_native.h> |
1498 | #include <ATen/ops/triplet_margin_loss_ops.h> |
1499 | #include <ATen/ops/type_as_native.h> |
1500 | #include <ATen/ops/type_as_ops.h> |
1501 | #include <ATen/ops/var_native.h> |
1502 | #include <ATen/ops/var_ops.h> |
1503 | #include <ATen/ops/_sparse_log_softmax_native.h> |
1504 | #include <ATen/ops/_sparse_log_softmax_ops.h> |
1505 | #include <ATen/ops/_sparse_log_softmax_native.h> |
1506 | #include <ATen/ops/_sparse_log_softmax_ops.h> |
1507 | #include <ATen/ops/sparse_csc_tensor_native.h> |
1508 | #include <ATen/ops/sparse_csc_tensor_ops.h> |
1509 | #include <ATen/ops/sparse_csc_tensor_native.h> |
1510 | #include <ATen/ops/sparse_csc_tensor_ops.h> |
1511 | #include <ATen/ops/_sparse_bsr_tensor_unsafe_native.h> |
1512 | #include <ATen/ops/_sparse_bsr_tensor_unsafe_ops.h> |
1513 | #include <ATen/ops/_validate_sparse_compressed_tensor_args_native.h> |
1514 | #include <ATen/ops/_validate_sparse_compressed_tensor_args_ops.h> |
1515 | #include <ATen/ops/_validate_sparse_csr_tensor_args_native.h> |
1516 | #include <ATen/ops/_validate_sparse_csr_tensor_args_ops.h> |
1517 | #include <ATen/ops/_validate_sparse_csc_tensor_args_native.h> |
1518 | #include <ATen/ops/_validate_sparse_csc_tensor_args_ops.h> |
1519 | #include <ATen/ops/_to_cpu_native.h> |
1520 | #include <ATen/ops/_to_cpu_ops.h> |
1521 | #include <ATen/ops/_dimI_native.h> |
1522 | #include <ATen/ops/_dimI_ops.h> |
1523 | #include <ATen/ops/q_per_channel_axis_native.h> |
1524 | #include <ATen/ops/q_per_channel_axis_ops.h> |
1525 | #include <ATen/ops/fake_quantize_per_tensor_affine_cachemask_backward_native.h> |
1526 | #include <ATen/ops/fake_quantize_per_tensor_affine_cachemask_backward_ops.h> |
1527 | #include <ATen/ops/fake_quantize_per_channel_affine_cachemask_backward_native.h> |
1528 | #include <ATen/ops/fake_quantize_per_channel_affine_cachemask_backward_ops.h> |
1529 | #include <ATen/ops/rnn_tanh_cell_native.h> |
1530 | #include <ATen/ops/rnn_tanh_cell_ops.h> |
1531 | #include <ATen/ops/set_native.h> |
1532 | #include <ATen/ops/set_ops.h> |
1533 | #include <ATen/ops/xor_native.h> |
1534 | #include <ATen/ops/xor_ops.h> |
1535 | #include <ATen/ops/xor_native.h> |
1536 | #include <ATen/ops/xor_ops.h> |
1537 | #include <ATen/ops/xor_native.h> |
1538 | #include <ATen/ops/xor_ops.h> |
1539 | #include <ATen/ops/xor_native.h> |
1540 | #include <ATen/ops/xor_ops.h> |
1541 | #include <ATen/ops/trace_backward_native.h> |
1542 | #include <ATen/ops/trace_backward_ops.h> |
1543 | #include <ATen/ops/masked_select_backward_native.h> |
1544 | #include <ATen/ops/masked_select_backward_ops.h> |
1545 | #include <ATen/ops/argwhere_native.h> |
1546 | #include <ATen/ops/argwhere_ops.h> |
1547 | #include <ATen/ops/cross_entropy_loss_native.h> |
1548 | #include <ATen/ops/cross_entropy_loss_ops.h> |
1549 | #include <ATen/ops/equal_native.h> |
1550 | #include <ATen/ops/equal_ops.h> |
1551 | #include <ATen/ops/adaptive_avg_pool3d_backward_native.h> |
1552 | #include <ATen/ops/adaptive_avg_pool3d_backward_ops.h> |
1553 | #include <ATen/ops/_pad_enum_native.h> |
1554 | #include <ATen/ops/_pad_enum_ops.h> |
1555 | #include <ATen/ops/pad_native.h> |
1556 | #include <ATen/ops/pad_ops.h> |
1557 | #include <ATen/ops/upsample_nearest2d_native.h> |
1558 | #include <ATen/ops/upsample_nearest2d_ops.h> |
1559 | #include <ATen/ops/_upsample_nearest_exact2d_native.h> |
1560 | #include <ATen/ops/_upsample_nearest_exact2d_ops.h> |
1561 | #include <ATen/ops/_add_batch_dim_native.h> |
1562 | #include <ATen/ops/_add_batch_dim_ops.h> |
1563 | #include <ATen/ops/_remove_batch_dim_native.h> |
1564 | #include <ATen/ops/_remove_batch_dim_ops.h> |
1565 | #include <ATen/ops/fft_ifftshift_native.h> |
1566 | #include <ATen/ops/fft_ifftshift_ops.h> |
1567 | #include <ATen/ops/_test_serialization_subcmul_native.h> |
1568 | #include <ATen/ops/_test_serialization_subcmul_ops.h> |
1569 | #include <ATen/ops/unflatten_dense_tensors_native.h> |
1570 | #include <ATen/ops/unflatten_dense_tensors_ops.h> |
1571 | #include <ATen/ops/_scaled_dot_product_flash_attention_native.h> |
1572 | #include <ATen/ops/_scaled_dot_product_flash_attention_ops.h> |
1573 | #include <ATen/ops/_scaled_dot_product_efficient_attention_native.h> |
1574 | #include <ATen/ops/_scaled_dot_product_efficient_attention_ops.h> |
1575 | #include <ATen/ops/_flash_attention_backward_native.h> |
1576 | #include <ATen/ops/_flash_attention_backward_ops.h> |
1577 | #include <ATen/ops/_efficient_attention_forward_native.h> |
1578 | #include <ATen/ops/_efficient_attention_forward_ops.h> |
1579 | #endif |
1580 | |
1581 | namespace at { |
1582 | namespace functionalization { |
1583 | |
1584 | // This keyset is used by functionalization when it calls into meta kernels |
1585 | // to accurately propagate stride metadata. |
1586 | // Exclude any modes: the purpose of calling into meta kernels is only as an implementation |
1587 | // detail to perform shape inference, and we don't want any modal keys to run. |
1588 | // Specifically, we want to prevent functionalization and Python modes from running. |
1589 | constexpr auto exclude_keys_for_meta_dispatch = |
1590 | c10::functorch_transforms_ks | |
1591 | c10::DispatchKeySet({ |
1592 | c10::DispatchKey::FuncTorchDynamicLayerBackMode, |
1593 | c10::DispatchKey::FuncTorchDynamicLayerFrontMode, |
1594 | c10::DispatchKey::Python |
1595 | }); |
1596 | |
1597 | |
1598 | inline Tensor to_meta(const Tensor& t) { |
1599 | if (!t.defined()) return t; |
1600 | return at::native::empty_strided_meta_symint(t.sym_sizes(), t.sym_strides(), |
1601 | /*dtype=*/c10::make_optional(t.scalar_type()), /*layout=*/c10::make_optional(t.layout()), |
1602 | /*device=*/c10::make_optional(c10::Device(kMeta)), /*pin_memory=*/c10::nullopt); |
1603 | } |
1604 | |
1605 | inline c10::optional<Tensor> to_meta(const c10::optional<Tensor>& t) { |
1606 | if (t.has_value()) { |
1607 | return c10::make_optional<Tensor>(to_meta(*t)); |
1608 | } |
1609 | return c10::nullopt; |
1610 | } |
1611 | |
1612 | inline std::vector<Tensor> to_meta(at::ITensorListRef t_list) { |
1613 | std::vector<Tensor> outputs; |
1614 | outputs.reserve(t_list.size()); |
1615 | for (const auto& tensor : t_list) { |
1616 | outputs.push_back(to_meta(tensor)); |
1617 | } |
1618 | return outputs; |
1619 | } |
1620 | |
1621 | inline c10::List<Tensor> to_meta(const c10::List<Tensor>& t_list) { |
1622 | c10::List<Tensor> outputs; |
1623 | outputs.reserve(t_list.size()); |
1624 | for (const auto i : c10::irange(t_list.size())) { |
1625 | outputs.push_back(to_meta(t_list[i])); |
1626 | } |
1627 | return outputs; |
1628 | } |
1629 | |
1630 | inline c10::List<c10::optional<Tensor>> to_meta(const c10::List<c10::optional<Tensor>>& t_list) { |
1631 | c10::List<c10::optional<Tensor>> outputs; |
1632 | outputs.reserve(t_list.size()); |
1633 | for (const auto i : c10::irange(t_list.size())) { |
1634 | outputs.push_back(to_meta(t_list[i])); |
1635 | } |
1636 | return outputs; |
1637 | } |
1638 | |
1639 | |
1640 | |
1641 | at::Tensor & _new_zeros_with_same_feature_meta_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, int64_t self_num_batch_dims, at::Tensor & out) { |
1642 | if (false) { |
1643 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
1644 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
    // (We can only do this for inplace ops today though, because they technically all support meta tensors).
1646 | auto self_meta = to_meta(self); |
1647 | auto other_meta = to_meta(other); |
1648 | auto out_meta = to_meta(out); |
1649 | at::AutoDispatchSkipFunctionalize func_guard; |
1650 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
1651 | at::_ops::_new_zeros_with_same_feature_meta_out::call(self_meta, other_meta, self_num_batch_dims, out_meta); |
1652 | } |
1653 | |
1654 | at::Tensor self_; |
1655 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
1656 | at::functionalization::impl::sync(self); |
1657 | self_ = at::functionalization::impl::from_functional_tensor(self); |
1658 | } else { |
1659 | self_ = self; |
1660 | } |
1661 | |
1662 | at::Tensor other_; |
1663 | if (at::functionalization::impl::isFunctionalTensor(other)) { |
1664 | at::functionalization::impl::sync(other); |
1665 | other_ = at::functionalization::impl::from_functional_tensor(other); |
1666 | } else { |
1667 | other_ = other; |
1668 | } |
1669 | |
1670 | at::Tensor out_; |
1671 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
1672 | at::functionalization::impl::sync(out); |
1673 | out_ = at::functionalization::impl::from_functional_tensor(out); |
1674 | } else { |
1675 | out_ = out; |
1676 | } |
1677 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
1678 | if ((false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(other))) { |
      // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
      TORCH_INTERNAL_ASSERT(false,
        "mutating a non-functional tensor with a functional tensor is not allowed.",
        " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
1683 | } else { |
1684 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
1685 | at::AutoDispatchSkipFunctionalize guard; |
1686 | at::Tensor tmp_output = at::_ops::_new_zeros_with_same_feature_meta_out::call(self_, other_, self_num_batch_dims, out_); |
      return out;
1688 | } |
1689 | } else { |
1690 | at::Tensor tmp_output; |
1691 | { |
1692 | at::AutoDispatchSkipFunctionalize guard; |
1693 | tmp_output = at::_ops::_new_zeros_with_same_feature_meta::call(self_, other_, self_num_batch_dims); |
1694 | } |
1695 | at::functionalization::impl::replace_(out, tmp_output); |
1696 | at::functionalization::impl::commit_update(out); |
1697 | at::functionalization::impl::sync(out); |
1698 | return out; |
1699 | } |
1700 | } |
1701 | |
1702 | at::Tensor & _cudnn_init_dropout_state_out_out(c10::DispatchKeySet dispatchKeySet, double dropout, bool train, int64_t dropout_seed, at::Tensor & out) { |
1703 | if (false) { |
1704 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
1705 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
    // (We can only do this for inplace ops today though, because they technically all support meta tensors).
1707 | auto out_meta = to_meta(out); |
1708 | at::AutoDispatchSkipFunctionalize func_guard; |
1709 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
1710 | at::_ops::_cudnn_init_dropout_state_out::call(dropout, train, dropout_seed, out_meta); |
1711 | } |
1712 | |
1713 | at::Tensor out_; |
1714 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
1715 | at::functionalization::impl::sync(out); |
1716 | out_ = at::functionalization::impl::from_functional_tensor(out); |
1717 | } else { |
1718 | out_ = out; |
1719 | } |
1720 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
1721 | if ((false)) { |
      // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
      TORCH_INTERNAL_ASSERT(false,
        "mutating a non-functional tensor with a functional tensor is not allowed.",
        " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
1726 | } else { |
1727 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
1728 | at::AutoDispatchSkipFunctionalize guard; |
1729 | at::Tensor tmp_output = at::_ops::_cudnn_init_dropout_state_out::call(dropout, train, dropout_seed, out_); |
      return out;
1731 | } |
1732 | } else { |
1733 | at::Tensor tmp_output; |
1734 | { |
1735 | at::AutoDispatchSkipFunctionalize guard; |
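      // _cudnn_init_dropout_state is a factory op: its functional variant takes dtype/layout/device
      // from the unwrapped `out_` tensor, and pin_memory is left unset (c10::nullopt).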
1736 | tmp_output = at::_ops::_cudnn_init_dropout_state::call(dropout, train, dropout_seed, out_.scalar_type(), out_.layout(), out_.device(), c10::nullopt); |
1737 | } |
1738 | at::functionalization::impl::replace_(out, tmp_output); |
1739 | at::functionalization::impl::commit_update(out); |
1740 | at::functionalization::impl::sync(out); |
1741 | return out; |
1742 | } |
1743 | } |
1744 | |
1745 | at::Tensor & angle_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) { |
1746 | if (false) { |
1747 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
1748 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
    // (We can only do this for inplace ops today though, because they technically all support meta tensors).
1750 | auto self_meta = to_meta(self); |
1751 | auto out_meta = to_meta(out); |
1752 | at::AutoDispatchSkipFunctionalize func_guard; |
1753 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
1754 | at::_ops::angle_out::call(self_meta, out_meta); |
1755 | } |
1756 | |
1757 | at::Tensor self_; |
1758 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
1759 | at::functionalization::impl::sync(self); |
1760 | self_ = at::functionalization::impl::from_functional_tensor(self); |
1761 | } else { |
1762 | self_ = self; |
1763 | } |
1764 | |
1765 | at::Tensor out_; |
1766 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
1767 | at::functionalization::impl::sync(out); |
1768 | out_ = at::functionalization::impl::from_functional_tensor(out); |
1769 | } else { |
1770 | out_ = out; |
1771 | } |
1772 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
1773 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
      // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
      TORCH_INTERNAL_ASSERT(false,
        "mutating a non-functional tensor with a functional tensor is not allowed.",
        " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
1778 | } else { |
1779 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
1780 | at::AutoDispatchSkipFunctionalize guard; |
1781 | at::Tensor tmp_output = at::_ops::angle_out::call(self_, out_); |
      return out;
1783 | } |
1784 | } else { |
1785 | at::Tensor tmp_output; |
1786 | { |
1787 | at::AutoDispatchSkipFunctionalize guard; |
1788 | tmp_output = at::_ops::angle::call(self_); |
1789 | } |
1790 | at::functionalization::impl::replace_(out, tmp_output); |
1791 | at::functionalization::impl::commit_update(out); |
1792 | at::functionalization::impl::sync(out); |
1793 | return out; |
1794 | } |
1795 | } |
1796 | |
1797 | at::Tensor & sgn_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) { |
1798 | if (false) { |
1799 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
1800 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
    // (We can only do this for inplace ops today though, because they technically all support meta tensors).
1802 | auto self_meta = to_meta(self); |
1803 | auto out_meta = to_meta(out); |
1804 | at::AutoDispatchSkipFunctionalize func_guard; |
1805 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
1806 | at::_ops::sgn_out::call(self_meta, out_meta); |
1807 | } |
1808 | |
1809 | at::Tensor self_; |
1810 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
1811 | at::functionalization::impl::sync(self); |
1812 | self_ = at::functionalization::impl::from_functional_tensor(self); |
1813 | } else { |
1814 | self_ = self; |
1815 | } |
1816 | |
1817 | at::Tensor out_; |
1818 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
1819 | at::functionalization::impl::sync(out); |
1820 | out_ = at::functionalization::impl::from_functional_tensor(out); |
1821 | } else { |
1822 | out_ = out; |
1823 | } |
1824 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
1825 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
      // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
      TORCH_INTERNAL_ASSERT(false,
        "mutating a non-functional tensor with a functional tensor is not allowed.",
        " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
1830 | } else { |
1831 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
1832 | at::AutoDispatchSkipFunctionalize guard; |
1833 | at::Tensor tmp_output = at::_ops::sgn_out::call(self_, out_); |
      return out;
1835 | } |
1836 | } else { |
1837 | at::Tensor tmp_output; |
1838 | { |
1839 | at::AutoDispatchSkipFunctionalize guard; |
1840 | tmp_output = at::_ops::sgn::call(self_); |
1841 | } |
1842 | at::functionalization::impl::replace_(out, tmp_output); |
1843 | at::functionalization::impl::commit_update(out); |
1844 | at::functionalization::impl::sync(out); |
1845 | return out; |
1846 | } |
1847 | } |
1848 | |
1849 | at::Tensor & sgn_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) { |
1850 | if (true) { |
1851 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
1852 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
    // (We can only do this for inplace ops today though, because they technically all support meta tensors).
1854 | auto self_meta = to_meta(self); |
1855 | at::AutoDispatchSkipFunctionalize func_guard; |
1856 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
1857 | at::_ops::sgn_::call(self_meta); |
1858 | } |
1859 | |
1860 | at::Tensor self_; |
1861 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
1862 | at::functionalization::impl::sync(self); |
1863 | self_ = at::functionalization::impl::from_functional_tensor(self); |
1864 | } else { |
1865 | self_ = self; |
1866 | } |
1867 | if (!(true && at::functionalization::impl::isFunctionalTensor(self))) { |
1868 | if ((false)) { |
      // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
      TORCH_INTERNAL_ASSERT(false,
        "mutating a non-functional tensor with a functional tensor is not allowed.",
        " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
1873 | } else { |
1874 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
1875 | at::AutoDispatchSkipFunctionalize guard; |
1876 | at::Tensor tmp_output = at::_ops::sgn_::call(self_); |
      return self;
1878 | } |
1879 | } else { |
1880 | at::Tensor tmp_output; |
1881 | { |
1882 | at::AutoDispatchSkipFunctionalize guard; |
1883 | tmp_output = at::_ops::sgn::call(self_); |
1884 | } |
1885 | at::functionalization::impl::replace_(self, tmp_output); |
1886 | at::functionalization::impl::commit_update(self); |
1887 | at::functionalization::impl::sync(self); |
1888 | return self; |
1889 | } |
1890 | } |
1891 | |
1892 | at::Tensor & _add_relu_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha, at::Tensor & out) { |
1893 | if (false) { |
1894 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
1895 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
    // (We can only do this for inplace ops today though, because they technically all support meta tensors).
1897 | auto self_meta = to_meta(self); |
1898 | auto other_meta = to_meta(other); |
1899 | auto out_meta = to_meta(out); |
1900 | at::AutoDispatchSkipFunctionalize func_guard; |
1901 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
1902 | at::_ops::_add_relu_out::call(self_meta, other_meta, alpha, out_meta); |
1903 | } |
1904 | |
1905 | at::Tensor self_; |
1906 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
1907 | at::functionalization::impl::sync(self); |
1908 | self_ = at::functionalization::impl::from_functional_tensor(self); |
1909 | } else { |
1910 | self_ = self; |
1911 | } |
1912 | |
1913 | at::Tensor other_; |
1914 | if (at::functionalization::impl::isFunctionalTensor(other)) { |
1915 | at::functionalization::impl::sync(other); |
1916 | other_ = at::functionalization::impl::from_functional_tensor(other); |
1917 | } else { |
1918 | other_ = other; |
1919 | } |
1920 | |
1921 | at::Tensor out_; |
1922 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
1923 | at::functionalization::impl::sync(out); |
1924 | out_ = at::functionalization::impl::from_functional_tensor(out); |
1925 | } else { |
1926 | out_ = out; |
1927 | } |
1928 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
1929 | if ((false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(other))) { |
      // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
      TORCH_INTERNAL_ASSERT(false,
        "mutating a non-functional tensor with a functional tensor is not allowed.",
        " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
1934 | } else { |
1935 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
1936 | at::AutoDispatchSkipFunctionalize guard; |
1937 | at::Tensor tmp_output = at::_ops::_add_relu_out::call(self_, other_, alpha, out_); |
      return out;
1939 | } |
1940 | } else { |
1941 | at::Tensor tmp_output; |
1942 | { |
1943 | at::AutoDispatchSkipFunctionalize guard; |
1944 | tmp_output = at::_ops::_add_relu_Tensor::call(self_, other_, alpha); |
1945 | } |
1946 | at::functionalization::impl::replace_(out, tmp_output); |
1947 | at::functionalization::impl::commit_update(out); |
1948 | at::functionalization::impl::sync(out); |
1949 | return out; |
1950 | } |
1951 | } |
1952 | |
1953 | at::Tensor & _add_relu__Tensor(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha) { |
1954 | if (true) { |
1955 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
1956 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
    // (We can only do this for inplace ops today though, because they technically all support meta tensors).
1958 | auto self_meta = to_meta(self); |
1959 | auto other_meta = to_meta(other); |
1960 | at::AutoDispatchSkipFunctionalize func_guard; |
1961 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
1962 | at::_ops::_add_relu__Tensor::call(self_meta, other_meta, alpha); |
1963 | } |
1964 | |
1965 | at::Tensor self_; |
1966 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
1967 | at::functionalization::impl::sync(self); |
1968 | self_ = at::functionalization::impl::from_functional_tensor(self); |
1969 | } else { |
1970 | self_ = self; |
1971 | } |
1972 | |
1973 | at::Tensor other_; |
1974 | if (at::functionalization::impl::isFunctionalTensor(other)) { |
1975 | at::functionalization::impl::sync(other); |
1976 | other_ = at::functionalization::impl::from_functional_tensor(other); |
1977 | } else { |
1978 | other_ = other; |
1979 | } |
1980 | if (!(true && at::functionalization::impl::isFunctionalTensor(self))) { |
1981 | if ((false || at::functionalization::impl::isFunctionalTensor(other))) { |
      // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
      TORCH_INTERNAL_ASSERT(false,
        "mutating a non-functional tensor with a functional tensor is not allowed.",
        " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
1986 | } else { |
1987 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
1988 | at::AutoDispatchSkipFunctionalize guard; |
1989 | at::Tensor tmp_output = at::_ops::_add_relu__Tensor::call(self_, other_, alpha); |
      return self;
1991 | } |
1992 | } else { |
1993 | at::Tensor tmp_output; |
1994 | { |
1995 | at::AutoDispatchSkipFunctionalize guard; |
1996 | tmp_output = at::_ops::_add_relu_Tensor::call(self_, other_, alpha); |
1997 | } |
1998 | at::functionalization::impl::replace_(self, tmp_output); |
1999 | at::functionalization::impl::commit_update(self); |
2000 | at::functionalization::impl::sync(self); |
2001 | return self; |
2002 | } |
2003 | } |
2004 | |
2005 | at::Tensor & _add_relu_out_Scalar_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other, const at::Scalar & alpha, at::Tensor & out) { |
2006 | if (false) { |
2007 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
2008 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
    // (We can only do this for inplace ops today though, because they technically all support meta tensors).
2010 | auto self_meta = to_meta(self); |
2011 | auto out_meta = to_meta(out); |
2012 | at::AutoDispatchSkipFunctionalize func_guard; |
2013 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
2014 | at::_ops::_add_relu_Scalar_out::call(self_meta, other, alpha, out_meta); |
2015 | } |
2016 | |
2017 | at::Tensor self_; |
2018 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
2019 | at::functionalization::impl::sync(self); |
2020 | self_ = at::functionalization::impl::from_functional_tensor(self); |
2021 | } else { |
2022 | self_ = self; |
2023 | } |
2024 | |
2025 | at::Tensor out_; |
2026 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
2027 | at::functionalization::impl::sync(out); |
2028 | out_ = at::functionalization::impl::from_functional_tensor(out); |
2029 | } else { |
2030 | out_ = out; |
2031 | } |
2032 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
2033 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
      // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
      TORCH_INTERNAL_ASSERT(false,
        "mutating a non-functional tensor with a functional tensor is not allowed.",
        " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
2038 | } else { |
2039 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
2040 | at::AutoDispatchSkipFunctionalize guard; |
2041 | at::Tensor tmp_output = at::_ops::_add_relu_Scalar_out::call(self_, other, alpha, out_); |
      return out;
2043 | } |
2044 | } else { |
2045 | at::Tensor tmp_output; |
2046 | { |
2047 | at::AutoDispatchSkipFunctionalize guard; |
2048 | tmp_output = at::_ops::_add_relu_Scalar::call(self_, other, alpha); |
2049 | } |
2050 | at::functionalization::impl::replace_(out, tmp_output); |
2051 | at::functionalization::impl::commit_update(out); |
2052 | at::functionalization::impl::sync(out); |
2053 | return out; |
2054 | } |
2055 | } |
2056 | |
2057 | at::Tensor & _add_relu__Scalar(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & other, const at::Scalar & alpha) { |
2058 | if (true) { |
2059 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
2060 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
    // (We can only do this for inplace ops today though, because they technically all support meta tensors).
2062 | auto self_meta = to_meta(self); |
2063 | at::AutoDispatchSkipFunctionalize func_guard; |
2064 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
2065 | at::_ops::_add_relu__Scalar::call(self_meta, other, alpha); |
2066 | } |
2067 | |
2068 | at::Tensor self_; |
2069 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
2070 | at::functionalization::impl::sync(self); |
2071 | self_ = at::functionalization::impl::from_functional_tensor(self); |
2072 | } else { |
2073 | self_ = self; |
2074 | } |
2075 | if (!(true && at::functionalization::impl::isFunctionalTensor(self))) { |
2076 | if ((false)) { |
      // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
      TORCH_INTERNAL_ASSERT(false,
        "mutating a non-functional tensor with a functional tensor is not allowed.",
        " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
2081 | } else { |
2082 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
2083 | at::AutoDispatchSkipFunctionalize guard; |
2084 | at::Tensor tmp_output = at::_ops::_add_relu__Scalar::call(self_, other, alpha); |
      return self;
2086 | } |
2087 | } else { |
2088 | at::Tensor tmp_output; |
2089 | { |
2090 | at::AutoDispatchSkipFunctionalize guard; |
2091 | tmp_output = at::_ops::_add_relu_Scalar::call(self_, other, alpha); |
2092 | } |
2093 | at::functionalization::impl::replace_(self, tmp_output); |
2094 | at::functionalization::impl::commit_update(self); |
2095 | at::functionalization::impl::sync(self); |
2096 | return self; |
2097 | } |
2098 | } |
2099 | |
2100 | at::Tensor & any_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, bool keepdim, at::Tensor & out) { |
2101 | if (false) { |
2102 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
2103 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
    // (We can only do this for inplace ops today though, because they technically all support meta tensors).
2105 | auto self_meta = to_meta(self); |
2106 | auto out_meta = to_meta(out); |
2107 | at::AutoDispatchSkipFunctionalize func_guard; |
2108 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
2109 | at::_ops::any_out::call(self_meta, dim, keepdim, out_meta); |
2110 | } |
2111 | |
2112 | at::Tensor self_; |
2113 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
2114 | at::functionalization::impl::sync(self); |
2115 | self_ = at::functionalization::impl::from_functional_tensor(self); |
2116 | } else { |
2117 | self_ = self; |
2118 | } |
2119 | |
2120 | at::Tensor out_; |
2121 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
2122 | at::functionalization::impl::sync(out); |
2123 | out_ = at::functionalization::impl::from_functional_tensor(out); |
2124 | } else { |
2125 | out_ = out; |
2126 | } |
2127 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
2128 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
      // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
      TORCH_INTERNAL_ASSERT(false,
        "mutating a non-functional tensor with a functional tensor is not allowed.",
        " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
2133 | } else { |
2134 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
2135 | at::AutoDispatchSkipFunctionalize guard; |
2136 | at::Tensor tmp_output = at::_ops::any_out::call(self_, dim, keepdim, out_); |
      return out;
2138 | } |
2139 | } else { |
2140 | at::Tensor tmp_output; |
2141 | { |
2142 | at::AutoDispatchSkipFunctionalize guard; |
2143 | tmp_output = at::_ops::any_dim::call(self_, dim, keepdim); |
2144 | } |
2145 | at::functionalization::impl::replace_(out, tmp_output); |
2146 | at::functionalization::impl::commit_update(out); |
2147 | at::functionalization::impl::sync(out); |
2148 | return out; |
2149 | } |
2150 | } |
2151 | |
2152 | at::Tensor & any_out_dimname_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Dimname dim, bool keepdim, at::Tensor & out) { |
2153 | if (false) { |
2154 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
2155 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
    // (We can only do this for inplace ops today though, because they technically all support meta tensors).
2157 | auto self_meta = to_meta(self); |
2158 | auto out_meta = to_meta(out); |
2159 | at::AutoDispatchSkipFunctionalize func_guard; |
2160 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
2161 | at::_ops::any_dimname_out::call(self_meta, dim, keepdim, out_meta); |
2162 | } |
2163 | |
2164 | at::Tensor self_; |
2165 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
2166 | at::functionalization::impl::sync(self); |
2167 | self_ = at::functionalization::impl::from_functional_tensor(self); |
2168 | } else { |
2169 | self_ = self; |
2170 | } |
2171 | |
2172 | at::Tensor out_; |
2173 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
2174 | at::functionalization::impl::sync(out); |
2175 | out_ = at::functionalization::impl::from_functional_tensor(out); |
2176 | } else { |
2177 | out_ = out; |
2178 | } |
2179 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
2180 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
      // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
      TORCH_INTERNAL_ASSERT(false,
        "mutating a non-functional tensor with a functional tensor is not allowed.",
        " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
2185 | } else { |
2186 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
2187 | at::AutoDispatchSkipFunctionalize guard; |
2188 | at::Tensor tmp_output = at::_ops::any_dimname_out::call(self_, dim, keepdim, out_); |
      return out;
2190 | } |
2191 | } else { |
2192 | at::Tensor tmp_output; |
2193 | { |
2194 | at::AutoDispatchSkipFunctionalize guard; |
2195 | tmp_output = at::_ops::any_dimname::call(self_, dim, keepdim); |
2196 | } |
2197 | at::functionalization::impl::replace_(out, tmp_output); |
2198 | at::functionalization::impl::commit_update(out); |
2199 | at::functionalization::impl::sync(out); |
2200 | return out; |
2201 | } |
2202 | } |
2203 | |
2204 | at::Tensor & argmin_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::optional<int64_t> dim, bool keepdim, at::Tensor & out) { |
2205 | if (false) { |
2206 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
2207 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
    // (We can only do this for inplace ops today though, because they technically all support meta tensors).
2209 | auto self_meta = to_meta(self); |
2210 | auto out_meta = to_meta(out); |
2211 | at::AutoDispatchSkipFunctionalize func_guard; |
2212 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
2213 | at::_ops::argmin_out::call(self_meta, dim, keepdim, out_meta); |
2214 | } |
2215 | |
2216 | at::Tensor self_; |
2217 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
2218 | at::functionalization::impl::sync(self); |
2219 | self_ = at::functionalization::impl::from_functional_tensor(self); |
2220 | } else { |
2221 | self_ = self; |
2222 | } |
2223 | |
2224 | at::Tensor out_; |
2225 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
2226 | at::functionalization::impl::sync(out); |
2227 | out_ = at::functionalization::impl::from_functional_tensor(out); |
2228 | } else { |
2229 | out_ = out; |
2230 | } |
2231 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
2232 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
      // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
      TORCH_INTERNAL_ASSERT(false,
        "mutating a non-functional tensor with a functional tensor is not allowed.",
        " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
2237 | } else { |
2238 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
2239 | at::AutoDispatchSkipFunctionalize guard; |
2240 | at::Tensor tmp_output = at::_ops::argmin_out::call(self_, dim, keepdim, out_); |
      return out;
2242 | } |
2243 | } else { |
2244 | at::Tensor tmp_output; |
2245 | { |
2246 | at::AutoDispatchSkipFunctionalize guard; |
2247 | tmp_output = at::_ops::argmin::call(self_, dim, keepdim); |
2248 | } |
2249 | at::functionalization::impl::replace_(out, tmp_output); |
2250 | at::functionalization::impl::commit_update(out); |
2251 | at::functionalization::impl::sync(out); |
2252 | return out; |
2253 | } |
2254 | } |
2255 | |
2256 | at::Tensor & acosh_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) { |
2257 | if (false) { |
2258 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
2259 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
    // (We can only do this for inplace ops today though, because they technically all support meta tensors).
2261 | auto self_meta = to_meta(self); |
2262 | auto out_meta = to_meta(out); |
2263 | at::AutoDispatchSkipFunctionalize func_guard; |
2264 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
2265 | at::_ops::acosh_out::call(self_meta, out_meta); |
2266 | } |
2267 | |
2268 | at::Tensor self_; |
2269 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
2270 | at::functionalization::impl::sync(self); |
2271 | self_ = at::functionalization::impl::from_functional_tensor(self); |
2272 | } else { |
2273 | self_ = self; |
2274 | } |
2275 | |
2276 | at::Tensor out_; |
2277 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
2278 | at::functionalization::impl::sync(out); |
2279 | out_ = at::functionalization::impl::from_functional_tensor(out); |
2280 | } else { |
2281 | out_ = out; |
2282 | } |
2283 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
2284 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
      // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
      TORCH_INTERNAL_ASSERT(false,
        "mutating a non-functional tensor with a functional tensor is not allowed.",
        " Please ensure that all of your inputs are wrapped inside of a functionalize() call.");
2289 | } else { |
2290 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
2291 | at::AutoDispatchSkipFunctionalize guard; |
2292 | at::Tensor tmp_output = at::_ops::acosh_out::call(self_, out_); |
      return out;
2294 | } |
2295 | } else { |
2296 | at::Tensor tmp_output; |
2297 | { |
2298 | at::AutoDispatchSkipFunctionalize guard; |
2299 | tmp_output = at::_ops::acosh::call(self_); |
2300 | } |
2301 | at::functionalization::impl::replace_(out, tmp_output); |
2302 | at::functionalization::impl::commit_update(out); |
2303 | at::functionalization::impl::sync(out); |
2304 | return out; |
2305 | } |
2306 | } |
2307 | |
2308 | at::Tensor & acosh_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) { |
2309 | if (true) { |
2310 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
2311 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
    // (We can only do this for inplace ops today though, because they technically all support meta tensors).
2313 | auto self_meta = to_meta(self); |
2314 | at::AutoDispatchSkipFunctionalize func_guard; |
2315 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
2316 | at::_ops::acosh_::call(self_meta); |
2317 | } |
2318 | |
2319 | at::Tensor self_; |
2320 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
2321 | at::functionalization::impl::sync(self); |
2322 | self_ = at::functionalization::impl::from_functional_tensor(self); |
2323 | } else { |
2324 | self_ = self; |
2325 | } |
2326 | if (!(true && at::functionalization::impl::isFunctionalTensor(self))) { |
2327 | if ((false)) { |
// case 1: trying to mutate a non-functional tensor with a functional tensor is an error
2329 | TORCH_INTERNAL_ASSERT(false, |
2330 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
2331 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
2332 | } else { |
2333 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
2334 | at::AutoDispatchSkipFunctionalize guard; |
2335 | at::Tensor tmp_output = at::_ops::acosh_::call(self_); |
return self;
2337 | } |
2338 | } else { |
2339 | at::Tensor tmp_output; |
2340 | { |
2341 | at::AutoDispatchSkipFunctionalize guard; |
2342 | tmp_output = at::_ops::acosh::call(self_); |
2343 | } |
2344 | at::functionalization::impl::replace_(self, tmp_output); |
2345 | at::functionalization::impl::commit_update(self); |
2346 | at::functionalization::impl::sync(self); |
2347 | return self; |
2348 | } |
2349 | } |
2350 | |
2351 | at::Tensor & arcsin_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) { |
2352 | if (false) { |
2353 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
2354 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
// (We can only do this for inplace ops today though, because they technically all support meta tensors).
2356 | auto self_meta = to_meta(self); |
2357 | auto out_meta = to_meta(out); |
2358 | at::AutoDispatchSkipFunctionalize func_guard; |
2359 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
2360 | at::_ops::arcsin_out::call(self_meta, out_meta); |
2361 | } |
2362 | |
2363 | at::Tensor self_; |
2364 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
2365 | at::functionalization::impl::sync(self); |
2366 | self_ = at::functionalization::impl::from_functional_tensor(self); |
2367 | } else { |
2368 | self_ = self; |
2369 | } |
2370 | |
2371 | at::Tensor out_; |
2372 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
2373 | at::functionalization::impl::sync(out); |
2374 | out_ = at::functionalization::impl::from_functional_tensor(out); |
2375 | } else { |
2376 | out_ = out; |
2377 | } |
2378 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
2379 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
// case 1: trying to mutate a non-functional tensor with a functional tensor is an error
2381 | TORCH_INTERNAL_ASSERT(false, |
2382 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
2383 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
2384 | } else { |
2385 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
2386 | at::AutoDispatchSkipFunctionalize guard; |
2387 | at::Tensor tmp_output = at::_ops::arcsin_out::call(self_, out_); |
return out;
2389 | } |
2390 | } else { |
2391 | at::Tensor tmp_output; |
2392 | { |
2393 | at::AutoDispatchSkipFunctionalize guard; |
2394 | tmp_output = at::_ops::arcsin::call(self_); |
2395 | } |
2396 | at::functionalization::impl::replace_(out, tmp_output); |
2397 | at::functionalization::impl::commit_update(out); |
2398 | at::functionalization::impl::sync(out); |
2399 | return out; |
2400 | } |
2401 | } |
2402 | |
2403 | at::Tensor & arcsin_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) { |
2404 | if (true) { |
2405 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
2406 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
// (We can only do this for inplace ops today though, because they technically all support meta tensors).
2408 | auto self_meta = to_meta(self); |
2409 | at::AutoDispatchSkipFunctionalize func_guard; |
2410 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
2411 | at::_ops::arcsin_::call(self_meta); |
2412 | } |
2413 | |
2414 | at::Tensor self_; |
2415 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
2416 | at::functionalization::impl::sync(self); |
2417 | self_ = at::functionalization::impl::from_functional_tensor(self); |
2418 | } else { |
2419 | self_ = self; |
2420 | } |
2421 | if (!(true && at::functionalization::impl::isFunctionalTensor(self))) { |
2422 | if ((false)) { |
// case 1: trying to mutate a non-functional tensor with a functional tensor is an error
2424 | TORCH_INTERNAL_ASSERT(false, |
2425 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
2426 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
2427 | } else { |
2428 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
2429 | at::AutoDispatchSkipFunctionalize guard; |
2430 | at::Tensor tmp_output = at::_ops::arcsin_::call(self_); |
return self;
2432 | } |
2433 | } else { |
2434 | at::Tensor tmp_output; |
2435 | { |
2436 | at::AutoDispatchSkipFunctionalize guard; |
2437 | tmp_output = at::_ops::arcsin::call(self_); |
2438 | } |
2439 | at::functionalization::impl::replace_(self, tmp_output); |
2440 | at::functionalization::impl::commit_update(self); |
2441 | at::functionalization::impl::sync(self); |
2442 | return self; |
2443 | } |
2444 | } |
2445 | |
2446 | at::Tensor & arctan_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) { |
2447 | if (false) { |
2448 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
2449 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
// (We can only do this for inplace ops today though, because they technically all support meta tensors).
2451 | auto self_meta = to_meta(self); |
2452 | auto out_meta = to_meta(out); |
2453 | at::AutoDispatchSkipFunctionalize func_guard; |
2454 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
2455 | at::_ops::arctan_out::call(self_meta, out_meta); |
2456 | } |
2457 | |
2458 | at::Tensor self_; |
2459 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
2460 | at::functionalization::impl::sync(self); |
2461 | self_ = at::functionalization::impl::from_functional_tensor(self); |
2462 | } else { |
2463 | self_ = self; |
2464 | } |
2465 | |
2466 | at::Tensor out_; |
2467 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
2468 | at::functionalization::impl::sync(out); |
2469 | out_ = at::functionalization::impl::from_functional_tensor(out); |
2470 | } else { |
2471 | out_ = out; |
2472 | } |
2473 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
2474 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
// case 1: trying to mutate a non-functional tensor with a functional tensor is an error
2476 | TORCH_INTERNAL_ASSERT(false, |
2477 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
2478 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
2479 | } else { |
2480 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
2481 | at::AutoDispatchSkipFunctionalize guard; |
2482 | at::Tensor tmp_output = at::_ops::arctan_out::call(self_, out_); |
return out;
2484 | } |
2485 | } else { |
2486 | at::Tensor tmp_output; |
2487 | { |
2488 | at::AutoDispatchSkipFunctionalize guard; |
2489 | tmp_output = at::_ops::arctan::call(self_); |
2490 | } |
2491 | at::functionalization::impl::replace_(out, tmp_output); |
2492 | at::functionalization::impl::commit_update(out); |
2493 | at::functionalization::impl::sync(out); |
2494 | return out; |
2495 | } |
2496 | } |
2497 | |
2498 | at::Tensor & arctan_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) { |
2499 | if (true) { |
2500 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
2501 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
// (We can only do this for inplace ops today though, because they technically all support meta tensors).
2503 | auto self_meta = to_meta(self); |
2504 | at::AutoDispatchSkipFunctionalize func_guard; |
2505 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
2506 | at::_ops::arctan_::call(self_meta); |
2507 | } |
2508 | |
2509 | at::Tensor self_; |
2510 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
2511 | at::functionalization::impl::sync(self); |
2512 | self_ = at::functionalization::impl::from_functional_tensor(self); |
2513 | } else { |
2514 | self_ = self; |
2515 | } |
2516 | if (!(true && at::functionalization::impl::isFunctionalTensor(self))) { |
2517 | if ((false)) { |
// case 1: trying to mutate a non-functional tensor with a functional tensor is an error
2519 | TORCH_INTERNAL_ASSERT(false, |
2520 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
2521 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
2522 | } else { |
2523 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
2524 | at::AutoDispatchSkipFunctionalize guard; |
2525 | at::Tensor tmp_output = at::_ops::arctan_::call(self_); |
return self;
2527 | } |
2528 | } else { |
2529 | at::Tensor tmp_output; |
2530 | { |
2531 | at::AutoDispatchSkipFunctionalize guard; |
2532 | tmp_output = at::_ops::arctan::call(self_); |
2533 | } |
2534 | at::functionalization::impl::replace_(self, tmp_output); |
2535 | at::functionalization::impl::commit_update(self); |
2536 | at::functionalization::impl::sync(self); |
2537 | return self; |
2538 | } |
2539 | } |
2540 | |
2541 | at::Tensor & bincount_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const c10::optional<at::Tensor> & weights, int64_t minlength, at::Tensor & out) { |
2542 | if (false) { |
2543 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
2544 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
// (We can only do this for inplace ops today though, because they technically all support meta tensors).
2546 | auto self_meta = to_meta(self); |
2547 | auto weights_meta = to_meta(weights); |
2548 | auto out_meta = to_meta(out); |
2549 | at::AutoDispatchSkipFunctionalize func_guard; |
2550 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
2551 | at::_ops::bincount_out::call(self_meta, weights_meta, minlength, out_meta); |
2552 | } |
2553 | |
2554 | at::Tensor self_; |
2555 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
2556 | at::functionalization::impl::sync(self); |
2557 | self_ = at::functionalization::impl::from_functional_tensor(self); |
2558 | } else { |
2559 | self_ = self; |
2560 | } |
2561 | |
2562 | c10::optional<at::Tensor> weights_; |
2563 | if (at::functionalization::impl::isFunctionalTensor(weights)) { |
2564 | at::functionalization::impl::sync(weights); |
2565 | weights_ = at::functionalization::impl::from_functional_tensor(weights); |
2566 | } else { |
2567 | weights_ = weights; |
2568 | } |
2569 | |
2570 | at::Tensor out_; |
2571 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
2572 | at::functionalization::impl::sync(out); |
2573 | out_ = at::functionalization::impl::from_functional_tensor(out); |
2574 | } else { |
2575 | out_ = out; |
2576 | } |
2577 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
2578 | if ((false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(weights))) { |
// case 1: trying to mutate a non-functional tensor with a functional tensor is an error
2580 | TORCH_INTERNAL_ASSERT(false, |
2581 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
2582 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
2583 | } else { |
2584 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
2585 | at::AutoDispatchSkipFunctionalize guard; |
2586 | at::Tensor tmp_output = at::_ops::bincount_out::call(self_, weights_, minlength, out_); |
return out;
2588 | } |
2589 | } else { |
2590 | at::Tensor tmp_output; |
2591 | { |
2592 | at::AutoDispatchSkipFunctionalize guard; |
2593 | tmp_output = at::_ops::bincount::call(self_, weights_, minlength); |
2594 | } |
2595 | at::functionalization::impl::replace_(out, tmp_output); |
2596 | at::functionalization::impl::commit_update(out); |
2597 | at::functionalization::impl::sync(out); |
2598 | return out; |
2599 | } |
2600 | } |
2601 | |
2602 | at::Tensor & copysign_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) { |
2603 | if (false) { |
2604 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
2605 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
// (We can only do this for inplace ops today though, because they technically all support meta tensors).
2607 | auto self_meta = to_meta(self); |
2608 | auto other_meta = to_meta(other); |
2609 | auto out_meta = to_meta(out); |
2610 | at::AutoDispatchSkipFunctionalize func_guard; |
2611 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
2612 | at::_ops::copysign_out::call(self_meta, other_meta, out_meta); |
2613 | } |
2614 | |
2615 | at::Tensor self_; |
2616 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
2617 | at::functionalization::impl::sync(self); |
2618 | self_ = at::functionalization::impl::from_functional_tensor(self); |
2619 | } else { |
2620 | self_ = self; |
2621 | } |
2622 | |
2623 | at::Tensor other_; |
2624 | if (at::functionalization::impl::isFunctionalTensor(other)) { |
2625 | at::functionalization::impl::sync(other); |
2626 | other_ = at::functionalization::impl::from_functional_tensor(other); |
2627 | } else { |
2628 | other_ = other; |
2629 | } |
2630 | |
2631 | at::Tensor out_; |
2632 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
2633 | at::functionalization::impl::sync(out); |
2634 | out_ = at::functionalization::impl::from_functional_tensor(out); |
2635 | } else { |
2636 | out_ = out; |
2637 | } |
2638 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
2639 | if ((false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(other))) { |
// case 1: trying to mutate a non-functional tensor with a functional tensor is an error
2641 | TORCH_INTERNAL_ASSERT(false, |
2642 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
2643 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
2644 | } else { |
2645 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
2646 | at::AutoDispatchSkipFunctionalize guard; |
2647 | at::Tensor tmp_output = at::_ops::copysign_out::call(self_, other_, out_); |
return out;
2649 | } |
2650 | } else { |
2651 | at::Tensor tmp_output; |
2652 | { |
2653 | at::AutoDispatchSkipFunctionalize guard; |
2654 | tmp_output = at::_ops::copysign_Tensor::call(self_, other_); |
2655 | } |
2656 | at::functionalization::impl::replace_(out, tmp_output); |
2657 | at::functionalization::impl::commit_update(out); |
2658 | at::functionalization::impl::sync(out); |
2659 | return out; |
2660 | } |
2661 | } |
2662 | |
2663 | at::Tensor & copysign__Tensor(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other) { |
2664 | if (true) { |
2665 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
2666 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
// (We can only do this for inplace ops today though, because they technically all support meta tensors).
2668 | auto self_meta = to_meta(self); |
2669 | auto other_meta = to_meta(other); |
2670 | at::AutoDispatchSkipFunctionalize func_guard; |
2671 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
2672 | at::_ops::copysign__Tensor::call(self_meta, other_meta); |
2673 | } |
2674 | |
2675 | at::Tensor self_; |
2676 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
2677 | at::functionalization::impl::sync(self); |
2678 | self_ = at::functionalization::impl::from_functional_tensor(self); |
2679 | } else { |
2680 | self_ = self; |
2681 | } |
2682 | |
2683 | at::Tensor other_; |
2684 | if (at::functionalization::impl::isFunctionalTensor(other)) { |
2685 | at::functionalization::impl::sync(other); |
2686 | other_ = at::functionalization::impl::from_functional_tensor(other); |
2687 | } else { |
2688 | other_ = other; |
2689 | } |
2690 | if (!(true && at::functionalization::impl::isFunctionalTensor(self))) { |
2691 | if ((false || at::functionalization::impl::isFunctionalTensor(other))) { |
// case 1: trying to mutate a non-functional tensor with a functional tensor is an error
2693 | TORCH_INTERNAL_ASSERT(false, |
2694 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
2695 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
2696 | } else { |
2697 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
2698 | at::AutoDispatchSkipFunctionalize guard; |
2699 | at::Tensor tmp_output = at::_ops::copysign__Tensor::call(self_, other_); |
return self;
2701 | } |
2702 | } else { |
2703 | at::Tensor tmp_output; |
2704 | { |
2705 | at::AutoDispatchSkipFunctionalize guard; |
2706 | tmp_output = at::_ops::copysign_Tensor::call(self_, other_); |
2707 | } |
2708 | at::functionalization::impl::replace_(self, tmp_output); |
2709 | at::functionalization::impl::commit_update(self); |
2710 | at::functionalization::impl::sync(self); |
2711 | return self; |
2712 | } |
2713 | } |
2714 | |
2715 | at::Tensor & copysign_out_Scalar_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other, at::Tensor & out) { |
2716 | if (false) { |
2717 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
2718 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
// (We can only do this for inplace ops today though, because they technically all support meta tensors).
2720 | auto self_meta = to_meta(self); |
2721 | auto out_meta = to_meta(out); |
2722 | at::AutoDispatchSkipFunctionalize func_guard; |
2723 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
2724 | at::_ops::copysign_Scalar_out::call(self_meta, other, out_meta); |
2725 | } |
2726 | |
2727 | at::Tensor self_; |
2728 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
2729 | at::functionalization::impl::sync(self); |
2730 | self_ = at::functionalization::impl::from_functional_tensor(self); |
2731 | } else { |
2732 | self_ = self; |
2733 | } |
2734 | |
2735 | at::Tensor out_; |
2736 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
2737 | at::functionalization::impl::sync(out); |
2738 | out_ = at::functionalization::impl::from_functional_tensor(out); |
2739 | } else { |
2740 | out_ = out; |
2741 | } |
2742 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
2743 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
// case 1: trying to mutate a non-functional tensor with a functional tensor is an error
2745 | TORCH_INTERNAL_ASSERT(false, |
2746 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
2747 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
2748 | } else { |
2749 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
2750 | at::AutoDispatchSkipFunctionalize guard; |
2751 | at::Tensor tmp_output = at::_ops::copysign_Scalar_out::call(self_, other, out_); |
return out;
2753 | } |
2754 | } else { |
2755 | at::Tensor tmp_output; |
2756 | { |
2757 | at::AutoDispatchSkipFunctionalize guard; |
2758 | tmp_output = at::_ops::copysign_Scalar::call(self_, other); |
2759 | } |
2760 | at::functionalization::impl::replace_(out, tmp_output); |
2761 | at::functionalization::impl::commit_update(out); |
2762 | at::functionalization::impl::sync(out); |
2763 | return out; |
2764 | } |
2765 | } |
2766 | |
2767 | at::Tensor & copysign__Scalar(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & other) { |
2768 | if (true) { |
2769 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
2770 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
// (We can only do this for inplace ops today though, because they technically all support meta tensors).
2772 | auto self_meta = to_meta(self); |
2773 | at::AutoDispatchSkipFunctionalize func_guard; |
2774 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
2775 | at::_ops::copysign__Scalar::call(self_meta, other); |
2776 | } |
2777 | |
2778 | at::Tensor self_; |
2779 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
2780 | at::functionalization::impl::sync(self); |
2781 | self_ = at::functionalization::impl::from_functional_tensor(self); |
2782 | } else { |
2783 | self_ = self; |
2784 | } |
2785 | if (!(true && at::functionalization::impl::isFunctionalTensor(self))) { |
2786 | if ((false)) { |
// case 1: trying to mutate a non-functional tensor with a functional tensor is an error
2788 | TORCH_INTERNAL_ASSERT(false, |
2789 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
2790 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
2791 | } else { |
2792 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
2793 | at::AutoDispatchSkipFunctionalize guard; |
2794 | at::Tensor tmp_output = at::_ops::copysign__Scalar::call(self_, other); |
return self;
2796 | } |
2797 | } else { |
2798 | at::Tensor tmp_output; |
2799 | { |
2800 | at::AutoDispatchSkipFunctionalize guard; |
2801 | tmp_output = at::_ops::copysign_Scalar::call(self_, other); |
2802 | } |
2803 | at::functionalization::impl::replace_(self, tmp_output); |
2804 | at::functionalization::impl::commit_update(self); |
2805 | at::functionalization::impl::sync(self); |
2806 | return self; |
2807 | } |
2808 | } |
2809 | |
2810 | at::Tensor & logical_or_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) { |
2811 | if (false) { |
2812 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
2813 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
// (We can only do this for inplace ops today though, because they technically all support meta tensors).
2815 | auto self_meta = to_meta(self); |
2816 | auto other_meta = to_meta(other); |
2817 | auto out_meta = to_meta(out); |
2818 | at::AutoDispatchSkipFunctionalize func_guard; |
2819 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
2820 | at::_ops::logical_or_out::call(self_meta, other_meta, out_meta); |
2821 | } |
2822 | |
2823 | at::Tensor self_; |
2824 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
2825 | at::functionalization::impl::sync(self); |
2826 | self_ = at::functionalization::impl::from_functional_tensor(self); |
2827 | } else { |
2828 | self_ = self; |
2829 | } |
2830 | |
2831 | at::Tensor other_; |
2832 | if (at::functionalization::impl::isFunctionalTensor(other)) { |
2833 | at::functionalization::impl::sync(other); |
2834 | other_ = at::functionalization::impl::from_functional_tensor(other); |
2835 | } else { |
2836 | other_ = other; |
2837 | } |
2838 | |
2839 | at::Tensor out_; |
2840 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
2841 | at::functionalization::impl::sync(out); |
2842 | out_ = at::functionalization::impl::from_functional_tensor(out); |
2843 | } else { |
2844 | out_ = out; |
2845 | } |
2846 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
2847 | if ((false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(other))) { |
// case 1: trying to mutate a non-functional tensor with a functional tensor is an error
2849 | TORCH_INTERNAL_ASSERT(false, |
2850 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
2851 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
2852 | } else { |
2853 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
2854 | at::AutoDispatchSkipFunctionalize guard; |
2855 | at::Tensor tmp_output = at::_ops::logical_or_out::call(self_, other_, out_); |
return out;
2857 | } |
2858 | } else { |
2859 | at::Tensor tmp_output; |
2860 | { |
2861 | at::AutoDispatchSkipFunctionalize guard; |
2862 | tmp_output = at::_ops::logical_or::call(self_, other_); |
2863 | } |
2864 | at::functionalization::impl::replace_(out, tmp_output); |
2865 | at::functionalization::impl::commit_update(out); |
2866 | at::functionalization::impl::sync(out); |
2867 | return out; |
2868 | } |
2869 | } |
2870 | |
2871 | at::Tensor & logical_or_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other) { |
2872 | if (true) { |
2873 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
2874 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
// (We can only do this for inplace ops today though, because they technically all support meta tensors).
2876 | auto self_meta = to_meta(self); |
2877 | auto other_meta = to_meta(other); |
2878 | at::AutoDispatchSkipFunctionalize func_guard; |
2879 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
2880 | at::_ops::logical_or_::call(self_meta, other_meta); |
2881 | } |
2882 | |
2883 | at::Tensor self_; |
2884 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
2885 | at::functionalization::impl::sync(self); |
2886 | self_ = at::functionalization::impl::from_functional_tensor(self); |
2887 | } else { |
2888 | self_ = self; |
2889 | } |
2890 | |
2891 | at::Tensor other_; |
2892 | if (at::functionalization::impl::isFunctionalTensor(other)) { |
2893 | at::functionalization::impl::sync(other); |
2894 | other_ = at::functionalization::impl::from_functional_tensor(other); |
2895 | } else { |
2896 | other_ = other; |
2897 | } |
2898 | if (!(true && at::functionalization::impl::isFunctionalTensor(self))) { |
2899 | if ((false || at::functionalization::impl::isFunctionalTensor(other))) { |
// case 1: trying to mutate a non-functional tensor with a functional tensor is an error
2901 | TORCH_INTERNAL_ASSERT(false, |
2902 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
2903 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
2904 | } else { |
2905 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
2906 | at::AutoDispatchSkipFunctionalize guard; |
2907 | at::Tensor tmp_output = at::_ops::logical_or_::call(self_, other_); |
return self;
2909 | } |
2910 | } else { |
2911 | at::Tensor tmp_output; |
2912 | { |
2913 | at::AutoDispatchSkipFunctionalize guard; |
2914 | tmp_output = at::_ops::logical_or::call(self_, other_); |
2915 | } |
2916 | at::functionalization::impl::replace_(self, tmp_output); |
2917 | at::functionalization::impl::commit_update(self); |
2918 | at::functionalization::impl::sync(self); |
2919 | return self; |
2920 | } |
2921 | } |
2922 | |
2923 | at::Tensor & cat_out_out(c10::DispatchKeySet dispatchKeySet, const at::ITensorListRef & tensors, int64_t dim, at::Tensor & out) { |
2924 | if (false) { |
2925 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
2926 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
// (We can only do this for inplace ops today though, because they technically all support meta tensors).
2928 | auto tensors_meta = to_meta(tensors); |
2929 | auto out_meta = to_meta(out); |
2930 | at::AutoDispatchSkipFunctionalize func_guard; |
2931 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
2932 | at::_ops::cat_out::call(tensors_meta, dim, out_meta); |
2933 | } |
2934 | |
2935 | ::std::vector<at::Tensor> tensors_; |
2936 | if (at::functionalization::impl::isFunctionalTensor(tensors)) { |
2937 | at::functionalization::impl::sync(tensors); |
2938 | tensors_ = at::functionalization::impl::from_functional_tensor(tensors); |
2939 | } else { |
2940 | tensors_ = {tensors.begin(), tensors.end()}; |
2941 | } |
2942 | |
2943 | at::Tensor out_; |
2944 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
2945 | at::functionalization::impl::sync(out); |
2946 | out_ = at::functionalization::impl::from_functional_tensor(out); |
2947 | } else { |
2948 | out_ = out; |
2949 | } |
2950 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
2951 | if ((false || at::functionalization::impl::isFunctionalTensor(tensors))) { |
// case 1: trying to mutate a non-functional tensor with a functional tensor is an error
2953 | TORCH_INTERNAL_ASSERT(false, |
2954 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
2955 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
2956 | } else { |
2957 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
2958 | at::AutoDispatchSkipFunctionalize guard; |
2959 | at::Tensor tmp_output = at::_ops::cat_out::call(tensors_, dim, out_); |
return out;
2961 | } |
2962 | } else { |
2963 | at::Tensor tmp_output; |
2964 | { |
2965 | at::AutoDispatchSkipFunctionalize guard; |
2966 | tmp_output = at::_ops::cat::call(tensors_, dim); |
2967 | } |
2968 | at::functionalization::impl::replace_(out, tmp_output); |
2969 | at::functionalization::impl::commit_update(out); |
2970 | at::functionalization::impl::sync(out); |
2971 | return out; |
2972 | } |
2973 | } |
2974 | |
2975 | at::Tensor & cat_out_names_out(c10::DispatchKeySet dispatchKeySet, at::TensorList tensors, at::Dimname dim, at::Tensor & out) { |
2976 | if (false) { |
2977 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
2978 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
// (We can only do this for inplace ops today though, because they technically all support meta tensors).
2980 | auto tensors_meta = to_meta(tensors); |
2981 | auto out_meta = to_meta(out); |
2982 | at::AutoDispatchSkipFunctionalize func_guard; |
2983 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
2984 | at::_ops::cat_names_out::call(tensors_meta, dim, out_meta); |
2985 | } |
2986 | |
2987 | ::std::vector<at::Tensor> tensors_; |
2988 | if (at::functionalization::impl::isFunctionalTensor(tensors)) { |
2989 | at::functionalization::impl::sync(tensors); |
2990 | tensors_ = at::functionalization::impl::from_functional_tensor(tensors); |
2991 | } else { |
2992 | tensors_ = tensors.vec(); |
2993 | } |
2994 | |
2995 | at::Tensor out_; |
2996 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
2997 | at::functionalization::impl::sync(out); |
2998 | out_ = at::functionalization::impl::from_functional_tensor(out); |
2999 | } else { |
3000 | out_ = out; |
3001 | } |
3002 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
3003 | if ((false || at::functionalization::impl::isFunctionalTensor(tensors))) { |
// case 1: trying to mutate a non-functional tensor with a functional tensor is an error
3005 | TORCH_INTERNAL_ASSERT(false, |
3006 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
3007 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
3008 | } else { |
3009 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
3010 | at::AutoDispatchSkipFunctionalize guard; |
3011 | at::Tensor tmp_output = at::_ops::cat_names_out::call(tensors_, dim, out_); |
return out;
3013 | } |
3014 | } else { |
3015 | at::Tensor tmp_output; |
3016 | { |
3017 | at::AutoDispatchSkipFunctionalize guard; |
3018 | tmp_output = at::_ops::cat_names::call(tensors_, dim); |
3019 | } |
3020 | at::functionalization::impl::replace_(out, tmp_output); |
3021 | at::functionalization::impl::commit_update(out); |
3022 | at::functionalization::impl::sync(out); |
3023 | return out; |
3024 | } |
3025 | } |
3026 | |
3027 | at::Tensor & concat_out_out(c10::DispatchKeySet dispatchKeySet, at::TensorList tensors, int64_t dim, at::Tensor & out) { |
3028 | if (false) { |
3029 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
3030 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
// (We can only do this for inplace ops today though, because they technically all support meta tensors).
3032 | auto tensors_meta = to_meta(tensors); |
3033 | auto out_meta = to_meta(out); |
3034 | at::AutoDispatchSkipFunctionalize func_guard; |
3035 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
3036 | at::_ops::concat_out::call(tensors_meta, dim, out_meta); |
3037 | } |
3038 | |
3039 | ::std::vector<at::Tensor> tensors_; |
3040 | if (at::functionalization::impl::isFunctionalTensor(tensors)) { |
3041 | at::functionalization::impl::sync(tensors); |
3042 | tensors_ = at::functionalization::impl::from_functional_tensor(tensors); |
3043 | } else { |
3044 | tensors_ = tensors.vec(); |
3045 | } |
3046 | |
3047 | at::Tensor out_; |
3048 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
3049 | at::functionalization::impl::sync(out); |
3050 | out_ = at::functionalization::impl::from_functional_tensor(out); |
3051 | } else { |
3052 | out_ = out; |
3053 | } |
3054 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
3055 | if ((false || at::functionalization::impl::isFunctionalTensor(tensors))) { |
// case 1: trying to mutate a non-functional tensor with a functional tensor is an error
3057 | TORCH_INTERNAL_ASSERT(false, |
3058 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
3059 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
3060 | } else { |
3061 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
3062 | at::AutoDispatchSkipFunctionalize guard; |
3063 | at::Tensor tmp_output = at::_ops::concat_out::call(tensors_, dim, out_); |
return out;
3065 | } |
3066 | } else { |
3067 | at::Tensor tmp_output; |
3068 | { |
3069 | at::AutoDispatchSkipFunctionalize guard; |
3070 | tmp_output = at::_ops::concat::call(tensors_, dim); |
3071 | } |
3072 | at::functionalization::impl::replace_(out, tmp_output); |
3073 | at::functionalization::impl::commit_update(out); |
3074 | at::functionalization::impl::sync(out); |
3075 | return out; |
3076 | } |
3077 | } |
3078 | |
3079 | at::Tensor & concat_out_names_out(c10::DispatchKeySet dispatchKeySet, at::TensorList tensors, at::Dimname dim, at::Tensor & out) { |
3080 | if (false) { |
3081 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
3082 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
// (We can only do this for inplace ops today though, because they technically all support meta tensors).
3084 | auto tensors_meta = to_meta(tensors); |
3085 | auto out_meta = to_meta(out); |
3086 | at::AutoDispatchSkipFunctionalize func_guard; |
3087 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
3088 | at::_ops::concat_names_out::call(tensors_meta, dim, out_meta); |
3089 | } |
3090 | |
3091 | ::std::vector<at::Tensor> tensors_; |
3092 | if (at::functionalization::impl::isFunctionalTensor(tensors)) { |
3093 | at::functionalization::impl::sync(tensors); |
3094 | tensors_ = at::functionalization::impl::from_functional_tensor(tensors); |
3095 | } else { |
3096 | tensors_ = tensors.vec(); |
3097 | } |
3098 | |
3099 | at::Tensor out_; |
3100 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
3101 | at::functionalization::impl::sync(out); |
3102 | out_ = at::functionalization::impl::from_functional_tensor(out); |
3103 | } else { |
3104 | out_ = out; |
3105 | } |
3106 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
3107 | if ((false || at::functionalization::impl::isFunctionalTensor(tensors))) { |
// case 1: trying to mutate a non-functional tensor with a functional tensor is an error
3109 | TORCH_INTERNAL_ASSERT(false, |
3110 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
3111 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
3112 | } else { |
3113 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
3114 | at::AutoDispatchSkipFunctionalize guard; |
3115 | at::Tensor tmp_output = at::_ops::concat_names_out::call(tensors_, dim, out_); |
return out;
3117 | } |
3118 | } else { |
3119 | at::Tensor tmp_output; |
3120 | { |
3121 | at::AutoDispatchSkipFunctionalize guard; |
3122 | tmp_output = at::_ops::concat_names::call(tensors_, dim); |
3123 | } |
3124 | at::functionalization::impl::replace_(out, tmp_output); |
3125 | at::functionalization::impl::commit_update(out); |
3126 | at::functionalization::impl::sync(out); |
3127 | return out; |
3128 | } |
3129 | } |
3130 | |
3131 | at::Tensor & ceil_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) { |
3132 | if (false) { |
3133 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
3134 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
// (We can only do this for inplace ops today though, because they technically all support meta tensors).
3136 | auto self_meta = to_meta(self); |
3137 | auto out_meta = to_meta(out); |
3138 | at::AutoDispatchSkipFunctionalize func_guard; |
3139 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
3140 | at::_ops::ceil_out::call(self_meta, out_meta); |
3141 | } |
3142 | |
3143 | at::Tensor self_; |
3144 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
3145 | at::functionalization::impl::sync(self); |
3146 | self_ = at::functionalization::impl::from_functional_tensor(self); |
3147 | } else { |
3148 | self_ = self; |
3149 | } |
3150 | |
3151 | at::Tensor out_; |
3152 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
3153 | at::functionalization::impl::sync(out); |
3154 | out_ = at::functionalization::impl::from_functional_tensor(out); |
3155 | } else { |
3156 | out_ = out; |
3157 | } |
3158 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
3159 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
// case 1: trying to mutate a non-functional tensor with a functional tensor is an error
3161 | TORCH_INTERNAL_ASSERT(false, |
3162 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
3163 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
3164 | } else { |
3165 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
3166 | at::AutoDispatchSkipFunctionalize guard; |
3167 | at::Tensor tmp_output = at::_ops::ceil_out::call(self_, out_); |
return out;
3169 | } |
3170 | } else { |
3171 | at::Tensor tmp_output; |
3172 | { |
3173 | at::AutoDispatchSkipFunctionalize guard; |
3174 | tmp_output = at::_ops::ceil::call(self_); |
3175 | } |
3176 | at::functionalization::impl::replace_(out, tmp_output); |
3177 | at::functionalization::impl::commit_update(out); |
3178 | at::functionalization::impl::sync(out); |
3179 | return out; |
3180 | } |
3181 | } |
3182 | |
3183 | at::Tensor & ceil_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) { |
3184 | if (true) { |
3185 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
3186 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
// (We can only do this for inplace ops today though, because they technically all support meta tensors).
3188 | auto self_meta = to_meta(self); |
3189 | at::AutoDispatchSkipFunctionalize func_guard; |
3190 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
3191 | at::_ops::ceil_::call(self_meta); |
3192 | } |
3193 | |
3194 | at::Tensor self_; |
3195 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
3196 | at::functionalization::impl::sync(self); |
3197 | self_ = at::functionalization::impl::from_functional_tensor(self); |
3198 | } else { |
3199 | self_ = self; |
3200 | } |
3201 | if (!(true && at::functionalization::impl::isFunctionalTensor(self))) { |
3202 | if ((false)) { |
// case 1: trying to mutate a non-functional tensor with a functional tensor is an error
3204 | TORCH_INTERNAL_ASSERT(false, |
3205 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
3206 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
3207 | } else { |
3208 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
3209 | at::AutoDispatchSkipFunctionalize guard; |
3210 | at::Tensor tmp_output = at::_ops::ceil_::call(self_); |
return self;
3212 | } |
3213 | } else { |
3214 | at::Tensor tmp_output; |
3215 | { |
3216 | at::AutoDispatchSkipFunctionalize guard; |
3217 | tmp_output = at::_ops::ceil::call(self_); |
3218 | } |
3219 | at::functionalization::impl::replace_(self, tmp_output); |
3220 | at::functionalization::impl::commit_update(self); |
3221 | at::functionalization::impl::sync(self); |
3222 | return self; |
3223 | } |
3224 | } |
3225 | |
3226 | at::Tensor & polar_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & abs, const at::Tensor & angle, at::Tensor & out) { |
3227 | if (false) { |
3228 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
3229 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
// (We can only do this for inplace ops today though, because they technically all support meta tensors).
3231 | auto abs_meta = to_meta(abs); |
3232 | auto angle_meta = to_meta(angle); |
3233 | auto out_meta = to_meta(out); |
3234 | at::AutoDispatchSkipFunctionalize func_guard; |
3235 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
3236 | at::_ops::polar_out::call(abs_meta, angle_meta, out_meta); |
3237 | } |
3238 | |
3239 | at::Tensor abs_; |
3240 | if (at::functionalization::impl::isFunctionalTensor(abs)) { |
3241 | at::functionalization::impl::sync(abs); |
3242 | abs_ = at::functionalization::impl::from_functional_tensor(abs); |
3243 | } else { |
3244 | abs_ = abs; |
3245 | } |
3246 | |
3247 | at::Tensor angle_; |
3248 | if (at::functionalization::impl::isFunctionalTensor(angle)) { |
3249 | at::functionalization::impl::sync(angle); |
3250 | angle_ = at::functionalization::impl::from_functional_tensor(angle); |
3251 | } else { |
3252 | angle_ = angle; |
3253 | } |
3254 | |
3255 | at::Tensor out_; |
3256 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
3257 | at::functionalization::impl::sync(out); |
3258 | out_ = at::functionalization::impl::from_functional_tensor(out); |
3259 | } else { |
3260 | out_ = out; |
3261 | } |
3262 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
3263 | if ((false || at::functionalization::impl::isFunctionalTensor(abs) || at::functionalization::impl::isFunctionalTensor(angle))) { |
// case 1: trying to mutate a non-functional tensor with a functional tensor is an error
3265 | TORCH_INTERNAL_ASSERT(false, |
3266 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
3267 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
3268 | } else { |
3269 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
3270 | at::AutoDispatchSkipFunctionalize guard; |
3271 | at::Tensor tmp_output = at::_ops::polar_out::call(abs_, angle_, out_); |
return out;
3273 | } |
3274 | } else { |
3275 | at::Tensor tmp_output; |
3276 | { |
3277 | at::AutoDispatchSkipFunctionalize guard; |
3278 | tmp_output = at::_ops::polar::call(abs_, angle_); |
3279 | } |
3280 | at::functionalization::impl::replace_(out, tmp_output); |
3281 | at::functionalization::impl::commit_update(out); |
3282 | at::functionalization::impl::sync(out); |
3283 | return out; |
3284 | } |
3285 | } |
3286 | |
3287 | at::Tensor & convolution_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, c10::SymIntArrayRef padding, at::IntArrayRef dilation, bool transposed, c10::SymIntArrayRef output_padding, int64_t groups, at::Tensor & out) { |
3288 | if (false) { |
3289 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
3290 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
// (We can only do this for inplace ops today though, because they technically all support meta tensors).
3292 | auto input_meta = to_meta(input); |
3293 | auto weight_meta = to_meta(weight); |
3294 | auto bias_meta = to_meta(bias); |
3295 | auto out_meta = to_meta(out); |
3296 | at::AutoDispatchSkipFunctionalize func_guard; |
3297 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
3298 | at::_ops::convolution_out::call(input_meta, weight_meta, bias_meta, stride, padding, dilation, transposed, output_padding, groups, out_meta); |
3299 | } |
3300 | |
3301 | at::Tensor input_; |
3302 | if (at::functionalization::impl::isFunctionalTensor(input)) { |
3303 | at::functionalization::impl::sync(input); |
3304 | input_ = at::functionalization::impl::from_functional_tensor(input); |
3305 | } else { |
3306 | input_ = input; |
3307 | } |
3308 | |
3309 | at::Tensor weight_; |
3310 | if (at::functionalization::impl::isFunctionalTensor(weight)) { |
3311 | at::functionalization::impl::sync(weight); |
3312 | weight_ = at::functionalization::impl::from_functional_tensor(weight); |
3313 | } else { |
3314 | weight_ = weight; |
3315 | } |
3316 | |
3317 | c10::optional<at::Tensor> bias_; |
3318 | if (at::functionalization::impl::isFunctionalTensor(bias)) { |
3319 | at::functionalization::impl::sync(bias); |
3320 | bias_ = at::functionalization::impl::from_functional_tensor(bias); |
3321 | } else { |
3322 | bias_ = bias; |
3323 | } |
3324 | |
3325 | at::Tensor out_; |
3326 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
3327 | at::functionalization::impl::sync(out); |
3328 | out_ = at::functionalization::impl::from_functional_tensor(out); |
3329 | } else { |
3330 | out_ = out; |
3331 | } |
3332 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
3333 | if ((false || at::functionalization::impl::isFunctionalTensor(input) || at::functionalization::impl::isFunctionalTensor(weight) || at::functionalization::impl::isFunctionalTensor(bias))) { |
// case 1: trying to mutate a non-functional tensor with a functional tensor is an error
3335 | TORCH_INTERNAL_ASSERT(false, |
3336 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
3337 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
3338 | } else { |
3339 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
3340 | at::AutoDispatchSkipFunctionalize guard; |
3341 | at::Tensor tmp_output = at::_ops::convolution_out::call(input_, weight_, bias_, stride, padding, dilation, transposed, output_padding, groups, out_); |
return out;
3343 | } |
3344 | } else { |
3345 | at::Tensor tmp_output; |
3346 | { |
3347 | at::AutoDispatchSkipFunctionalize guard; |
3348 | tmp_output = at::_ops::convolution::call(input_, weight_, bias_, stride, padding, dilation, transposed, output_padding, groups); |
3349 | } |
3350 | at::functionalization::impl::replace_(out, tmp_output); |
3351 | at::functionalization::impl::commit_update(out); |
3352 | at::functionalization::impl::sync(out); |
3353 | return out; |
3354 | } |
3355 | } |
3356 | |
3357 | at::Tensor & convolution_overrideable_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool transposed, at::IntArrayRef output_padding, int64_t groups, at::Tensor & out) { |
3358 | if (false) { |
3359 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
3360 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
// (We can only do this for inplace ops today though, because they technically all support meta tensors).
3362 | auto input_meta = to_meta(input); |
3363 | auto weight_meta = to_meta(weight); |
3364 | auto bias_meta = to_meta(bias); |
3365 | auto out_meta = to_meta(out); |
3366 | at::AutoDispatchSkipFunctionalize func_guard; |
3367 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
3368 | at::_ops::convolution_overrideable_out::call(input_meta, weight_meta, bias_meta, stride, padding, dilation, transposed, output_padding, groups, out_meta); |
3369 | } |
3370 | |
3371 | at::Tensor input_; |
3372 | if (at::functionalization::impl::isFunctionalTensor(input)) { |
3373 | at::functionalization::impl::sync(input); |
3374 | input_ = at::functionalization::impl::from_functional_tensor(input); |
3375 | } else { |
3376 | input_ = input; |
3377 | } |
3378 | |
3379 | at::Tensor weight_; |
3380 | if (at::functionalization::impl::isFunctionalTensor(weight)) { |
3381 | at::functionalization::impl::sync(weight); |
3382 | weight_ = at::functionalization::impl::from_functional_tensor(weight); |
3383 | } else { |
3384 | weight_ = weight; |
3385 | } |
3386 | |
3387 | c10::optional<at::Tensor> bias_; |
3388 | if (at::functionalization::impl::isFunctionalTensor(bias)) { |
3389 | at::functionalization::impl::sync(bias); |
3390 | bias_ = at::functionalization::impl::from_functional_tensor(bias); |
3391 | } else { |
3392 | bias_ = bias; |
3393 | } |
3394 | |
3395 | at::Tensor out_; |
3396 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
3397 | at::functionalization::impl::sync(out); |
3398 | out_ = at::functionalization::impl::from_functional_tensor(out); |
3399 | } else { |
3400 | out_ = out; |
3401 | } |
3402 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
3403 | if ((false || at::functionalization::impl::isFunctionalTensor(input) || at::functionalization::impl::isFunctionalTensor(weight) || at::functionalization::impl::isFunctionalTensor(bias))) { |
// case 1: trying to mutate a non-functional tensor with a functional tensor is an error
3405 | TORCH_INTERNAL_ASSERT(false, |
3406 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
3407 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
3408 | } else { |
3409 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
3410 | at::AutoDispatchSkipFunctionalize guard; |
3411 | at::Tensor tmp_output = at::_ops::convolution_overrideable_out::call(input_, weight_, bias_, stride, padding, dilation, transposed, output_padding, groups, out_); |
return out;
3413 | } |
3414 | } else { |
3415 | at::Tensor tmp_output; |
3416 | { |
3417 | at::AutoDispatchSkipFunctionalize guard; |
3418 | tmp_output = at::_ops::convolution_overrideable::call(input_, weight_, bias_, stride, padding, dilation, transposed, output_padding, groups); |
3419 | } |
3420 | at::functionalization::impl::replace_(out, tmp_output); |
3421 | at::functionalization::impl::commit_update(out); |
3422 | at::functionalization::impl::sync(out); |
3423 | return out; |
3424 | } |
3425 | } |
3426 | |
3427 | ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> convolution_backward_overrideable_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & input, const at::Tensor & weight, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool transposed, at::IntArrayRef output_padding, int64_t groups, ::std::array<bool,3> output_mask, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) { |
3428 | if (false) { |
3429 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
3430 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
// (We can only do this for inplace ops today though, because they technically all support meta tensors).
3432 | auto grad_output_meta = to_meta(grad_output); |
3433 | auto input_meta = to_meta(input); |
3434 | auto weight_meta = to_meta(weight); |
3435 | auto out0_meta = to_meta(out0); |
3436 | auto out1_meta = to_meta(out1); |
3437 | auto out2_meta = to_meta(out2); |
3438 | at::AutoDispatchSkipFunctionalize func_guard; |
3439 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
3440 | at::_ops::convolution_backward_overrideable_out::call(grad_output_meta, input_meta, weight_meta, stride, padding, dilation, transposed, output_padding, groups, output_mask, out0_meta, out1_meta, out2_meta); |
3441 | } |
3442 | |
3443 | at::Tensor grad_output_; |
3444 | if (at::functionalization::impl::isFunctionalTensor(grad_output)) { |
3445 | at::functionalization::impl::sync(grad_output); |
3446 | grad_output_ = at::functionalization::impl::from_functional_tensor(grad_output); |
3447 | } else { |
3448 | grad_output_ = grad_output; |
3449 | } |
3450 | |
3451 | at::Tensor input_; |
3452 | if (at::functionalization::impl::isFunctionalTensor(input)) { |
3453 | at::functionalization::impl::sync(input); |
3454 | input_ = at::functionalization::impl::from_functional_tensor(input); |
3455 | } else { |
3456 | input_ = input; |
3457 | } |
3458 | |
3459 | at::Tensor weight_; |
3460 | if (at::functionalization::impl::isFunctionalTensor(weight)) { |
3461 | at::functionalization::impl::sync(weight); |
3462 | weight_ = at::functionalization::impl::from_functional_tensor(weight); |
3463 | } else { |
3464 | weight_ = weight; |
3465 | } |
3466 | |
3467 | at::Tensor out0_; |
3468 | if (at::functionalization::impl::isFunctionalTensor(out0)) { |
3469 | at::functionalization::impl::sync(out0); |
3470 | out0_ = at::functionalization::impl::from_functional_tensor(out0); |
3471 | } else { |
3472 | out0_ = out0; |
3473 | } |
3474 | |
3475 | at::Tensor out1_; |
3476 | if (at::functionalization::impl::isFunctionalTensor(out1)) { |
3477 | at::functionalization::impl::sync(out1); |
3478 | out1_ = at::functionalization::impl::from_functional_tensor(out1); |
3479 | } else { |
3480 | out1_ = out1; |
3481 | } |
3482 | |
3483 | at::Tensor out2_; |
3484 | if (at::functionalization::impl::isFunctionalTensor(out2)) { |
3485 | at::functionalization::impl::sync(out2); |
3486 | out2_ = at::functionalization::impl::from_functional_tensor(out2); |
3487 | } else { |
3488 | out2_ = out2; |
3489 | } |
3490 | if (!(true && at::functionalization::impl::isFunctionalTensor(out0) && at::functionalization::impl::isFunctionalTensor(out1) && at::functionalization::impl::isFunctionalTensor(out2))) { |
3491 | if ((false || at::functionalization::impl::isFunctionalTensor(grad_output) || at::functionalization::impl::isFunctionalTensor(input) || at::functionalization::impl::isFunctionalTensor(weight))) { |
// case 1: trying to mutate a non-functional tensor with a functional tensor is an error
3493 | TORCH_INTERNAL_ASSERT(false, |
3494 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
3495 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
3496 | } else { |
3497 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
3498 | at::AutoDispatchSkipFunctionalize guard; |
3499 | ::std::tuple<at::Tensor,at::Tensor,at::Tensor> tmp_output = at::_ops::convolution_backward_overrideable_out::call(grad_output_, input_, weight_, stride, padding, dilation, transposed, output_padding, groups, output_mask, out0_, out1_, out2_); |
return ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &>(out0, out1, out2);
3501 | } |
3502 | } else { |
3503 | ::std::tuple<at::Tensor,at::Tensor,at::Tensor> tmp_output; |
3504 | { |
3505 | at::AutoDispatchSkipFunctionalize guard; |
3506 | tmp_output = at::_ops::convolution_backward_overrideable::call(grad_output_, input_, weight_, stride, padding, dilation, transposed, output_padding, groups, output_mask); |
3507 | } |
3508 | at::functionalization::impl::replace_(out0, std::get<0>(tmp_output)); |
3509 | at::functionalization::impl::commit_update(out0); |
3510 | at::functionalization::impl::sync(out0); |
3511 | at::functionalization::impl::replace_(out1, std::get<1>(tmp_output)); |
3512 | at::functionalization::impl::commit_update(out1); |
3513 | at::functionalization::impl::sync(out1); |
3514 | at::functionalization::impl::replace_(out2, std::get<2>(tmp_output)); |
3515 | at::functionalization::impl::commit_update(out2); |
3516 | at::functionalization::impl::sync(out2); |
3517 | return ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &>(out0, out1, out2); |
3518 | } |
3519 | } |
3520 | |
3521 | at::Tensor & cudnn_affine_grid_generator_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & theta, int64_t N, int64_t C, int64_t H, int64_t W, at::Tensor & out) { |
3522 | if (false) { |
3523 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
3524 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
// (We can only do this for inplace ops today though, because they technically all support meta tensors).
3526 | auto theta_meta = to_meta(theta); |
3527 | auto out_meta = to_meta(out); |
3528 | at::AutoDispatchSkipFunctionalize func_guard; |
3529 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
3530 | at::_ops::cudnn_affine_grid_generator_out::call(theta_meta, N, C, H, W, out_meta); |
3531 | } |
3532 | |
3533 | at::Tensor theta_; |
3534 | if (at::functionalization::impl::isFunctionalTensor(theta)) { |
3535 | at::functionalization::impl::sync(theta); |
3536 | theta_ = at::functionalization::impl::from_functional_tensor(theta); |
3537 | } else { |
3538 | theta_ = theta; |
3539 | } |
3540 | |
3541 | at::Tensor out_; |
3542 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
3543 | at::functionalization::impl::sync(out); |
3544 | out_ = at::functionalization::impl::from_functional_tensor(out); |
3545 | } else { |
3546 | out_ = out; |
3547 | } |
3548 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
3549 | if ((false || at::functionalization::impl::isFunctionalTensor(theta))) { |
// case 1: trying to mutate a non-functional tensor with a functional tensor is an error
3551 | TORCH_INTERNAL_ASSERT(false, |
3552 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
3553 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
3554 | } else { |
3555 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
3556 | at::AutoDispatchSkipFunctionalize guard; |
3557 | at::Tensor tmp_output = at::_ops::cudnn_affine_grid_generator_out::call(theta_, N, C, H, W, out_); |
return out;
3559 | } |
3560 | } else { |
3561 | at::Tensor tmp_output; |
3562 | { |
3563 | at::AutoDispatchSkipFunctionalize guard; |
3564 | tmp_output = at::_ops::cudnn_affine_grid_generator::call(theta_, N, C, H, W); |
3565 | } |
3566 | at::functionalization::impl::replace_(out, tmp_output); |
3567 | at::functionalization::impl::commit_update(out); |
3568 | at::functionalization::impl::sync(out); |
3569 | return out; |
3570 | } |
3571 | } |
3572 | |
3573 | ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> cudnn_batch_norm_backward_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & grad_output, const at::Tensor & weight, const c10::optional<at::Tensor> & running_mean, const c10::optional<at::Tensor> & running_var, const c10::optional<at::Tensor> & save_mean, const c10::optional<at::Tensor> & save_var, double epsilon, const at::Tensor & reserveSpace, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) { |
3574 | if (false) { |
3575 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
3576 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
// (We can only do this for inplace ops today though, because they technically all support meta tensors).
3578 | auto input_meta = to_meta(input); |
3579 | auto grad_output_meta = to_meta(grad_output); |
3580 | auto weight_meta = to_meta(weight); |
3581 | auto running_mean_meta = to_meta(running_mean); |
3582 | auto running_var_meta = to_meta(running_var); |
3583 | auto save_mean_meta = to_meta(save_mean); |
3584 | auto save_var_meta = to_meta(save_var); |
3585 | auto reserveSpace_meta = to_meta(reserveSpace); |
3586 | auto out0_meta = to_meta(out0); |
3587 | auto out1_meta = to_meta(out1); |
3588 | auto out2_meta = to_meta(out2); |
3589 | at::AutoDispatchSkipFunctionalize func_guard; |
3590 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
3591 | at::_ops::cudnn_batch_norm_backward_out::call(input_meta, grad_output_meta, weight_meta, running_mean_meta, running_var_meta, save_mean_meta, save_var_meta, epsilon, reserveSpace_meta, out0_meta, out1_meta, out2_meta); |
3592 | } |
3593 | |
3594 | at::Tensor input_; |
3595 | if (at::functionalization::impl::isFunctionalTensor(input)) { |
3596 | at::functionalization::impl::sync(input); |
3597 | input_ = at::functionalization::impl::from_functional_tensor(input); |
3598 | } else { |
3599 | input_ = input; |
3600 | } |
3601 | |
3602 | at::Tensor grad_output_; |
3603 | if (at::functionalization::impl::isFunctionalTensor(grad_output)) { |
3604 | at::functionalization::impl::sync(grad_output); |
3605 | grad_output_ = at::functionalization::impl::from_functional_tensor(grad_output); |
3606 | } else { |
3607 | grad_output_ = grad_output; |
3608 | } |
3609 | |
3610 | at::Tensor weight_; |
3611 | if (at::functionalization::impl::isFunctionalTensor(weight)) { |
3612 | at::functionalization::impl::sync(weight); |
3613 | weight_ = at::functionalization::impl::from_functional_tensor(weight); |
3614 | } else { |
3615 | weight_ = weight; |
3616 | } |
3617 | |
3618 | c10::optional<at::Tensor> running_mean_; |
3619 | if (at::functionalization::impl::isFunctionalTensor(running_mean)) { |
3620 | at::functionalization::impl::sync(running_mean); |
3621 | running_mean_ = at::functionalization::impl::from_functional_tensor(running_mean); |
3622 | } else { |
3623 | running_mean_ = running_mean; |
3624 | } |
3625 | |
3626 | c10::optional<at::Tensor> running_var_; |
3627 | if (at::functionalization::impl::isFunctionalTensor(running_var)) { |
3628 | at::functionalization::impl::sync(running_var); |
3629 | running_var_ = at::functionalization::impl::from_functional_tensor(running_var); |
3630 | } else { |
3631 | running_var_ = running_var; |
3632 | } |
3633 | |
3634 | c10::optional<at::Tensor> save_mean_; |
3635 | if (at::functionalization::impl::isFunctionalTensor(save_mean)) { |
3636 | at::functionalization::impl::sync(save_mean); |
3637 | save_mean_ = at::functionalization::impl::from_functional_tensor(save_mean); |
3638 | } else { |
3639 | save_mean_ = save_mean; |
3640 | } |
3641 | |
3642 | c10::optional<at::Tensor> save_var_; |
3643 | if (at::functionalization::impl::isFunctionalTensor(save_var)) { |
3644 | at::functionalization::impl::sync(save_var); |
3645 | save_var_ = at::functionalization::impl::from_functional_tensor(save_var); |
3646 | } else { |
3647 | save_var_ = save_var; |
3648 | } |
3649 | |
3650 | at::Tensor reserveSpace_; |
3651 | if (at::functionalization::impl::isFunctionalTensor(reserveSpace)) { |
3652 | at::functionalization::impl::sync(reserveSpace); |
3653 | reserveSpace_ = at::functionalization::impl::from_functional_tensor(reserveSpace); |
3654 | } else { |
3655 | reserveSpace_ = reserveSpace; |
3656 | } |
3657 | |
3658 | at::Tensor out0_; |
3659 | if (at::functionalization::impl::isFunctionalTensor(out0)) { |
3660 | at::functionalization::impl::sync(out0); |
3661 | out0_ = at::functionalization::impl::from_functional_tensor(out0); |
3662 | } else { |
3663 | out0_ = out0; |
3664 | } |
3665 | |
3666 | at::Tensor out1_; |
3667 | if (at::functionalization::impl::isFunctionalTensor(out1)) { |
3668 | at::functionalization::impl::sync(out1); |
3669 | out1_ = at::functionalization::impl::from_functional_tensor(out1); |
3670 | } else { |
3671 | out1_ = out1; |
3672 | } |
3673 | |
3674 | at::Tensor out2_; |
3675 | if (at::functionalization::impl::isFunctionalTensor(out2)) { |
3676 | at::functionalization::impl::sync(out2); |
3677 | out2_ = at::functionalization::impl::from_functional_tensor(out2); |
3678 | } else { |
3679 | out2_ = out2; |
3680 | } |
3681 | if (!(true && at::functionalization::impl::isFunctionalTensor(out0) && at::functionalization::impl::isFunctionalTensor(out1) && at::functionalization::impl::isFunctionalTensor(out2))) { |
3682 | if ((false || at::functionalization::impl::isFunctionalTensor(input) || at::functionalization::impl::isFunctionalTensor(grad_output) || at::functionalization::impl::isFunctionalTensor(weight) || at::functionalization::impl::isFunctionalTensor(running_mean) || at::functionalization::impl::isFunctionalTensor(running_var) || at::functionalization::impl::isFunctionalTensor(save_mean) || at::functionalization::impl::isFunctionalTensor(save_var) || at::functionalization::impl::isFunctionalTensor(reserveSpace))) { |
// case 1: trying to mutate a non-functional tensor with a functional tensor is an error
3684 | TORCH_INTERNAL_ASSERT(false, |
3685 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
3686 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
3687 | } else { |
3688 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
3689 | at::AutoDispatchSkipFunctionalize guard; |
3690 | ::std::tuple<at::Tensor,at::Tensor,at::Tensor> tmp_output = at::_ops::cudnn_batch_norm_backward_out::call(input_, grad_output_, weight_, running_mean_, running_var_, save_mean_, save_var_, epsilon, reserveSpace_, out0_, out1_, out2_); |
return ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &>(out0, out1, out2);
3692 | } |
3693 | } else { |
3694 | ::std::tuple<at::Tensor,at::Tensor,at::Tensor> tmp_output; |
3695 | { |
3696 | at::AutoDispatchSkipFunctionalize guard; |
3697 | tmp_output = at::_ops::cudnn_batch_norm_backward::call(input_, grad_output_, weight_, running_mean_, running_var_, save_mean_, save_var_, epsilon, reserveSpace_); |
3698 | } |
3699 | at::functionalization::impl::replace_(out0, std::get<0>(tmp_output)); |
3700 | at::functionalization::impl::commit_update(out0); |
3701 | at::functionalization::impl::sync(out0); |
3702 | at::functionalization::impl::replace_(out1, std::get<1>(tmp_output)); |
3703 | at::functionalization::impl::commit_update(out1); |
3704 | at::functionalization::impl::sync(out1); |
3705 | at::functionalization::impl::replace_(out2, std::get<2>(tmp_output)); |
3706 | at::functionalization::impl::commit_update(out2); |
3707 | at::functionalization::impl::sync(out2); |
3708 | return ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &>(out0, out1, out2); |
3709 | } |
3710 | } |
3711 | |
3712 | at::Tensor & cudnn_convolution_relu_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, int64_t groups, at::Tensor & out) { |
3713 | if (false) { |
3714 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
3715 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
// (We can only do this for inplace ops today though, because they technically all support meta tensors).
3717 | auto self_meta = to_meta(self); |
3718 | auto weight_meta = to_meta(weight); |
3719 | auto bias_meta = to_meta(bias); |
3720 | auto out_meta = to_meta(out); |
3721 | at::AutoDispatchSkipFunctionalize func_guard; |
3722 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
3723 | at::_ops::cudnn_convolution_relu_out::call(self_meta, weight_meta, bias_meta, stride, padding, dilation, groups, out_meta); |
3724 | } |
3725 | |
3726 | at::Tensor self_; |
3727 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
3728 | at::functionalization::impl::sync(self); |
3729 | self_ = at::functionalization::impl::from_functional_tensor(self); |
3730 | } else { |
3731 | self_ = self; |
3732 | } |
3733 | |
3734 | at::Tensor weight_; |
3735 | if (at::functionalization::impl::isFunctionalTensor(weight)) { |
3736 | at::functionalization::impl::sync(weight); |
3737 | weight_ = at::functionalization::impl::from_functional_tensor(weight); |
3738 | } else { |
3739 | weight_ = weight; |
3740 | } |
3741 | |
3742 | c10::optional<at::Tensor> bias_; |
3743 | if (at::functionalization::impl::isFunctionalTensor(bias)) { |
3744 | at::functionalization::impl::sync(bias); |
3745 | bias_ = at::functionalization::impl::from_functional_tensor(bias); |
3746 | } else { |
3747 | bias_ = bias; |
3748 | } |
3749 | |
3750 | at::Tensor out_; |
3751 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
3752 | at::functionalization::impl::sync(out); |
3753 | out_ = at::functionalization::impl::from_functional_tensor(out); |
3754 | } else { |
3755 | out_ = out; |
3756 | } |
3757 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
3758 | if ((false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(weight) || at::functionalization::impl::isFunctionalTensor(bias))) { |
// case 1: trying to mutate a non-functional tensor with a functional tensor is an error
3760 | TORCH_INTERNAL_ASSERT(false, |
3761 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
3762 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
3763 | } else { |
3764 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
3765 | at::AutoDispatchSkipFunctionalize guard; |
3766 | at::Tensor tmp_output = at::_ops::cudnn_convolution_relu_out::call(self_, weight_, bias_, stride, padding, dilation, groups, out_); |
return out;
3768 | } |
3769 | } else { |
3770 | at::Tensor tmp_output; |
3771 | { |
3772 | at::AutoDispatchSkipFunctionalize guard; |
3773 | tmp_output = at::_ops::cudnn_convolution_relu::call(self_, weight_, bias_, stride, padding, dilation, groups); |
3774 | } |
3775 | at::functionalization::impl::replace_(out, tmp_output); |
3776 | at::functionalization::impl::commit_update(out); |
3777 | at::functionalization::impl::sync(out); |
3778 | return out; |
3779 | } |
3780 | } |
3781 | |
3782 | at::Tensor & cudnn_convolution_add_relu_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & weight, const at::Tensor & z, const c10::optional<at::Scalar> & alpha, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, int64_t groups, at::Tensor & out) { |
3783 | if (false) { |
3784 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
3785 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
// (We can only do this for inplace ops today though, because they technically all support meta tensors).
3787 | auto self_meta = to_meta(self); |
3788 | auto weight_meta = to_meta(weight); |
3789 | auto z_meta = to_meta(z); |
3790 | auto bias_meta = to_meta(bias); |
3791 | auto out_meta = to_meta(out); |
3792 | at::AutoDispatchSkipFunctionalize func_guard; |
3793 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
3794 | at::_ops::cudnn_convolution_add_relu_out::call(self_meta, weight_meta, z_meta, alpha, bias_meta, stride, padding, dilation, groups, out_meta); |
3795 | } |
3796 | |
3797 | at::Tensor self_; |
3798 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
3799 | at::functionalization::impl::sync(self); |
3800 | self_ = at::functionalization::impl::from_functional_tensor(self); |
3801 | } else { |
3802 | self_ = self; |
3803 | } |
3804 | |
3805 | at::Tensor weight_; |
3806 | if (at::functionalization::impl::isFunctionalTensor(weight)) { |
3807 | at::functionalization::impl::sync(weight); |
3808 | weight_ = at::functionalization::impl::from_functional_tensor(weight); |
3809 | } else { |
3810 | weight_ = weight; |
3811 | } |
3812 | |
3813 | at::Tensor z_; |
3814 | if (at::functionalization::impl::isFunctionalTensor(z)) { |
3815 | at::functionalization::impl::sync(z); |
3816 | z_ = at::functionalization::impl::from_functional_tensor(z); |
3817 | } else { |
3818 | z_ = z; |
3819 | } |
3820 | |
3821 | c10::optional<at::Tensor> bias_; |
3822 | if (at::functionalization::impl::isFunctionalTensor(bias)) { |
3823 | at::functionalization::impl::sync(bias); |
3824 | bias_ = at::functionalization::impl::from_functional_tensor(bias); |
3825 | } else { |
3826 | bias_ = bias; |
3827 | } |
3828 | |
3829 | at::Tensor out_; |
3830 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
3831 | at::functionalization::impl::sync(out); |
3832 | out_ = at::functionalization::impl::from_functional_tensor(out); |
3833 | } else { |
3834 | out_ = out; |
3835 | } |
3836 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
3837 | if ((false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(weight) || at::functionalization::impl::isFunctionalTensor(z) || at::functionalization::impl::isFunctionalTensor(bias))) { |
// case 1: trying to mutate a non-functional tensor with a functional tensor is an error
3839 | TORCH_INTERNAL_ASSERT(false, |
3840 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
3841 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
3842 | } else { |
3843 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
3844 | at::AutoDispatchSkipFunctionalize guard; |
3845 | at::Tensor tmp_output = at::_ops::cudnn_convolution_add_relu_out::call(self_, weight_, z_, alpha, bias_, stride, padding, dilation, groups, out_); |
return out;
3847 | } |
3848 | } else { |
3849 | at::Tensor tmp_output; |
3850 | { |
3851 | at::AutoDispatchSkipFunctionalize guard; |
3852 | tmp_output = at::_ops::cudnn_convolution_add_relu::call(self_, weight_, z_, alpha, bias_, stride, padding, dilation, groups); |
3853 | } |
3854 | at::functionalization::impl::replace_(out, tmp_output); |
3855 | at::functionalization::impl::commit_update(out); |
3856 | at::functionalization::impl::sync(out); |
3857 | return out; |
3858 | } |
3859 | } |
3860 | |
3861 | ::std::tuple<at::Tensor &,at::Tensor &> cummax_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, at::Tensor & values, at::Tensor & indices) { |
3862 | if (false) { |
3863 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
3864 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
// (We can only do this for inplace ops today though, because they technically all support meta tensors).
3866 | auto self_meta = to_meta(self); |
3867 | auto values_meta = to_meta(values); |
3868 | auto indices_meta = to_meta(indices); |
3869 | at::AutoDispatchSkipFunctionalize func_guard; |
3870 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
3871 | at::_ops::cummax_out::call(self_meta, dim, values_meta, indices_meta); |
3872 | } |
3873 | |
3874 | at::Tensor self_; |
3875 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
3876 | at::functionalization::impl::sync(self); |
3877 | self_ = at::functionalization::impl::from_functional_tensor(self); |
3878 | } else { |
3879 | self_ = self; |
3880 | } |
3881 | |
3882 | at::Tensor values_; |
3883 | if (at::functionalization::impl::isFunctionalTensor(values)) { |
3884 | at::functionalization::impl::sync(values); |
3885 | values_ = at::functionalization::impl::from_functional_tensor(values); |
3886 | } else { |
3887 | values_ = values; |
3888 | } |
3889 | |
3890 | at::Tensor indices_; |
3891 | if (at::functionalization::impl::isFunctionalTensor(indices)) { |
3892 | at::functionalization::impl::sync(indices); |
3893 | indices_ = at::functionalization::impl::from_functional_tensor(indices); |
3894 | } else { |
3895 | indices_ = indices; |
3896 | } |
3897 | if (!(true && at::functionalization::impl::isFunctionalTensor(values) && at::functionalization::impl::isFunctionalTensor(indices))) { |
3898 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
// case 1: trying to mutate a non-functional tensor with a functional tensor is an error
3900 | TORCH_INTERNAL_ASSERT(false, |
3901 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
3902 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
3903 | } else { |
3904 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
3905 | at::AutoDispatchSkipFunctionalize guard; |
3906 | ::std::tuple<at::Tensor,at::Tensor> tmp_output = at::_ops::cummax_out::call(self_, dim, values_, indices_); |
return ::std::tuple<at::Tensor &,at::Tensor &>(values, indices);
3908 | } |
3909 | } else { |
3910 | ::std::tuple<at::Tensor,at::Tensor> tmp_output; |
3911 | { |
3912 | at::AutoDispatchSkipFunctionalize guard; |
3913 | tmp_output = at::_ops::cummax::call(self_, dim); |
3914 | } |
3915 | at::functionalization::impl::replace_(values, std::get<0>(tmp_output)); |
3916 | at::functionalization::impl::commit_update(values); |
3917 | at::functionalization::impl::sync(values); |
3918 | at::functionalization::impl::replace_(indices, std::get<1>(tmp_output)); |
3919 | at::functionalization::impl::commit_update(indices); |
3920 | at::functionalization::impl::sync(indices); |
3921 | return ::std::tuple<at::Tensor &,at::Tensor &>(values, indices); |
3922 | } |
3923 | } |
3924 | |
3925 | ::std::tuple<at::Tensor &,at::Tensor &> cummax_out_dimname_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Dimname dim, at::Tensor & values, at::Tensor & indices) { |
3926 | if (false) { |
3927 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
3928 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
// (We can only do this for inplace ops today though, because they technically all support meta tensors).
3930 | auto self_meta = to_meta(self); |
3931 | auto values_meta = to_meta(values); |
3932 | auto indices_meta = to_meta(indices); |
3933 | at::AutoDispatchSkipFunctionalize func_guard; |
3934 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
3935 | at::_ops::cummax_dimname_out::call(self_meta, dim, values_meta, indices_meta); |
3936 | } |
3937 | |
3938 | at::Tensor self_; |
3939 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
3940 | at::functionalization::impl::sync(self); |
3941 | self_ = at::functionalization::impl::from_functional_tensor(self); |
3942 | } else { |
3943 | self_ = self; |
3944 | } |
3945 | |
3946 | at::Tensor values_; |
3947 | if (at::functionalization::impl::isFunctionalTensor(values)) { |
3948 | at::functionalization::impl::sync(values); |
3949 | values_ = at::functionalization::impl::from_functional_tensor(values); |
3950 | } else { |
3951 | values_ = values; |
3952 | } |
3953 | |
3954 | at::Tensor indices_; |
3955 | if (at::functionalization::impl::isFunctionalTensor(indices)) { |
3956 | at::functionalization::impl::sync(indices); |
3957 | indices_ = at::functionalization::impl::from_functional_tensor(indices); |
3958 | } else { |
3959 | indices_ = indices; |
3960 | } |
3961 | if (!(true && at::functionalization::impl::isFunctionalTensor(values) && at::functionalization::impl::isFunctionalTensor(indices))) { |
3962 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
// case 1: trying to mutate a non-functional tensor with a functional tensor is an error
3964 | TORCH_INTERNAL_ASSERT(false, |
3965 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
3966 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
3967 | } else { |
3968 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
3969 | at::AutoDispatchSkipFunctionalize guard; |
3970 | ::std::tuple<at::Tensor,at::Tensor> tmp_output = at::_ops::cummax_dimname_out::call(self_, dim, values_, indices_); |
return ::std::tuple<at::Tensor &,at::Tensor &>(values, indices);
3972 | } |
3973 | } else { |
3974 | ::std::tuple<at::Tensor,at::Tensor> tmp_output; |
3975 | { |
3976 | at::AutoDispatchSkipFunctionalize guard; |
3977 | tmp_output = at::_ops::cummax_dimname::call(self_, dim); |
3978 | } |
3979 | at::functionalization::impl::replace_(values, std::get<0>(tmp_output)); |
3980 | at::functionalization::impl::commit_update(values); |
3981 | at::functionalization::impl::sync(values); |
3982 | at::functionalization::impl::replace_(indices, std::get<1>(tmp_output)); |
3983 | at::functionalization::impl::commit_update(indices); |
3984 | at::functionalization::impl::sync(indices); |
3985 | return ::std::tuple<at::Tensor &,at::Tensor &>(values, indices); |
3986 | } |
3987 | } |
3988 | |
3989 | at::Tensor & diff_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t n, int64_t dim, const c10::optional<at::Tensor> & prepend, const c10::optional<at::Tensor> & append, at::Tensor & out) { |
3990 | if (false) { |
3991 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
3992 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
// (We can only do this for inplace ops today though, because they technically all support meta tensors).
3994 | auto self_meta = to_meta(self); |
3995 | auto prepend_meta = to_meta(prepend); |
3996 | auto append_meta = to_meta(append); |
3997 | auto out_meta = to_meta(out); |
3998 | at::AutoDispatchSkipFunctionalize func_guard; |
3999 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
4000 | at::_ops::diff_out::call(self_meta, n, dim, prepend_meta, append_meta, out_meta); |
4001 | } |
4002 | |
4003 | at::Tensor self_; |
4004 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
4005 | at::functionalization::impl::sync(self); |
4006 | self_ = at::functionalization::impl::from_functional_tensor(self); |
4007 | } else { |
4008 | self_ = self; |
4009 | } |
4010 | |
4011 | c10::optional<at::Tensor> prepend_; |
4012 | if (at::functionalization::impl::isFunctionalTensor(prepend)) { |
4013 | at::functionalization::impl::sync(prepend); |
4014 | prepend_ = at::functionalization::impl::from_functional_tensor(prepend); |
4015 | } else { |
4016 | prepend_ = prepend; |
4017 | } |
4018 | |
4019 | c10::optional<at::Tensor> append_; |
4020 | if (at::functionalization::impl::isFunctionalTensor(append)) { |
4021 | at::functionalization::impl::sync(append); |
4022 | append_ = at::functionalization::impl::from_functional_tensor(append); |
4023 | } else { |
4024 | append_ = append; |
4025 | } |
4026 | |
4027 | at::Tensor out_; |
4028 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
4029 | at::functionalization::impl::sync(out); |
4030 | out_ = at::functionalization::impl::from_functional_tensor(out); |
4031 | } else { |
4032 | out_ = out; |
4033 | } |
4034 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
4035 | if ((false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(prepend) || at::functionalization::impl::isFunctionalTensor(append))) { |
// case 1: trying to mutate a non-functional tensor with a functional tensor is an error
4037 | TORCH_INTERNAL_ASSERT(false, |
4038 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
4039 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
4040 | } else { |
4041 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
4042 | at::AutoDispatchSkipFunctionalize guard; |
4043 | at::Tensor tmp_output = at::_ops::diff_out::call(self_, n, dim, prepend_, append_, out_); |
return out;
4045 | } |
4046 | } else { |
4047 | at::Tensor tmp_output; |
4048 | { |
4049 | at::AutoDispatchSkipFunctionalize guard; |
4050 | tmp_output = at::_ops::diff::call(self_, n, dim, prepend_, append_); |
4051 | } |
4052 | at::functionalization::impl::replace_(out, tmp_output); |
4053 | at::functionalization::impl::commit_update(out); |
4054 | at::functionalization::impl::sync(out); |
4055 | return out; |
4056 | } |
4057 | } |
4058 | |
4059 | at::Tensor & embedding_renorm_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & indices, double max_norm, double norm_type, at::Tensor & out) { |
4060 | if (false) { |
4061 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
4062 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
// (We can only do this for inplace ops today though, because they technically all support meta tensors).
4064 | auto self_meta = to_meta(self); |
4065 | auto indices_meta = to_meta(indices); |
4066 | auto out_meta = to_meta(out); |
4067 | at::AutoDispatchSkipFunctionalize func_guard; |
4068 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
4069 | at::_ops::embedding_renorm_out::call(self_meta, indices_meta, max_norm, norm_type, out_meta); |
4070 | } |
4071 | |
4072 | at::Tensor self_; |
4073 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
4074 | at::functionalization::impl::sync(self); |
4075 | self_ = at::functionalization::impl::from_functional_tensor(self); |
4076 | } else { |
4077 | self_ = self; |
4078 | } |
4079 | |
4080 | at::Tensor indices_; |
4081 | if (at::functionalization::impl::isFunctionalTensor(indices)) { |
4082 | at::functionalization::impl::sync(indices); |
4083 | indices_ = at::functionalization::impl::from_functional_tensor(indices); |
4084 | } else { |
4085 | indices_ = indices; |
4086 | } |
4087 | |
4088 | at::Tensor out_; |
4089 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
4090 | at::functionalization::impl::sync(out); |
4091 | out_ = at::functionalization::impl::from_functional_tensor(out); |
4092 | } else { |
4093 | out_ = out; |
4094 | } |
4095 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
4096 | if ((false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(indices))) { |
// case 1: trying to mutate a non-functional tensor with a functional tensor is an error
4098 | TORCH_INTERNAL_ASSERT(false, |
4099 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
4100 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
4101 | } else { |
4102 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
4103 | at::AutoDispatchSkipFunctionalize guard; |
4104 | at::Tensor tmp_output = at::_ops::embedding_renorm_out::call(self_, indices_, max_norm, norm_type, out_); |
return out;
4106 | } |
4107 | } else { |
4108 | at::Tensor tmp_output; |
4109 | { |
4110 | at::AutoDispatchSkipFunctionalize guard; |
4111 | tmp_output = at::_ops::embedding_renorm::call(self_, indices_, max_norm, norm_type); |
4112 | } |
4113 | at::functionalization::impl::replace_(out, tmp_output); |
4114 | at::functionalization::impl::commit_update(out); |
4115 | at::functionalization::impl::sync(out); |
4116 | return out; |
4117 | } |
4118 | } |
4119 | |
4120 | at::Tensor & embedding_renorm_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & indices, double max_norm, double norm_type) { |
4121 | if (true) { |
4122 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
4123 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
// (We can only do this for inplace ops today though, because they technically all support meta tensors).
4125 | auto self_meta = to_meta(self); |
4126 | auto indices_meta = to_meta(indices); |
4127 | at::AutoDispatchSkipFunctionalize func_guard; |
4128 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
4129 | at::_ops::embedding_renorm_::call(self_meta, indices_meta, max_norm, norm_type); |
4130 | } |
4131 | |
4132 | at::Tensor self_; |
4133 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
4134 | at::functionalization::impl::sync(self); |
4135 | self_ = at::functionalization::impl::from_functional_tensor(self); |
4136 | } else { |
4137 | self_ = self; |
4138 | } |
4139 | |
4140 | at::Tensor indices_; |
4141 | if (at::functionalization::impl::isFunctionalTensor(indices)) { |
4142 | at::functionalization::impl::sync(indices); |
4143 | indices_ = at::functionalization::impl::from_functional_tensor(indices); |
4144 | } else { |
4145 | indices_ = indices; |
4146 | } |
4147 | if (!(true && at::functionalization::impl::isFunctionalTensor(self))) { |
4148 | if ((false || at::functionalization::impl::isFunctionalTensor(indices))) { |
// case 1: trying to mutate a non-functional tensor with a functional tensor is an error
4150 | TORCH_INTERNAL_ASSERT(false, |
4151 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
4152 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
4153 | } else { |
4154 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
4155 | at::AutoDispatchSkipFunctionalize guard; |
4156 | at::Tensor tmp_output = at::_ops::embedding_renorm_::call(self_, indices_, max_norm, norm_type); |
return self;
4158 | } |
4159 | } else { |
4160 | at::Tensor tmp_output; |
4161 | { |
4162 | at::AutoDispatchSkipFunctionalize guard; |
4163 | tmp_output = at::_ops::embedding_renorm::call(self_, indices_, max_norm, norm_type); |
4164 | } |
4165 | at::functionalization::impl::replace_(self, tmp_output); |
4166 | at::functionalization::impl::commit_update(self); |
4167 | at::functionalization::impl::sync(self); |
4168 | return self; |
4169 | } |
4170 | } |
4171 | |
4172 | ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &> _embedding_bag_forward_only_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & weight, const at::Tensor & indices, const at::Tensor & offsets, bool scale_grad_by_freq, int64_t mode, bool sparse, const c10::optional<at::Tensor> & per_sample_weights, bool include_last_offset, int64_t padding_idx, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, at::Tensor & out3) { |
4173 | if (false) { |
4174 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
4175 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
// (We can only do this for inplace ops today though, because they technically all support meta tensors).
4177 | auto weight_meta = to_meta(weight); |
4178 | auto indices_meta = to_meta(indices); |
4179 | auto offsets_meta = to_meta(offsets); |
4180 | auto per_sample_weights_meta = to_meta(per_sample_weights); |
4181 | auto out0_meta = to_meta(out0); |
4182 | auto out1_meta = to_meta(out1); |
4183 | auto out2_meta = to_meta(out2); |
4184 | auto out3_meta = to_meta(out3); |
4185 | at::AutoDispatchSkipFunctionalize func_guard; |
4186 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
4187 | at::_ops::_embedding_bag_forward_only_out::call(weight_meta, indices_meta, offsets_meta, scale_grad_by_freq, mode, sparse, per_sample_weights_meta, include_last_offset, padding_idx, out0_meta, out1_meta, out2_meta, out3_meta); |
4188 | } |
4189 | |
4190 | at::Tensor weight_; |
4191 | if (at::functionalization::impl::isFunctionalTensor(weight)) { |
4192 | at::functionalization::impl::sync(weight); |
4193 | weight_ = at::functionalization::impl::from_functional_tensor(weight); |
4194 | } else { |
4195 | weight_ = weight; |
4196 | } |
4197 | |
4198 | at::Tensor indices_; |
4199 | if (at::functionalization::impl::isFunctionalTensor(indices)) { |
4200 | at::functionalization::impl::sync(indices); |
4201 | indices_ = at::functionalization::impl::from_functional_tensor(indices); |
4202 | } else { |
4203 | indices_ = indices; |
4204 | } |
4205 | |
4206 | at::Tensor offsets_; |
4207 | if (at::functionalization::impl::isFunctionalTensor(offsets)) { |
4208 | at::functionalization::impl::sync(offsets); |
4209 | offsets_ = at::functionalization::impl::from_functional_tensor(offsets); |
4210 | } else { |
4211 | offsets_ = offsets; |
4212 | } |
4213 | |
4214 | c10::optional<at::Tensor> per_sample_weights_; |
4215 | if (at::functionalization::impl::isFunctionalTensor(per_sample_weights)) { |
4216 | at::functionalization::impl::sync(per_sample_weights); |
4217 | per_sample_weights_ = at::functionalization::impl::from_functional_tensor(per_sample_weights); |
4218 | } else { |
4219 | per_sample_weights_ = per_sample_weights; |
4220 | } |
4221 | |
4222 | at::Tensor out0_; |
4223 | if (at::functionalization::impl::isFunctionalTensor(out0)) { |
4224 | at::functionalization::impl::sync(out0); |
4225 | out0_ = at::functionalization::impl::from_functional_tensor(out0); |
4226 | } else { |
4227 | out0_ = out0; |
4228 | } |
4229 | |
4230 | at::Tensor out1_; |
4231 | if (at::functionalization::impl::isFunctionalTensor(out1)) { |
4232 | at::functionalization::impl::sync(out1); |
4233 | out1_ = at::functionalization::impl::from_functional_tensor(out1); |
4234 | } else { |
4235 | out1_ = out1; |
4236 | } |
4237 | |
4238 | at::Tensor out2_; |
4239 | if (at::functionalization::impl::isFunctionalTensor(out2)) { |
4240 | at::functionalization::impl::sync(out2); |
4241 | out2_ = at::functionalization::impl::from_functional_tensor(out2); |
4242 | } else { |
4243 | out2_ = out2; |
4244 | } |
4245 | |
4246 | at::Tensor out3_; |
4247 | if (at::functionalization::impl::isFunctionalTensor(out3)) { |
4248 | at::functionalization::impl::sync(out3); |
4249 | out3_ = at::functionalization::impl::from_functional_tensor(out3); |
4250 | } else { |
4251 | out3_ = out3; |
4252 | } |
4253 | if (!(true && at::functionalization::impl::isFunctionalTensor(out0) && at::functionalization::impl::isFunctionalTensor(out1) && at::functionalization::impl::isFunctionalTensor(out2) && at::functionalization::impl::isFunctionalTensor(out3))) { |
4254 | if ((false || at::functionalization::impl::isFunctionalTensor(weight) || at::functionalization::impl::isFunctionalTensor(indices) || at::functionalization::impl::isFunctionalTensor(offsets) || at::functionalization::impl::isFunctionalTensor(per_sample_weights))) { |
// case 1: trying to mutate a non-functional tensor with a functional tensor is an error
4256 | TORCH_INTERNAL_ASSERT(false, |
4257 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
4258 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
4259 | } else { |
4260 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
4261 | at::AutoDispatchSkipFunctionalize guard; |
4262 | ::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor> tmp_output = at::_ops::_embedding_bag_forward_only_out::call(weight_, indices_, offsets_, scale_grad_by_freq, mode, sparse, per_sample_weights_, include_last_offset, padding_idx, out0_, out1_, out2_, out3_); |
return ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &>(out0, out1, out2, out3);
4264 | } |
4265 | } else { |
4266 | ::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor> tmp_output; |
4267 | { |
4268 | at::AutoDispatchSkipFunctionalize guard; |
4269 | tmp_output = at::_ops::_embedding_bag_forward_only::call(weight_, indices_, offsets_, scale_grad_by_freq, mode, sparse, per_sample_weights_, include_last_offset, padding_idx); |
4270 | } |
4271 | at::functionalization::impl::replace_(out0, std::get<0>(tmp_output)); |
4272 | at::functionalization::impl::commit_update(out0); |
4273 | at::functionalization::impl::sync(out0); |
4274 | at::functionalization::impl::replace_(out1, std::get<1>(tmp_output)); |
4275 | at::functionalization::impl::commit_update(out1); |
4276 | at::functionalization::impl::sync(out1); |
4277 | at::functionalization::impl::replace_(out2, std::get<2>(tmp_output)); |
4278 | at::functionalization::impl::commit_update(out2); |
4279 | at::functionalization::impl::sync(out2); |
4280 | at::functionalization::impl::replace_(out3, std::get<3>(tmp_output)); |
4281 | at::functionalization::impl::commit_update(out3); |
4282 | at::functionalization::impl::sync(out3); |
4283 | return ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &>(out0, out1, out2, out3); |
4284 | } |
4285 | } |
4286 | |
4287 | at::Tensor & _embedding_bag_dense_backward_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad, const at::Tensor & indices, const at::Tensor & offset2bag, const at::Tensor & bag_size, const at::Tensor & maximum_indices, c10::SymInt num_weights, bool scale_grad_by_freq, int64_t mode, const c10::optional<at::Tensor> & per_sample_weights, int64_t padding_idx, at::Tensor & out) { |
4288 | if (false) { |
4289 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
4290 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
// (We can only do this for inplace ops today though, because they technically all support meta tensors).
4292 | auto grad_meta = to_meta(grad); |
4293 | auto indices_meta = to_meta(indices); |
4294 | auto offset2bag_meta = to_meta(offset2bag); |
4295 | auto bag_size_meta = to_meta(bag_size); |
4296 | auto maximum_indices_meta = to_meta(maximum_indices); |
4297 | auto per_sample_weights_meta = to_meta(per_sample_weights); |
4298 | auto out_meta = to_meta(out); |
4299 | at::AutoDispatchSkipFunctionalize func_guard; |
4300 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
4301 | at::_ops::_embedding_bag_dense_backward_out::call(grad_meta, indices_meta, offset2bag_meta, bag_size_meta, maximum_indices_meta, num_weights, scale_grad_by_freq, mode, per_sample_weights_meta, padding_idx, out_meta); |
4302 | } |
4303 | |
4304 | at::Tensor grad_; |
4305 | if (at::functionalization::impl::isFunctionalTensor(grad)) { |
4306 | at::functionalization::impl::sync(grad); |
4307 | grad_ = at::functionalization::impl::from_functional_tensor(grad); |
4308 | } else { |
4309 | grad_ = grad; |
4310 | } |
4311 | |
4312 | at::Tensor indices_; |
4313 | if (at::functionalization::impl::isFunctionalTensor(indices)) { |
4314 | at::functionalization::impl::sync(indices); |
4315 | indices_ = at::functionalization::impl::from_functional_tensor(indices); |
4316 | } else { |
4317 | indices_ = indices; |
4318 | } |
4319 | |
4320 | at::Tensor offset2bag_; |
4321 | if (at::functionalization::impl::isFunctionalTensor(offset2bag)) { |
4322 | at::functionalization::impl::sync(offset2bag); |
4323 | offset2bag_ = at::functionalization::impl::from_functional_tensor(offset2bag); |
4324 | } else { |
4325 | offset2bag_ = offset2bag; |
4326 | } |
4327 | |
4328 | at::Tensor bag_size_; |
4329 | if (at::functionalization::impl::isFunctionalTensor(bag_size)) { |
4330 | at::functionalization::impl::sync(bag_size); |
4331 | bag_size_ = at::functionalization::impl::from_functional_tensor(bag_size); |
4332 | } else { |
4333 | bag_size_ = bag_size; |
4334 | } |
4335 | |
4336 | at::Tensor maximum_indices_; |
4337 | if (at::functionalization::impl::isFunctionalTensor(maximum_indices)) { |
4338 | at::functionalization::impl::sync(maximum_indices); |
4339 | maximum_indices_ = at::functionalization::impl::from_functional_tensor(maximum_indices); |
4340 | } else { |
4341 | maximum_indices_ = maximum_indices; |
4342 | } |
4343 | |
4344 | c10::optional<at::Tensor> per_sample_weights_; |
4345 | if (at::functionalization::impl::isFunctionalTensor(per_sample_weights)) { |
4346 | at::functionalization::impl::sync(per_sample_weights); |
4347 | per_sample_weights_ = at::functionalization::impl::from_functional_tensor(per_sample_weights); |
4348 | } else { |
4349 | per_sample_weights_ = per_sample_weights; |
4350 | } |
4351 | |
4352 | at::Tensor out_; |
4353 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
4354 | at::functionalization::impl::sync(out); |
4355 | out_ = at::functionalization::impl::from_functional_tensor(out); |
4356 | } else { |
4357 | out_ = out; |
4358 | } |
4359 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
4360 | if ((false || at::functionalization::impl::isFunctionalTensor(grad) || at::functionalization::impl::isFunctionalTensor(indices) || at::functionalization::impl::isFunctionalTensor(offset2bag) || at::functionalization::impl::isFunctionalTensor(bag_size) || at::functionalization::impl::isFunctionalTensor(maximum_indices) || at::functionalization::impl::isFunctionalTensor(per_sample_weights))) { |
// case 1: trying to mutate a non-functional tensor with a functional tensor is an error
4362 | TORCH_INTERNAL_ASSERT(false, |
4363 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
4364 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
4365 | } else { |
4366 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
4367 | at::AutoDispatchSkipFunctionalize guard; |
4368 | at::Tensor tmp_output = at::_ops::_embedding_bag_dense_backward_out::call(grad_, indices_, offset2bag_, bag_size_, maximum_indices_, num_weights, scale_grad_by_freq, mode, per_sample_weights_, padding_idx, out_); |
return out;
4370 | } |
4371 | } else { |
4372 | at::Tensor tmp_output; |
4373 | { |
4374 | at::AutoDispatchSkipFunctionalize guard; |
4375 | tmp_output = at::_ops::_embedding_bag_dense_backward::call(grad_, indices_, offset2bag_, bag_size_, maximum_indices_, num_weights, scale_grad_by_freq, mode, per_sample_weights_, padding_idx); |
4376 | } |
4377 | at::functionalization::impl::replace_(out, tmp_output); |
4378 | at::functionalization::impl::commit_update(out); |
4379 | at::functionalization::impl::sync(out); |
4380 | return out; |
4381 | } |
4382 | } |
4383 | |
4384 | at::Tensor & empty_out_names_out(c10::DispatchKeySet dispatchKeySet, at::IntArrayRef size, c10::optional<at::DimnameList> names, c10::optional<at::MemoryFormat> memory_format, at::Tensor & out) { |
4385 | if (false) { |
4386 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
4387 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
// (We can only do this for inplace ops today though, because they technically all support meta tensors).
4389 | auto out_meta = to_meta(out); |
4390 | at::AutoDispatchSkipFunctionalize func_guard; |
4391 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
4392 | at::_ops::empty_names_out::call(size, names, memory_format, out_meta); |
4393 | } |
4394 | |
4395 | at::Tensor out_; |
4396 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
4397 | at::functionalization::impl::sync(out); |
4398 | out_ = at::functionalization::impl::from_functional_tensor(out); |
4399 | } else { |
4400 | out_ = out; |
4401 | } |
4402 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
4403 | if ((false)) { |
// case 1: trying to mutate a non-functional tensor with a functional tensor is an error
4405 | TORCH_INTERNAL_ASSERT(false, |
4406 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
4407 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
4408 | } else { |
4409 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
4410 | at::AutoDispatchSkipFunctionalize guard; |
4411 | at::Tensor tmp_output = at::_ops::empty_names_out::call(size, names, memory_format, out_); |
return out;
4413 | } |
4414 | } else { |
4415 | at::Tensor tmp_output; |
4416 | { |
4417 | at::AutoDispatchSkipFunctionalize guard; |
4418 | tmp_output = at::_ops::empty_names::call(size, names, out_.scalar_type(), out_.layout(), out_.device(), c10::nullopt, memory_format); |
4419 | } |
4420 | at::functionalization::impl::replace_(out, tmp_output); |
4421 | at::functionalization::impl::commit_update(out); |
4422 | at::functionalization::impl::sync(out); |
4423 | return out; |
4424 | } |
4425 | } |
4426 | |
4427 | at::Tensor & empty_out_out(c10::DispatchKeySet dispatchKeySet, c10::SymIntArrayRef size, c10::optional<at::MemoryFormat> memory_format, at::Tensor & out) { |
4428 | if (false) { |
4429 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
4430 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
// (We can only do this for inplace ops today though, because they technically all support meta tensors).
4432 | auto out_meta = to_meta(out); |
4433 | at::AutoDispatchSkipFunctionalize func_guard; |
4434 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
4435 | at::_ops::empty_out::call(size, memory_format, out_meta); |
4436 | } |
4437 | |
4438 | at::Tensor out_; |
4439 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
4440 | at::functionalization::impl::sync(out); |
4441 | out_ = at::functionalization::impl::from_functional_tensor(out); |
4442 | } else { |
4443 | out_ = out; |
4444 | } |
4445 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
4446 | if ((false)) { |
// case 1: trying to mutate a non-functional tensor with a functional tensor is an error
4448 | TORCH_INTERNAL_ASSERT(false, |
4449 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
4450 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
4451 | } else { |
4452 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
4453 | at::AutoDispatchSkipFunctionalize guard; |
4454 | at::Tensor tmp_output = at::_ops::empty_out::call(size, memory_format, out_); |
return out;
4456 | } |
4457 | } else { |
4458 | at::Tensor tmp_output; |
4459 | { |
4460 | at::AutoDispatchSkipFunctionalize guard; |
4461 | tmp_output = at::_ops::empty_memory_format::call(size, out_.scalar_type(), out_.layout(), out_.device(), c10::nullopt, memory_format); |
4462 | } |
4463 | at::functionalization::impl::replace_(out, tmp_output); |
4464 | at::functionalization::impl::commit_update(out); |
4465 | at::functionalization::impl::sync(out); |
4466 | return out; |
4467 | } |
4468 | } |
4469 | |
4470 | at::Tensor & empty_like_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::optional<at::MemoryFormat> memory_format, at::Tensor & out) { |
4471 | if (false) { |
4472 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
4473 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
// (We can only do this for inplace ops today though, because they technically all support meta tensors).
4475 | auto self_meta = to_meta(self); |
4476 | auto out_meta = to_meta(out); |
4477 | at::AutoDispatchSkipFunctionalize func_guard; |
4478 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
4479 | at::_ops::empty_like_out::call(self_meta, memory_format, out_meta); |
4480 | } |
4481 | |
4482 | at::Tensor self_; |
4483 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
4484 | at::functionalization::impl::sync(self); |
4485 | self_ = at::functionalization::impl::from_functional_tensor(self); |
4486 | } else { |
4487 | self_ = self; |
4488 | } |
4489 | |
4490 | at::Tensor out_; |
4491 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
4492 | at::functionalization::impl::sync(out); |
4493 | out_ = at::functionalization::impl::from_functional_tensor(out); |
4494 | } else { |
4495 | out_ = out; |
4496 | } |
4497 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
4498 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
// case 1: trying to mutate a non-functional tensor with a functional tensor is an error
4500 | TORCH_INTERNAL_ASSERT(false, |
4501 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
4502 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
4503 | } else { |
4504 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
4505 | at::AutoDispatchSkipFunctionalize guard; |
4506 | at::Tensor tmp_output = at::_ops::empty_like_out::call(self_, memory_format, out_); |
return out;
4508 | } |
4509 | } else { |
4510 | at::Tensor tmp_output; |
4511 | { |
4512 | at::AutoDispatchSkipFunctionalize guard; |
4513 | tmp_output = at::_ops::empty_like::call(self_, out_.scalar_type(), out_.layout(), out_.device(), c10::nullopt, memory_format); |
4514 | } |
4515 | at::functionalization::impl::replace_(out, tmp_output); |
4516 | at::functionalization::impl::commit_update(out); |
4517 | at::functionalization::impl::sync(out); |
4518 | return out; |
4519 | } |
4520 | } |
4521 | |
4522 | at::Tensor & erf_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) { |
4523 | if (false) { |
4524 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
4525 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
4526 | // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
4527 | auto self_meta = to_meta(self); |
4528 | auto out_meta = to_meta(out); |
4529 | at::AutoDispatchSkipFunctionalize func_guard; |
4530 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
4531 | at::_ops::erf_out::call(self_meta, out_meta); |
4532 | } |
4533 | |
4534 | at::Tensor self_; |
4535 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
4536 | at::functionalization::impl::sync(self); |
4537 | self_ = at::functionalization::impl::from_functional_tensor(self); |
4538 | } else { |
4539 | self_ = self; |
4540 | } |
4541 | |
4542 | at::Tensor out_; |
4543 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
4544 | at::functionalization::impl::sync(out); |
4545 | out_ = at::functionalization::impl::from_functional_tensor(out); |
4546 | } else { |
4547 | out_ = out; |
4548 | } |
4549 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
4550 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
4551 | // case 1: trying to mutate a non-functional tensor with a functional tensor is an error |
4552 | TORCH_INTERNAL_ASSERT(false, |
4553 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
4554 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
4555 | } else { |
4556 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
4557 | at::AutoDispatchSkipFunctionalize guard; |
4558 | at::Tensor tmp_output = at::_ops::erf_out::call(self_, out_); |
4559 | return out; |
4560 | } |
4561 | } else { |
4562 | at::Tensor tmp_output; |
4563 | { |
4564 | at::AutoDispatchSkipFunctionalize guard; |
4565 | tmp_output = at::_ops::erf::call(self_); |
4566 | } |
4567 | at::functionalization::impl::replace_(out, tmp_output); |
4568 | at::functionalization::impl::commit_update(out); |
4569 | at::functionalization::impl::sync(out); |
4570 | return out; |
4571 | } |
4572 | } |
4573 | |
4574 | at::Tensor & erf_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) { |
4575 | if (true) { |
4576 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
4577 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
4578 | // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
4579 | auto self_meta = to_meta(self); |
4580 | at::AutoDispatchSkipFunctionalize func_guard; |
4581 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
4582 | at::_ops::erf_::call(self_meta); |
4583 | } |
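// (The meta pre-check above is emitted with `if (true)` only for in-place overloads; out= overloads in this file keep it disabled with `if (false)`.)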
4584 | |
4585 | at::Tensor self_; |
4586 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
4587 | at::functionalization::impl::sync(self); |
4588 | self_ = at::functionalization::impl::from_functional_tensor(self); |
4589 | } else { |
4590 | self_ = self; |
4591 | } |
4592 | if (!(true && at::functionalization::impl::isFunctionalTensor(self))) { |
4593 | if ((false)) { |
4594 | // case 1: trying to mutate a non-functional tensor with a functional tensor is an error |
4595 | TORCH_INTERNAL_ASSERT(false, |
4596 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
4597 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
4598 | } else { |
4599 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
4600 | at::AutoDispatchSkipFunctionalize guard; |
4601 | at::Tensor tmp_output = at::_ops::erf_::call(self_); |
4602 | return self; |
4603 | } |
4604 | } else { |
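// Functional path: compute erf out-of-place on the unwrapped tensor, then write the result back into the wrapper for `self`.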
4605 | at::Tensor tmp_output; |
4606 | { |
4607 | at::AutoDispatchSkipFunctionalize guard; |
4608 | tmp_output = at::_ops::erf::call(self_); |
4609 | } |
4610 | at::functionalization::impl::replace_(self, tmp_output); |
4611 | at::functionalization::impl::commit_update(self); |
4612 | at::functionalization::impl::sync(self); |
4613 | return self; |
4614 | } |
4615 | } |
4616 | |
4617 | at::Tensor & erfc_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) { |
4618 | if (false) { |
4619 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
4620 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
4621 | // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
4622 | auto self_meta = to_meta(self); |
4623 | auto out_meta = to_meta(out); |
4624 | at::AutoDispatchSkipFunctionalize func_guard; |
4625 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
4626 | at::_ops::erfc_out::call(self_meta, out_meta); |
4627 | } |
4628 | |
4629 | at::Tensor self_; |
4630 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
4631 | at::functionalization::impl::sync(self); |
4632 | self_ = at::functionalization::impl::from_functional_tensor(self); |
4633 | } else { |
4634 | self_ = self; |
4635 | } |
4636 | |
4637 | at::Tensor out_; |
4638 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
4639 | at::functionalization::impl::sync(out); |
4640 | out_ = at::functionalization::impl::from_functional_tensor(out); |
4641 | } else { |
4642 | out_ = out; |
4643 | } |
4644 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
4645 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
4646 | // case 1: trying to mutate a non-functional tensor with a functional tensor is an error |
4647 | TORCH_INTERNAL_ASSERT(false, |
4648 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
4649 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
4650 | } else { |
4651 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
4652 | at::AutoDispatchSkipFunctionalize guard; |
4653 | at::Tensor tmp_output = at::_ops::erfc_out::call(self_, out_); |
4654 | return out; |
4655 | } |
4656 | } else { |
4657 | at::Tensor tmp_output; |
4658 | { |
4659 | at::AutoDispatchSkipFunctionalize guard; |
4660 | tmp_output = at::_ops::erfc::call(self_); |
4661 | } |
4662 | at::functionalization::impl::replace_(out, tmp_output); |
4663 | at::functionalization::impl::commit_update(out); |
4664 | at::functionalization::impl::sync(out); |
4665 | return out; |
4666 | } |
4667 | } |
4668 | |
4669 | at::Tensor & erfc_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) { |
4670 | if (true) { |
4671 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
4672 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
4673 | // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
4674 | auto self_meta = to_meta(self); |
4675 | at::AutoDispatchSkipFunctionalize func_guard; |
4676 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
4677 | at::_ops::erfc_::call(self_meta); |
4678 | } |
4679 | |
4680 | at::Tensor self_; |
4681 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
4682 | at::functionalization::impl::sync(self); |
4683 | self_ = at::functionalization::impl::from_functional_tensor(self); |
4684 | } else { |
4685 | self_ = self; |
4686 | } |
4687 | if (!(true && at::functionalization::impl::isFunctionalTensor(self))) { |
4688 | if ((false)) { |
4689 | // case 1: trying to mutate a non-functional tensor with a functional tensor is an error |
4690 | TORCH_INTERNAL_ASSERT(false, |
4691 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
4692 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
4693 | } else { |
4694 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
4695 | at::AutoDispatchSkipFunctionalize guard; |
4696 | at::Tensor tmp_output = at::_ops::erfc_::call(self_); |
4697 | return self; |
4698 | } |
4699 | } else { |
4700 | at::Tensor tmp_output; |
4701 | { |
4702 | at::AutoDispatchSkipFunctionalize guard; |
4703 | tmp_output = at::_ops::erfc::call(self_); |
4704 | } |
4705 | at::functionalization::impl::replace_(self, tmp_output); |
4706 | at::functionalization::impl::commit_update(self); |
4707 | at::functionalization::impl::sync(self); |
4708 | return self; |
4709 | } |
4710 | } |
4711 | |
4712 | at::Tensor & eye_out_out(c10::DispatchKeySet dispatchKeySet, int64_t n, at::Tensor & out) { |
4713 | if (false) { |
4714 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
4715 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
4716 | // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
4717 | auto out_meta = to_meta(out); |
4718 | at::AutoDispatchSkipFunctionalize func_guard; |
4719 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
4720 | at::_ops::eye_out::call(n, out_meta); |
4721 | } |
4722 | |
4723 | at::Tensor out_; |
4724 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
4725 | at::functionalization::impl::sync(out); |
4726 | out_ = at::functionalization::impl::from_functional_tensor(out); |
4727 | } else { |
4728 | out_ = out; |
4729 | } |
4730 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
4731 | if ((false)) { |
4732 | // case 1: trying to mutate a non-functional tensor with a functional tensor is an error |
4733 | TORCH_INTERNAL_ASSERT(false, |
4734 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
4735 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
4736 | } else { |
4737 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
4738 | at::AutoDispatchSkipFunctionalize guard; |
4739 | at::Tensor tmp_output = at::_ops::eye_out::call(n, out_); |
4740 | return out; |
4741 | } |
4742 | } else { |
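// Functional path for a factory op: `eye` takes no tensor inputs, so the dtype/layout/device for the functional call are read from the existing `out_`.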
4743 | at::Tensor tmp_output; |
4744 | { |
4745 | at::AutoDispatchSkipFunctionalize guard; |
4746 | tmp_output = at::_ops::eye::call(n, out_.scalar_type(), out_.layout(), out_.device(), c10::nullopt); |
4747 | } |
4748 | at::functionalization::impl::replace_(out, tmp_output); |
4749 | at::functionalization::impl::commit_update(out); |
4750 | at::functionalization::impl::sync(out); |
4751 | return out; |
4752 | } |
4753 | } |
4754 | |
4755 | at::Tensor & eye_out_m_out(c10::DispatchKeySet dispatchKeySet, int64_t n, int64_t m, at::Tensor & out) { |
4756 | if (false) { |
4757 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
4758 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
4759 | // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
4760 | auto out_meta = to_meta(out); |
4761 | at::AutoDispatchSkipFunctionalize func_guard; |
4762 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
4763 | at::_ops::eye_m_out::call(n, m, out_meta); |
4764 | } |
4765 | |
4766 | at::Tensor out_; |
4767 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
4768 | at::functionalization::impl::sync(out); |
4769 | out_ = at::functionalization::impl::from_functional_tensor(out); |
4770 | } else { |
4771 | out_ = out; |
4772 | } |
4773 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
4774 | if ((false)) { |
4775 | // case 1: trying to mutate a non-functional tensor with a functional tensor is an error |
4776 | TORCH_INTERNAL_ASSERT(false, |
4777 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
4778 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
4779 | } else { |
4780 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
4781 | at::AutoDispatchSkipFunctionalize guard; |
4782 | at::Tensor tmp_output = at::_ops::eye_m_out::call(n, m, out_); |
4783 | return out; |
4784 | } |
4785 | } else { |
4786 | at::Tensor tmp_output; |
4787 | { |
4788 | at::AutoDispatchSkipFunctionalize guard; |
4789 | tmp_output = at::_ops::eye_m::call(n, m, out_.scalar_type(), out_.layout(), out_.device(), c10::nullopt); |
4790 | } |
4791 | at::functionalization::impl::replace_(out, tmp_output); |
4792 | at::functionalization::impl::commit_update(out); |
4793 | at::functionalization::impl::sync(out); |
4794 | return out; |
4795 | } |
4796 | } |
4797 | |
4798 | at::Tensor & gcd_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) { |
4799 | if (false) { |
4800 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
4801 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
4802 | // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
4803 | auto self_meta = to_meta(self); |
4804 | auto other_meta = to_meta(other); |
4805 | auto out_meta = to_meta(out); |
4806 | at::AutoDispatchSkipFunctionalize func_guard; |
4807 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
4808 | at::_ops::gcd_out::call(self_meta, other_meta, out_meta); |
4809 | } |
4810 | |
4811 | at::Tensor self_; |
4812 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
4813 | at::functionalization::impl::sync(self); |
4814 | self_ = at::functionalization::impl::from_functional_tensor(self); |
4815 | } else { |
4816 | self_ = self; |
4817 | } |
4818 | |
4819 | at::Tensor other_; |
4820 | if (at::functionalization::impl::isFunctionalTensor(other)) { |
4821 | at::functionalization::impl::sync(other); |
4822 | other_ = at::functionalization::impl::from_functional_tensor(other); |
4823 | } else { |
4824 | other_ = other; |
4825 | } |
4826 | |
4827 | at::Tensor out_; |
4828 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
4829 | at::functionalization::impl::sync(out); |
4830 | out_ = at::functionalization::impl::from_functional_tensor(out); |
4831 | } else { |
4832 | out_ = out; |
4833 | } |
4834 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
4835 | if ((false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(other))) { |
4836 | // case 1: trying to mutate a non-functional tensor with a functional tensor is an error |
4837 | TORCH_INTERNAL_ASSERT(false, |
4838 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
4839 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
4840 | } else { |
4841 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
4842 | at::AutoDispatchSkipFunctionalize guard; |
4843 | at::Tensor tmp_output = at::_ops::gcd_out::call(self_, other_, out_); |
4844 | return out; |
4845 | } |
4846 | } else { |
4847 | at::Tensor tmp_output; |
4848 | { |
4849 | at::AutoDispatchSkipFunctionalize guard; |
4850 | tmp_output = at::_ops::gcd::call(self_, other_); |
4851 | } |
4852 | at::functionalization::impl::replace_(out, tmp_output); |
4853 | at::functionalization::impl::commit_update(out); |
4854 | at::functionalization::impl::sync(out); |
4855 | return out; |
4856 | } |
4857 | } |
4858 | |
4859 | at::Tensor & gcd_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other) { |
4860 | if (true) { |
4861 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
4862 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
4863 | // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
4864 | auto self_meta = to_meta(self); |
4865 | auto other_meta = to_meta(other); |
4866 | at::AutoDispatchSkipFunctionalize func_guard; |
4867 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
4868 | at::_ops::gcd_::call(self_meta, other_meta); |
4869 | } |
4870 | |
4871 | at::Tensor self_; |
4872 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
4873 | at::functionalization::impl::sync(self); |
4874 | self_ = at::functionalization::impl::from_functional_tensor(self); |
4875 | } else { |
4876 | self_ = self; |
4877 | } |
4878 | |
4879 | at::Tensor other_; |
4880 | if (at::functionalization::impl::isFunctionalTensor(other)) { |
4881 | at::functionalization::impl::sync(other); |
4882 | other_ = at::functionalization::impl::from_functional_tensor(other); |
4883 | } else { |
4884 | other_ = other; |
4885 | } |
4886 | if (!(true && at::functionalization::impl::isFunctionalTensor(self))) { |
4887 | if ((false || at::functionalization::impl::isFunctionalTensor(other))) { |
4888 | // case 1: trying to mutate a non-functional tensor with a functional tensor is an error |
4889 | TORCH_INTERNAL_ASSERT(false, |
4890 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
4891 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
4892 | } else { |
4893 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
4894 | at::AutoDispatchSkipFunctionalize guard; |
4895 | at::Tensor tmp_output = at::_ops::gcd_::call(self_, other_); |
4896 | return self; |
4897 | } |
4898 | } else { |
4899 | at::Tensor tmp_output; |
4900 | { |
4901 | at::AutoDispatchSkipFunctionalize guard; |
4902 | tmp_output = at::_ops::gcd::call(self_, other_); |
4903 | } |
4904 | at::functionalization::impl::replace_(self, tmp_output); |
4905 | at::functionalization::impl::commit_update(self); |
4906 | at::functionalization::impl::sync(self); |
4907 | return self; |
4908 | } |
4909 | } |
4910 | |
4911 | ::std::tuple<at::Tensor &,at::Tensor &> grid_sampler_2d_backward_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners, ::std::array<bool,2> output_mask, at::Tensor & out0, at::Tensor & out1) { |
4912 | if (false) { |
4913 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
4914 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
4915 | // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
4916 | auto grad_output_meta = to_meta(grad_output); |
4917 | auto input_meta = to_meta(input); |
4918 | auto grid_meta = to_meta(grid); |
4919 | auto out0_meta = to_meta(out0); |
4920 | auto out1_meta = to_meta(out1); |
4921 | at::AutoDispatchSkipFunctionalize func_guard; |
4922 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
4923 | at::_ops::grid_sampler_2d_backward_out::call(grad_output_meta, input_meta, grid_meta, interpolation_mode, padding_mode, align_corners, output_mask, out0_meta, out1_meta); |
4924 | } |
4925 | |
4926 | at::Tensor grad_output_; |
4927 | if (at::functionalization::impl::isFunctionalTensor(grad_output)) { |
4928 | at::functionalization::impl::sync(grad_output); |
4929 | grad_output_ = at::functionalization::impl::from_functional_tensor(grad_output); |
4930 | } else { |
4931 | grad_output_ = grad_output; |
4932 | } |
4933 | |
4934 | at::Tensor input_; |
4935 | if (at::functionalization::impl::isFunctionalTensor(input)) { |
4936 | at::functionalization::impl::sync(input); |
4937 | input_ = at::functionalization::impl::from_functional_tensor(input); |
4938 | } else { |
4939 | input_ = input; |
4940 | } |
4941 | |
4942 | at::Tensor grid_; |
4943 | if (at::functionalization::impl::isFunctionalTensor(grid)) { |
4944 | at::functionalization::impl::sync(grid); |
4945 | grid_ = at::functionalization::impl::from_functional_tensor(grid); |
4946 | } else { |
4947 | grid_ = grid; |
4948 | } |
4949 | |
4950 | at::Tensor out0_; |
4951 | if (at::functionalization::impl::isFunctionalTensor(out0)) { |
4952 | at::functionalization::impl::sync(out0); |
4953 | out0_ = at::functionalization::impl::from_functional_tensor(out0); |
4954 | } else { |
4955 | out0_ = out0; |
4956 | } |
4957 | |
4958 | at::Tensor out1_; |
4959 | if (at::functionalization::impl::isFunctionalTensor(out1)) { |
4960 | at::functionalization::impl::sync(out1); |
4961 | out1_ = at::functionalization::impl::from_functional_tensor(out1); |
4962 | } else { |
4963 | out1_ = out1; |
4964 | } |
4965 | if (!(true && at::functionalization::impl::isFunctionalTensor(out0) && at::functionalization::impl::isFunctionalTensor(out1))) { |
4966 | if ((false || at::functionalization::impl::isFunctionalTensor(grad_output) || at::functionalization::impl::isFunctionalTensor(input) || at::functionalization::impl::isFunctionalTensor(grid))) { |
4967 | // case 1: trying to mutate a non-functional tensor with a functional tensor is an error |
4968 | TORCH_INTERNAL_ASSERT(false, |
4969 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
4970 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
4971 | } else { |
4972 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
4973 | at::AutoDispatchSkipFunctionalize guard; |
4974 | ::std::tuple<at::Tensor,at::Tensor> tmp_output = at::_ops::grid_sampler_2d_backward_out::call(grad_output_, input_, grid_, interpolation_mode, padding_mode, align_corners, output_mask, out0_, out1_); |
4975 | return ::std::tuple<at::Tensor &,at::Tensor &>(out0, out1); |
4976 | } |
4977 | } else { |
4978 | ::std::tuple<at::Tensor,at::Tensor> tmp_output; |
4979 | { |
4980 | at::AutoDispatchSkipFunctionalize guard; |
4981 | tmp_output = at::_ops::grid_sampler_2d_backward::call(grad_output_, input_, grid_, interpolation_mode, padding_mode, align_corners, output_mask); |
4982 | } |
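// Write each element of the functional result back into its corresponding out argument.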
4983 | at::functionalization::impl::replace_(out0, std::get<0>(tmp_output)); |
4984 | at::functionalization::impl::commit_update(out0); |
4985 | at::functionalization::impl::sync(out0); |
4986 | at::functionalization::impl::replace_(out1, std::get<1>(tmp_output)); |
4987 | at::functionalization::impl::commit_update(out1); |
4988 | at::functionalization::impl::sync(out1); |
4989 | return ::std::tuple<at::Tensor &,at::Tensor &>(out0, out1); |
4990 | } |
4991 | } |
4992 | |
4993 | ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> native_group_norm_backward_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_out, const at::Tensor & input, const at::Tensor & mean, const at::Tensor & rstd, const c10::optional<at::Tensor> & weight, c10::SymInt N, c10::SymInt C, c10::SymInt HxW, int64_t group, ::std::array<bool,3> output_mask, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) { |
4994 | if (false) { |
4995 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
4996 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
4997 | // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
4998 | auto grad_out_meta = to_meta(grad_out); |
4999 | auto input_meta = to_meta(input); |
5000 | auto mean_meta = to_meta(mean); |
5001 | auto rstd_meta = to_meta(rstd); |
5002 | auto weight_meta = to_meta(weight); |
5003 | auto out0_meta = to_meta(out0); |
5004 | auto out1_meta = to_meta(out1); |
5005 | auto out2_meta = to_meta(out2); |
5006 | at::AutoDispatchSkipFunctionalize func_guard; |
5007 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
5008 | at::_ops::native_group_norm_backward_out::call(grad_out_meta, input_meta, mean_meta, rstd_meta, weight_meta, N, C, HxW, group, output_mask, out0_meta, out1_meta, out2_meta); |
5009 | } |
5010 | |
5011 | at::Tensor grad_out_; |
5012 | if (at::functionalization::impl::isFunctionalTensor(grad_out)) { |
5013 | at::functionalization::impl::sync(grad_out); |
5014 | grad_out_ = at::functionalization::impl::from_functional_tensor(grad_out); |
5015 | } else { |
5016 | grad_out_ = grad_out; |
5017 | } |
5018 | |
5019 | at::Tensor input_; |
5020 | if (at::functionalization::impl::isFunctionalTensor(input)) { |
5021 | at::functionalization::impl::sync(input); |
5022 | input_ = at::functionalization::impl::from_functional_tensor(input); |
5023 | } else { |
5024 | input_ = input; |
5025 | } |
5026 | |
5027 | at::Tensor mean_; |
5028 | if (at::functionalization::impl::isFunctionalTensor(mean)) { |
5029 | at::functionalization::impl::sync(mean); |
5030 | mean_ = at::functionalization::impl::from_functional_tensor(mean); |
5031 | } else { |
5032 | mean_ = mean; |
5033 | } |
5034 | |
5035 | at::Tensor rstd_; |
5036 | if (at::functionalization::impl::isFunctionalTensor(rstd)) { |
5037 | at::functionalization::impl::sync(rstd); |
5038 | rstd_ = at::functionalization::impl::from_functional_tensor(rstd); |
5039 | } else { |
5040 | rstd_ = rstd; |
5041 | } |
5042 | |
5043 | c10::optional<at::Tensor> weight_; |
5044 | if (at::functionalization::impl::isFunctionalTensor(weight)) { |
5045 | at::functionalization::impl::sync(weight); |
5046 | weight_ = at::functionalization::impl::from_functional_tensor(weight); |
5047 | } else { |
5048 | weight_ = weight; |
5049 | } |
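// `weight` is optional; a nullopt value is not a functional tensor, so it is passed through unchanged.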
5050 | |
5051 | at::Tensor out0_; |
5052 | if (at::functionalization::impl::isFunctionalTensor(out0)) { |
5053 | at::functionalization::impl::sync(out0); |
5054 | out0_ = at::functionalization::impl::from_functional_tensor(out0); |
5055 | } else { |
5056 | out0_ = out0; |
5057 | } |
5058 | |
5059 | at::Tensor out1_; |
5060 | if (at::functionalization::impl::isFunctionalTensor(out1)) { |
5061 | at::functionalization::impl::sync(out1); |
5062 | out1_ = at::functionalization::impl::from_functional_tensor(out1); |
5063 | } else { |
5064 | out1_ = out1; |
5065 | } |
5066 | |
5067 | at::Tensor out2_; |
5068 | if (at::functionalization::impl::isFunctionalTensor(out2)) { |
5069 | at::functionalization::impl::sync(out2); |
5070 | out2_ = at::functionalization::impl::from_functional_tensor(out2); |
5071 | } else { |
5072 | out2_ = out2; |
5073 | } |
5074 | if (!(true && at::functionalization::impl::isFunctionalTensor(out0) && at::functionalization::impl::isFunctionalTensor(out1) && at::functionalization::impl::isFunctionalTensor(out2))) { |
5075 | if ((false || at::functionalization::impl::isFunctionalTensor(grad_out) || at::functionalization::impl::isFunctionalTensor(input) || at::functionalization::impl::isFunctionalTensor(mean) || at::functionalization::impl::isFunctionalTensor(rstd) || at::functionalization::impl::isFunctionalTensor(weight))) { |
5076 | // case 1: trying to mutate a non-functional tensor with a functional tensor is an error |
5077 | TORCH_INTERNAL_ASSERT(false, |
5078 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
5079 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
5080 | } else { |
5081 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
5082 | at::AutoDispatchSkipFunctionalize guard; |
5083 | ::std::tuple<at::Tensor,at::Tensor,at::Tensor> tmp_output = at::_ops::native_group_norm_backward_out::call(grad_out_, input_, mean_, rstd_, weight_, N, C, HxW, group, output_mask, out0_, out1_, out2_); |
5084 | return ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &>(out0, out1, out2); |
5085 | } |
5086 | } else { |
5087 | ::std::tuple<at::Tensor,at::Tensor,at::Tensor> tmp_output; |
5088 | { |
5089 | at::AutoDispatchSkipFunctionalize guard; |
5090 | tmp_output = at::_ops::native_group_norm_backward::call(grad_out_, input_, mean_, rstd_, weight_, N, C, HxW, group, output_mask); |
5091 | } |
5092 | at::functionalization::impl::replace_(out0, std::get<0>(tmp_output)); |
5093 | at::functionalization::impl::commit_update(out0); |
5094 | at::functionalization::impl::sync(out0); |
5095 | at::functionalization::impl::replace_(out1, std::get<1>(tmp_output)); |
5096 | at::functionalization::impl::commit_update(out1); |
5097 | at::functionalization::impl::sync(out1); |
5098 | at::functionalization::impl::replace_(out2, std::get<2>(tmp_output)); |
5099 | at::functionalization::impl::commit_update(out2); |
5100 | at::functionalization::impl::sync(out2); |
5101 | return ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &>(out0, out1, out2); |
5102 | } |
5103 | } |
5104 | |
5105 | at::Tensor & _fft_r2c_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef dim, int64_t normalization, bool onesided, at::Tensor & out) { |
5106 | if (false) { |
5107 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
5108 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
5109 | // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
5110 | auto self_meta = to_meta(self); |
5111 | auto out_meta = to_meta(out); |
5112 | at::AutoDispatchSkipFunctionalize func_guard; |
5113 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
5114 | at::_ops::_fft_r2c_out::call(self_meta, dim, normalization, onesided, out_meta); |
5115 | } |
5116 | |
5117 | at::Tensor self_; |
5118 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
5119 | at::functionalization::impl::sync(self); |
5120 | self_ = at::functionalization::impl::from_functional_tensor(self); |
5121 | } else { |
5122 | self_ = self; |
5123 | } |
5124 | |
5125 | at::Tensor out_; |
5126 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
5127 | at::functionalization::impl::sync(out); |
5128 | out_ = at::functionalization::impl::from_functional_tensor(out); |
5129 | } else { |
5130 | out_ = out; |
5131 | } |
5132 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
5133 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
5134 | // case 1: trying to mutate a non-functional tensor with a functional tensor is an error |
5135 | TORCH_INTERNAL_ASSERT(false, |
5136 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
5137 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
5138 | } else { |
5139 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
5140 | at::AutoDispatchSkipFunctionalize guard; |
5141 | at::Tensor tmp_output = at::_ops::_fft_r2c_out::call(self_, dim, normalization, onesided, out_); |
5142 | return out; |
5143 | } |
5144 | } else { |
5145 | at::Tensor tmp_output; |
5146 | { |
5147 | at::AutoDispatchSkipFunctionalize guard; |
5148 | tmp_output = at::_ops::_fft_r2c::call(self_, dim, normalization, onesided); |
5149 | } |
5150 | at::functionalization::impl::replace_(out, tmp_output); |
5151 | at::functionalization::impl::commit_update(out); |
5152 | at::functionalization::impl::sync(out); |
5153 | return out; |
5154 | } |
5155 | } |
5156 | |
5157 | at::Tensor & index_out_Tensor_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const c10::List<c10::optional<at::Tensor>> & indices, at::Tensor & out) { |
5158 | if (false) { |
5159 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
5160 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
5161 | // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
5162 | auto self_meta = to_meta(self); |
5163 | auto indices_meta = to_meta(indices); |
5164 | auto out_meta = to_meta(out); |
5165 | at::AutoDispatchSkipFunctionalize func_guard; |
5166 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
5167 | at::_ops::index_Tensor_out::call(self_meta, indices_meta, out_meta); |
5168 | } |
5169 | |
5170 | at::Tensor self_; |
5171 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
5172 | at::functionalization::impl::sync(self); |
5173 | self_ = at::functionalization::impl::from_functional_tensor(self); |
5174 | } else { |
5175 | self_ = self; |
5176 | } |
5177 | |
5178 | c10::List<c10::optional<at::Tensor>> indices_; |
5179 | if (at::functionalization::impl::isFunctionalTensor(indices)) { |
5180 | at::functionalization::impl::sync(indices); |
5181 | indices_ = at::functionalization::impl::from_functional_tensor(indices); |
5182 | } else { |
5183 | indices_ = indices; |
5184 | } |
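// `indices` is a list of optional tensors; the sync/unwrap step applies to the list as a whole.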
5185 | |
5186 | at::Tensor out_; |
5187 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
5188 | at::functionalization::impl::sync(out); |
5189 | out_ = at::functionalization::impl::from_functional_tensor(out); |
5190 | } else { |
5191 | out_ = out; |
5192 | } |
5193 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
5194 | if ((false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(indices))) { |
5195 | // case 1: trying to mutate a non-functional tensor with a functional tensor is an error |
5196 | TORCH_INTERNAL_ASSERT(false, |
5197 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
5198 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
5199 | } else { |
5200 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
5201 | at::AutoDispatchSkipFunctionalize guard; |
5202 | at::Tensor tmp_output = at::_ops::index_Tensor_out::call(self_, indices_, out_); |
5203 | return out; |
5204 | } |
5205 | } else { |
5206 | at::Tensor tmp_output; |
5207 | { |
5208 | at::AutoDispatchSkipFunctionalize guard; |
5209 | tmp_output = at::_ops::index_Tensor::call(self_, indices_); |
5210 | } |
5211 | at::functionalization::impl::replace_(out, tmp_output); |
5212 | at::functionalization::impl::commit_update(out); |
5213 | at::functionalization::impl::sync(out); |
5214 | return out; |
5215 | } |
5216 | } |
5217 | |
5218 | at::Tensor & index_put_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const c10::List<c10::optional<at::Tensor>> & indices, const at::Tensor & values, bool accumulate, at::Tensor & out) { |
5219 | if (false) { |
5220 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
5221 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
5222 | // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
5223 | auto self_meta = to_meta(self); |
5224 | auto indices_meta = to_meta(indices); |
5225 | auto values_meta = to_meta(values); |
5226 | auto out_meta = to_meta(out); |
5227 | at::AutoDispatchSkipFunctionalize func_guard; |
5228 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
5229 | at::_ops::index_put_out::call(self_meta, indices_meta, values_meta, accumulate, out_meta); |
5230 | } |
5231 | |
5232 | at::Tensor self_; |
5233 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
5234 | at::functionalization::impl::sync(self); |
5235 | self_ = at::functionalization::impl::from_functional_tensor(self); |
5236 | } else { |
5237 | self_ = self; |
5238 | } |
5239 | |
5240 | c10::List<c10::optional<at::Tensor>> indices_; |
5241 | if (at::functionalization::impl::isFunctionalTensor(indices)) { |
5242 | at::functionalization::impl::sync(indices); |
5243 | indices_ = at::functionalization::impl::from_functional_tensor(indices); |
5244 | } else { |
5245 | indices_ = indices; |
5246 | } |
5247 | |
5248 | at::Tensor values_; |
5249 | if (at::functionalization::impl::isFunctionalTensor(values)) { |
5250 | at::functionalization::impl::sync(values); |
5251 | values_ = at::functionalization::impl::from_functional_tensor(values); |
5252 | } else { |
5253 | values_ = values; |
5254 | } |
5255 | |
5256 | at::Tensor out_; |
5257 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
5258 | at::functionalization::impl::sync(out); |
5259 | out_ = at::functionalization::impl::from_functional_tensor(out); |
5260 | } else { |
5261 | out_ = out; |
5262 | } |
5263 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
5264 | if ((false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(indices) || at::functionalization::impl::isFunctionalTensor(values))) { |
5265 | // case 1: trying to mutate a non-functional tensor with a functional tensor is an error |
5266 | TORCH_INTERNAL_ASSERT(false, |
5267 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
5268 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
5269 | } else { |
5270 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
5271 | at::AutoDispatchSkipFunctionalize guard; |
5272 | at::Tensor tmp_output = at::_ops::index_put_out::call(self_, indices_, values_, accumulate, out_); |
5273 | return out; |
5274 | } |
5275 | } else { |
5276 | at::Tensor tmp_output; |
5277 | { |
5278 | at::AutoDispatchSkipFunctionalize guard; |
5279 | tmp_output = at::_ops::index_put::call(self_, indices_, values_, accumulate); |
5280 | } |
5281 | at::functionalization::impl::replace_(out, tmp_output); |
5282 | at::functionalization::impl::commit_update(out); |
5283 | at::functionalization::impl::sync(out); |
5284 | return out; |
5285 | } |
5286 | } |
5287 | |
5288 | at::Tensor & index_put_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const c10::List<c10::optional<at::Tensor>> & indices, const at::Tensor & values, bool accumulate) { |
5289 | if (true) { |
5290 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
5291 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
5292 | // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
5293 | auto self_meta = to_meta(self); |
5294 | auto indices_meta = to_meta(indices); |
5295 | auto values_meta = to_meta(values); |
5296 | at::AutoDispatchSkipFunctionalize func_guard; |
5297 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
5298 | at::_ops::index_put_::call(self_meta, indices_meta, values_meta, accumulate); |
5299 | } |
5300 | |
5301 | at::Tensor self_; |
5302 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
5303 | at::functionalization::impl::sync(self); |
5304 | self_ = at::functionalization::impl::from_functional_tensor(self); |
5305 | } else { |
5306 | self_ = self; |
5307 | } |
5308 | |
5309 | c10::List<c10::optional<at::Tensor>> indices_; |
5310 | if (at::functionalization::impl::isFunctionalTensor(indices)) { |
5311 | at::functionalization::impl::sync(indices); |
5312 | indices_ = at::functionalization::impl::from_functional_tensor(indices); |
5313 | } else { |
5314 | indices_ = indices; |
5315 | } |
5316 | |
5317 | at::Tensor values_; |
5318 | if (at::functionalization::impl::isFunctionalTensor(values)) { |
5319 | at::functionalization::impl::sync(values); |
5320 | values_ = at::functionalization::impl::from_functional_tensor(values); |
5321 | } else { |
5322 | values_ = values; |
5323 | } |
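// Only `self` is mutated here; the branch below keys off whether `self` is functional, and functional `indices`/`values` paired with a non-functional `self` is an error.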
5324 | if (!(true && at::functionalization::impl::isFunctionalTensor(self))) { |
5325 | if ((false || at::functionalization::impl::isFunctionalTensor(indices) || at::functionalization::impl::isFunctionalTensor(values))) { |
5326 | // case 1: trying to mutate a non-functional tensor with a functional tensor is an error |
5327 | TORCH_INTERNAL_ASSERT(false, |
5328 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
5329 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
5330 | } else { |
5331 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
5332 | at::AutoDispatchSkipFunctionalize guard; |
5333 | at::Tensor tmp_output = at::_ops::index_put_::call(self_, indices_, values_, accumulate); |
5334 | return self; |
5335 | } |
5336 | } else { |
5337 | at::Tensor tmp_output; |
5338 | { |
5339 | at::AutoDispatchSkipFunctionalize guard; |
5340 | tmp_output = at::_ops::index_put::call(self_, indices_, values_, accumulate); |
5341 | } |
5342 | at::functionalization::impl::replace_(self, tmp_output); |
5343 | at::functionalization::impl::commit_update(self); |
5344 | at::functionalization::impl::sync(self); |
5345 | return self; |
5346 | } |
5347 | } |
5348 | |
5349 | ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> linear_backward_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & grad_output, const at::Tensor & weight, ::std::array<bool,3> output_mask, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) { |
5350 | if (false) { |
5351 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
5352 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
5353 | // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
5354 | auto self_meta = to_meta(self); |
5355 | auto grad_output_meta = to_meta(grad_output); |
5356 | auto weight_meta = to_meta(weight); |
5357 | auto out0_meta = to_meta(out0); |
5358 | auto out1_meta = to_meta(out1); |
5359 | auto out2_meta = to_meta(out2); |
5360 | at::AutoDispatchSkipFunctionalize func_guard; |
5361 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
5362 | at::_ops::linear_backward_out::call(self_meta, grad_output_meta, weight_meta, output_mask, out0_meta, out1_meta, out2_meta); |
5363 | } |
5364 | |
5365 | at::Tensor self_; |
5366 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
5367 | at::functionalization::impl::sync(self); |
5368 | self_ = at::functionalization::impl::from_functional_tensor(self); |
5369 | } else { |
5370 | self_ = self; |
5371 | } |
5372 | |
5373 | at::Tensor grad_output_; |
5374 | if (at::functionalization::impl::isFunctionalTensor(grad_output)) { |
5375 | at::functionalization::impl::sync(grad_output); |
5376 | grad_output_ = at::functionalization::impl::from_functional_tensor(grad_output); |
5377 | } else { |
5378 | grad_output_ = grad_output; |
5379 | } |
5380 | |
5381 | at::Tensor weight_; |
5382 | if (at::functionalization::impl::isFunctionalTensor(weight)) { |
5383 | at::functionalization::impl::sync(weight); |
5384 | weight_ = at::functionalization::impl::from_functional_tensor(weight); |
5385 | } else { |
5386 | weight_ = weight; |
5387 | } |
5388 | |
5389 | at::Tensor out0_; |
5390 | if (at::functionalization::impl::isFunctionalTensor(out0)) { |
5391 | at::functionalization::impl::sync(out0); |
5392 | out0_ = at::functionalization::impl::from_functional_tensor(out0); |
5393 | } else { |
5394 | out0_ = out0; |
5395 | } |
5396 | |
5397 | at::Tensor out1_; |
5398 | if (at::functionalization::impl::isFunctionalTensor(out1)) { |
5399 | at::functionalization::impl::sync(out1); |
5400 | out1_ = at::functionalization::impl::from_functional_tensor(out1); |
5401 | } else { |
5402 | out1_ = out1; |
5403 | } |
5404 | |
5405 | at::Tensor out2_; |
5406 | if (at::functionalization::impl::isFunctionalTensor(out2)) { |
5407 | at::functionalization::impl::sync(out2); |
5408 | out2_ = at::functionalization::impl::from_functional_tensor(out2); |
5409 | } else { |
5410 | out2_ = out2; |
5411 | } |
5412 | if (!(true && at::functionalization::impl::isFunctionalTensor(out0) && at::functionalization::impl::isFunctionalTensor(out1) && at::functionalization::impl::isFunctionalTensor(out2))) { |
5413 | if ((false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(grad_output) || at::functionalization::impl::isFunctionalTensor(weight))) { |
5414 | // case 1: trying to mutate a non-functional tensor with a functional tensor is an error |
5415 | TORCH_INTERNAL_ASSERT(false, |
5416 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
5417 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
5418 | } else { |
5419 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
5420 | at::AutoDispatchSkipFunctionalize guard; |
5421 | ::std::tuple<at::Tensor,at::Tensor,at::Tensor> tmp_output = at::_ops::linear_backward_out::call(self_, grad_output_, weight_, output_mask, out0_, out1_, out2_); |
5422 | return ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &>(out0, out1, out2); |
5423 | } |
5424 | } else { |
5425 | ::std::tuple<at::Tensor,at::Tensor,at::Tensor> tmp_output; |
5426 | { |
5427 | at::AutoDispatchSkipFunctionalize guard; |
5428 | tmp_output = at::_ops::linear_backward::call(self_, grad_output_, weight_, output_mask); |
5429 | } |
5430 | at::functionalization::impl::replace_(out0, std::get<0>(tmp_output)); |
5431 | at::functionalization::impl::commit_update(out0); |
5432 | at::functionalization::impl::sync(out0); |
5433 | at::functionalization::impl::replace_(out1, std::get<1>(tmp_output)); |
5434 | at::functionalization::impl::commit_update(out1); |
5435 | at::functionalization::impl::sync(out1); |
5436 | at::functionalization::impl::replace_(out2, std::get<2>(tmp_output)); |
5437 | at::functionalization::impl::commit_update(out2); |
5438 | at::functionalization::impl::sync(out2); |
5439 | return ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &>(out0, out1, out2); |
5440 | } |
5441 | } |
5442 | |
5443 | ::std::tuple<at::Tensor &,at::Tensor &> mkldnn_linear_backward_weights_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & input, const at::Tensor & weight, bool bias_defined, at::Tensor & out0, at::Tensor & out1) { |
5444 | if (false) { |
5445 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
5446 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
5447 | // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
5448 | auto grad_output_meta = to_meta(grad_output); |
5449 | auto input_meta = to_meta(input); |
5450 | auto weight_meta = to_meta(weight); |
5451 | auto out0_meta = to_meta(out0); |
5452 | auto out1_meta = to_meta(out1); |
5453 | at::AutoDispatchSkipFunctionalize func_guard; |
5454 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
5455 | at::_ops::mkldnn_linear_backward_weights_out::call(grad_output_meta, input_meta, weight_meta, bias_defined, out0_meta, out1_meta); |
5456 | } |
5457 | |
5458 | at::Tensor grad_output_; |
5459 | if (at::functionalization::impl::isFunctionalTensor(grad_output)) { |
5460 | at::functionalization::impl::sync(grad_output); |
5461 | grad_output_ = at::functionalization::impl::from_functional_tensor(grad_output); |
5462 | } else { |
5463 | grad_output_ = grad_output; |
5464 | } |
5465 | |
5466 | at::Tensor input_; |
5467 | if (at::functionalization::impl::isFunctionalTensor(input)) { |
5468 | at::functionalization::impl::sync(input); |
5469 | input_ = at::functionalization::impl::from_functional_tensor(input); |
5470 | } else { |
5471 | input_ = input; |
5472 | } |
5473 | |
5474 | at::Tensor weight_; |
5475 | if (at::functionalization::impl::isFunctionalTensor(weight)) { |
5476 | at::functionalization::impl::sync(weight); |
5477 | weight_ = at::functionalization::impl::from_functional_tensor(weight); |
5478 | } else { |
5479 | weight_ = weight; |
5480 | } |
5481 | |
5482 | at::Tensor out0_; |
5483 | if (at::functionalization::impl::isFunctionalTensor(out0)) { |
5484 | at::functionalization::impl::sync(out0); |
5485 | out0_ = at::functionalization::impl::from_functional_tensor(out0); |
5486 | } else { |
5487 | out0_ = out0; |
5488 | } |
5489 | |
5490 | at::Tensor out1_; |
5491 | if (at::functionalization::impl::isFunctionalTensor(out1)) { |
5492 | at::functionalization::impl::sync(out1); |
5493 | out1_ = at::functionalization::impl::from_functional_tensor(out1); |
5494 | } else { |
5495 | out1_ = out1; |
5496 | } |
5497 | if (!(true && at::functionalization::impl::isFunctionalTensor(out0) && at::functionalization::impl::isFunctionalTensor(out1))) { |
5498 | if ((false || at::functionalization::impl::isFunctionalTensor(grad_output) || at::functionalization::impl::isFunctionalTensor(input) || at::functionalization::impl::isFunctionalTensor(weight))) { |
5499 | // case 1: trying to mutate a non-functional tensor with a functional tensor is an error |
5500 | TORCH_INTERNAL_ASSERT(false, |
5501 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
5502 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
5503 | } else { |
5504 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
5505 | at::AutoDispatchSkipFunctionalize guard; |
5506 | ::std::tuple<at::Tensor,at::Tensor> tmp_output = at::_ops::mkldnn_linear_backward_weights_out::call(grad_output_, input_, weight_, bias_defined, out0_, out1_); |
5507 | return ::std::tuple<at::Tensor &,at::Tensor &>(out0, out1); |
5508 | } |
5509 | } else { |
5510 | ::std::tuple<at::Tensor,at::Tensor> tmp_output; |
5511 | { |
5512 | at::AutoDispatchSkipFunctionalize guard; |
5513 | tmp_output = at::_ops::mkldnn_linear_backward_weights::call(grad_output_, input_, weight_, bias_defined); |
5514 | } |
5515 | at::functionalization::impl::replace_(out0, std::get<0>(tmp_output)); |
5516 | at::functionalization::impl::commit_update(out0); |
5517 | at::functionalization::impl::sync(out0); |
5518 | at::functionalization::impl::replace_(out1, std::get<1>(tmp_output)); |
5519 | at::functionalization::impl::commit_update(out1); |
5520 | at::functionalization::impl::sync(out1); |
5521 | return ::std::tuple<at::Tensor &,at::Tensor &>(out0, out1); |
5522 | } |
5523 | } |
5524 | |
5525 | at::Tensor & logaddexp_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) { |
5526 | if (false) { |
5527 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
5528 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
5529 | // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
5530 | auto self_meta = to_meta(self); |
5531 | auto other_meta = to_meta(other); |
5532 | auto out_meta = to_meta(out); |
5533 | at::AutoDispatchSkipFunctionalize func_guard; |
5534 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
5535 | at::_ops::logaddexp_out::call(self_meta, other_meta, out_meta); |
5536 | } |
5537 | |
5538 | at::Tensor self_; |
5539 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
5540 | at::functionalization::impl::sync(self); |
5541 | self_ = at::functionalization::impl::from_functional_tensor(self); |
5542 | } else { |
5543 | self_ = self; |
5544 | } |
5545 | |
5546 | at::Tensor other_; |
5547 | if (at::functionalization::impl::isFunctionalTensor(other)) { |
5548 | at::functionalization::impl::sync(other); |
5549 | other_ = at::functionalization::impl::from_functional_tensor(other); |
5550 | } else { |
5551 | other_ = other; |
5552 | } |
5553 | |
5554 | at::Tensor out_; |
5555 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
5556 | at::functionalization::impl::sync(out); |
5557 | out_ = at::functionalization::impl::from_functional_tensor(out); |
5558 | } else { |
5559 | out_ = out; |
5560 | } |
5561 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
5562 | if ((false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(other))) { |
5563 | // case 1: trying to mutate a non-functional tensor with a functional tensor is an error |
5564 | TORCH_INTERNAL_ASSERT(false, |
5565 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
5566 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
5567 | } else { |
5568 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
5569 | at::AutoDispatchSkipFunctionalize guard; |
5570 | at::Tensor tmp_output = at::_ops::logaddexp_out::call(self_, other_, out_); |
5571 | return out; |
5572 | } |
5573 | } else { |
5574 | at::Tensor tmp_output; |
5575 | { |
5576 | at::AutoDispatchSkipFunctionalize guard; |
5577 | tmp_output = at::_ops::logaddexp::call(self_, other_); |
5578 | } |
5579 | at::functionalization::impl::replace_(out, tmp_output); |
5580 | at::functionalization::impl::commit_update(out); |
5581 | at::functionalization::impl::sync(out); |
5582 | return out; |
5583 | } |
5584 | } |
5585 | |
5586 | at::Tensor & _logcumsumexp_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, at::Tensor & out) { |
5587 | if (false) { |
5588 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
5589 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
5590 | // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
5591 | auto self_meta = to_meta(self); |
5592 | auto out_meta = to_meta(out); |
5593 | at::AutoDispatchSkipFunctionalize func_guard; |
5594 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
5595 | at::_ops::_logcumsumexp_out::call(self_meta, dim, out_meta); |
5596 | } |
5597 | |
5598 | at::Tensor self_; |
5599 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
5600 | at::functionalization::impl::sync(self); |
5601 | self_ = at::functionalization::impl::from_functional_tensor(self); |
5602 | } else { |
5603 | self_ = self; |
5604 | } |
5605 | |
5606 | at::Tensor out_; |
5607 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
5608 | at::functionalization::impl::sync(out); |
5609 | out_ = at::functionalization::impl::from_functional_tensor(out); |
5610 | } else { |
5611 | out_ = out; |
5612 | } |
5613 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
5614 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
5615 | // case 1: trying to mutate a non-functional tensor with a functional tensor is an error |
5616 | TORCH_INTERNAL_ASSERT(false, |
5617 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
5618 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
5619 | } else { |
5620 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
5621 | at::AutoDispatchSkipFunctionalize guard; |
5622 | at::Tensor tmp_output = at::_ops::_logcumsumexp_out::call(self_, dim, out_); |
5623 | return out; |
5624 | } |
5625 | } else { |
5626 | at::Tensor tmp_output; |
5627 | { |
5628 | at::AutoDispatchSkipFunctionalize guard; |
5629 | tmp_output = at::_ops::_logcumsumexp::call(self_, dim); |
5630 | } |
5631 | at::functionalization::impl::replace_(out, tmp_output); |
5632 | at::functionalization::impl::commit_update(out); |
5633 | at::functionalization::impl::sync(out); |
5634 | return out; |
5635 | } |
5636 | } |
5637 | |
5638 | at::Tensor & logcumsumexp_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, at::Tensor & out) { |
5639 | if (false) { |
5640 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
5641 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
5642 | // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
5643 | auto self_meta = to_meta(self); |
5644 | auto out_meta = to_meta(out); |
5645 | at::AutoDispatchSkipFunctionalize func_guard; |
5646 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
5647 | at::_ops::logcumsumexp_out::call(self_meta, dim, out_meta); |
5648 | } |
5649 | |
5650 | at::Tensor self_; |
5651 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
5652 | at::functionalization::impl::sync(self); |
5653 | self_ = at::functionalization::impl::from_functional_tensor(self); |
5654 | } else { |
5655 | self_ = self; |
5656 | } |
5657 | |
5658 | at::Tensor out_; |
5659 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
5660 | at::functionalization::impl::sync(out); |
5661 | out_ = at::functionalization::impl::from_functional_tensor(out); |
5662 | } else { |
5663 | out_ = out; |
5664 | } |
5665 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
5666 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
5667 | // case 1: trying to mutate a non-functional tensor with a functional tensor is an error |
5668 | TORCH_INTERNAL_ASSERT(false, |
5669 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
5670 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
5671 | } else { |
5672 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
5673 | at::AutoDispatchSkipFunctionalize guard; |
5674 | at::Tensor tmp_output = at::_ops::logcumsumexp_out::call(self_, dim, out_); |
5675 | return out; |
5676 | } |
5677 | } else { |
5678 | at::Tensor tmp_output; |
5679 | { |
5680 | at::AutoDispatchSkipFunctionalize guard; |
5681 | tmp_output = at::_ops::logcumsumexp::call(self_, dim); |
5682 | } |
5683 | at::functionalization::impl::replace_(out, tmp_output); |
5684 | at::functionalization::impl::commit_update(out); |
5685 | at::functionalization::impl::sync(out); |
5686 | return out; |
5687 | } |
5688 | } |
5689 | |
5690 | at::Tensor & logcumsumexp_out_dimname_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Dimname dim, at::Tensor & out) { |
5691 | if (false) { |
5692 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
5693 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
5694 | // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
5695 | auto self_meta = to_meta(self); |
5696 | auto out_meta = to_meta(out); |
5697 | at::AutoDispatchSkipFunctionalize func_guard; |
5698 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
5699 | at::_ops::logcumsumexp_dimname_out::call(self_meta, dim, out_meta); |
5700 | } |
5701 | |
5702 | at::Tensor self_; |
5703 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
5704 | at::functionalization::impl::sync(self); |
5705 | self_ = at::functionalization::impl::from_functional_tensor(self); |
5706 | } else { |
5707 | self_ = self; |
5708 | } |
5709 | |
5710 | at::Tensor out_; |
5711 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
5712 | at::functionalization::impl::sync(out); |
5713 | out_ = at::functionalization::impl::from_functional_tensor(out); |
5714 | } else { |
5715 | out_ = out; |
5716 | } |
5717 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
5718 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
5719 | // case 1: trying to mutate a non-functional tensor with a functional tensor is an error |
5720 | TORCH_INTERNAL_ASSERT(false, |
5721 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
5722 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
5723 | } else { |
5724 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
5725 | at::AutoDispatchSkipFunctionalize guard; |
5726 | at::Tensor tmp_output = at::_ops::logcumsumexp_dimname_out::call(self_, dim, out_); |
5727 | return out; |
5728 | } |
5729 | } else { |
5730 | at::Tensor tmp_output; |
5731 | { |
5732 | at::AutoDispatchSkipFunctionalize guard; |
5733 | tmp_output = at::_ops::logcumsumexp_dimname::call(self_, dim); |
5734 | } |
5735 | at::functionalization::impl::replace_(out, tmp_output); |
5736 | at::functionalization::impl::commit_update(out); |
5737 | at::functionalization::impl::sync(out); |
5738 | return out; |
5739 | } |
5740 | } |
5741 | |
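// Editorial note: the same pattern extends to multi-output out= ops such as
// matmul_backward_out_out below. The functional path is taken only when every `out`
// tensor is functional; each element of the temporary tuple is then committed into its
// matching output with replace_ / commit_update / sync, and the kernel returns a tuple
// of references to the original output arguments.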
5742 | ::std::tuple<at::Tensor &,at::Tensor &> matmul_backward_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad, const at::Tensor & self, const at::Tensor & other, ::std::array<bool,2> mask, at::Tensor & out0, at::Tensor & out1) { |
5743 | if (false) { |
5744 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
5745 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
5746 | // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
5747 | auto grad_meta = to_meta(grad); |
5748 | auto self_meta = to_meta(self); |
5749 | auto other_meta = to_meta(other); |
5750 | auto out0_meta = to_meta(out0); |
5751 | auto out1_meta = to_meta(out1); |
5752 | at::AutoDispatchSkipFunctionalize func_guard; |
5753 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
5754 | at::_ops::matmul_backward_out::call(grad_meta, self_meta, other_meta, mask, out0_meta, out1_meta); |
5755 | } |
5756 | |
5757 | at::Tensor grad_; |
5758 | if (at::functionalization::impl::isFunctionalTensor(grad)) { |
5759 | at::functionalization::impl::sync(grad); |
5760 | grad_ = at::functionalization::impl::from_functional_tensor(grad); |
5761 | } else { |
5762 | grad_ = grad; |
5763 | } |
5764 | |
5765 | at::Tensor self_; |
5766 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
5767 | at::functionalization::impl::sync(self); |
5768 | self_ = at::functionalization::impl::from_functional_tensor(self); |
5769 | } else { |
5770 | self_ = self; |
5771 | } |
5772 | |
5773 | at::Tensor other_; |
5774 | if (at::functionalization::impl::isFunctionalTensor(other)) { |
5775 | at::functionalization::impl::sync(other); |
5776 | other_ = at::functionalization::impl::from_functional_tensor(other); |
5777 | } else { |
5778 | other_ = other; |
5779 | } |
5780 | |
5781 | at::Tensor out0_; |
5782 | if (at::functionalization::impl::isFunctionalTensor(out0)) { |
5783 | at::functionalization::impl::sync(out0); |
5784 | out0_ = at::functionalization::impl::from_functional_tensor(out0); |
5785 | } else { |
5786 | out0_ = out0; |
5787 | } |
5788 | |
5789 | at::Tensor out1_; |
5790 | if (at::functionalization::impl::isFunctionalTensor(out1)) { |
5791 | at::functionalization::impl::sync(out1); |
5792 | out1_ = at::functionalization::impl::from_functional_tensor(out1); |
5793 | } else { |
5794 | out1_ = out1; |
5795 | } |
5796 | if (!(true && at::functionalization::impl::isFunctionalTensor(out0) && at::functionalization::impl::isFunctionalTensor(out1))) { |
5797 | if ((false || at::functionalization::impl::isFunctionalTensor(grad) || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(other))) { |
5798 | // case 1: trying to mutate a non-functional tensor with a functional tensor is an error |
5799 | TORCH_INTERNAL_ASSERT(false, |
5800 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
5801 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
5802 | } else { |
5803 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
5804 | at::AutoDispatchSkipFunctionalize guard; |
5805 | ::std::tuple<at::Tensor,at::Tensor> tmp_output = at::_ops::matmul_backward_out::call(grad_, self_, other_, mask, out0_, out1_); |
5806 | return ::std::tuple<at::Tensor &,at::Tensor &>(out0, out1); |
5807 | } |
5808 | } else { |
5809 | ::std::tuple<at::Tensor,at::Tensor> tmp_output; |
5810 | { |
5811 | at::AutoDispatchSkipFunctionalize guard; |
5812 | tmp_output = at::_ops::matmul_backward::call(grad_, self_, other_, mask); |
5813 | } |
5814 | at::functionalization::impl::replace_(out0, std::get<0>(tmp_output)); |
5815 | at::functionalization::impl::commit_update(out0); |
5816 | at::functionalization::impl::sync(out0); |
5817 | at::functionalization::impl::replace_(out1, std::get<1>(tmp_output)); |
5818 | at::functionalization::impl::commit_update(out1); |
5819 | at::functionalization::impl::sync(out1); |
5820 | return ::std::tuple<at::Tensor &,at::Tensor &>(out0, out1); |
5821 | } |
5822 | } |
5823 | |
5824 | at::Tensor & mps_max_pool2d_backward_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, at::Tensor & out) { |
5825 | if (false) { |
5826 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
5827 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
5828 | // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
5829 | auto grad_output_meta = to_meta(grad_output); |
5830 | auto self_meta = to_meta(self); |
5831 | auto out_meta = to_meta(out); |
5832 | at::AutoDispatchSkipFunctionalize func_guard; |
5833 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
5834 | at::_ops::mps_max_pool2d_backward_out::call(grad_output_meta, self_meta, kernel_size, stride, padding, dilation, ceil_mode, out_meta); |
5835 | } |
5836 | |
5837 | at::Tensor grad_output_; |
5838 | if (at::functionalization::impl::isFunctionalTensor(grad_output)) { |
5839 | at::functionalization::impl::sync(grad_output); |
5840 | grad_output_ = at::functionalization::impl::from_functional_tensor(grad_output); |
5841 | } else { |
5842 | grad_output_ = grad_output; |
5843 | } |
5844 | |
5845 | at::Tensor self_; |
5846 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
5847 | at::functionalization::impl::sync(self); |
5848 | self_ = at::functionalization::impl::from_functional_tensor(self); |
5849 | } else { |
5850 | self_ = self; |
5851 | } |
5852 | |
5853 | at::Tensor out_; |
5854 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
5855 | at::functionalization::impl::sync(out); |
5856 | out_ = at::functionalization::impl::from_functional_tensor(out); |
5857 | } else { |
5858 | out_ = out; |
5859 | } |
5860 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
5861 | if ((false || at::functionalization::impl::isFunctionalTensor(grad_output) || at::functionalization::impl::isFunctionalTensor(self))) { |
5862 | // case 1: trying to mutate a non-functional tensor with a functional tensor is an error |
5863 | TORCH_INTERNAL_ASSERT(false, |
5864 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
5865 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
5866 | } else { |
5867 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
5868 | at::AutoDispatchSkipFunctionalize guard; |
5869 | at::Tensor tmp_output = at::_ops::mps_max_pool2d_backward_out::call(grad_output_, self_, kernel_size, stride, padding, dilation, ceil_mode, out_); |
5870 | return out; |
5871 | } |
5872 | } else { |
5873 | at::Tensor tmp_output; |
5874 | { |
5875 | at::AutoDispatchSkipFunctionalize guard; |
5876 | tmp_output = at::_ops::mps_max_pool2d_backward::call(grad_output_, self_, kernel_size, stride, padding, dilation, ceil_mode); |
5877 | } |
5878 | at::functionalization::impl::replace_(out, tmp_output); |
5879 | at::functionalization::impl::commit_update(out); |
5880 | at::functionalization::impl::sync(out); |
5881 | return out; |
5882 | } |
5883 | } |
5884 | |
5885 | at::Tensor & median_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) { |
5886 | if (false) { |
5887 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
5888 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
5889 | // (We can only do this for inplace ops today though, because they technicaly all support meta tensors). |
5890 | auto self_meta = to_meta(self); |
5891 | auto out_meta = to_meta(out); |
5892 | at::AutoDispatchSkipFunctionalize func_guard; |
5893 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
5894 | at::_ops::median_out::call(self_meta, out_meta); |
5895 | } |
5896 | |
5897 | at::Tensor self_; |
5898 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
5899 | at::functionalization::impl::sync(self); |
5900 | self_ = at::functionalization::impl::from_functional_tensor(self); |
5901 | } else { |
5902 | self_ = self; |
5903 | } |
5904 | |
5905 | at::Tensor out_; |
5906 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
5907 | at::functionalization::impl::sync(out); |
5908 | out_ = at::functionalization::impl::from_functional_tensor(out); |
5909 | } else { |
5910 | out_ = out; |
5911 | } |
5912 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
5913 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
5914 | // case 1: trying to mutate a non-functional tensor with a functional tensor is an error |
5915 | TORCH_INTERNAL_ASSERT(false, |
5916 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
5917 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
5918 | } else { |
5919 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
5920 | at::AutoDispatchSkipFunctionalize guard; |
5921 | at::Tensor tmp_output = at::_ops::median_out::call(self_, out_); |
5922 | return out; |
5923 | } |
5924 | } else { |
5925 | at::Tensor tmp_output; |
5926 | { |
5927 | at::AutoDispatchSkipFunctionalize guard; |
5928 | tmp_output = at::_ops::median::call(self_); |
5929 | } |
5930 | at::functionalization::impl::replace_(out, tmp_output); |
5931 | at::functionalization::impl::commit_update(out); |
5932 | at::functionalization::impl::sync(out); |
5933 | return out; |
5934 | } |
5935 | } |
5936 | |
5937 | ::std::tuple<at::Tensor &,at::Tensor &> median_out_dim_values(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, bool keepdim, at::Tensor & values, at::Tensor & indices) { |
5938 | if (false) { |
5939 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
5940 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
5941 | // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
5942 | auto self_meta = to_meta(self); |
5943 | auto values_meta = to_meta(values); |
5944 | auto indices_meta = to_meta(indices); |
5945 | at::AutoDispatchSkipFunctionalize func_guard; |
5946 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
5947 | at::_ops::median_dim_values::call(self_meta, dim, keepdim, values_meta, indices_meta); |
5948 | } |
5949 | |
5950 | at::Tensor self_; |
5951 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
5952 | at::functionalization::impl::sync(self); |
5953 | self_ = at::functionalization::impl::from_functional_tensor(self); |
5954 | } else { |
5955 | self_ = self; |
5956 | } |
5957 | |
5958 | at::Tensor values_; |
5959 | if (at::functionalization::impl::isFunctionalTensor(values)) { |
5960 | at::functionalization::impl::sync(values); |
5961 | values_ = at::functionalization::impl::from_functional_tensor(values); |
5962 | } else { |
5963 | values_ = values; |
5964 | } |
5965 | |
5966 | at::Tensor indices_; |
5967 | if (at::functionalization::impl::isFunctionalTensor(indices)) { |
5968 | at::functionalization::impl::sync(indices); |
5969 | indices_ = at::functionalization::impl::from_functional_tensor(indices); |
5970 | } else { |
5971 | indices_ = indices; |
5972 | } |
5973 | if (!(true && at::functionalization::impl::isFunctionalTensor(values) && at::functionalization::impl::isFunctionalTensor(indices))) { |
5974 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
5975 | // case 1: trying to mutate a non-functional tensor with a functional tensor is an error |
5976 | TORCH_INTERNAL_ASSERT(false, |
5977 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
5978 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
5979 | } else { |
5980 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
5981 | at::AutoDispatchSkipFunctionalize guard; |
5982 | ::std::tuple<at::Tensor,at::Tensor> tmp_output = at::_ops::median_dim_values::call(self_, dim, keepdim, values_, indices_); |
5983 | return ::std::tuple<at::Tensor &,at::Tensor &>(values, indices); |
5984 | } |
5985 | } else { |
5986 | ::std::tuple<at::Tensor,at::Tensor> tmp_output; |
5987 | { |
5988 | at::AutoDispatchSkipFunctionalize guard; |
5989 | tmp_output = at::_ops::median_dim::call(self_, dim, keepdim); |
5990 | } |
5991 | at::functionalization::impl::replace_(values, std::get<0>(tmp_output)); |
5992 | at::functionalization::impl::commit_update(values); |
5993 | at::functionalization::impl::sync(values); |
5994 | at::functionalization::impl::replace_(indices, std::get<1>(tmp_output)); |
5995 | at::functionalization::impl::commit_update(indices); |
5996 | at::functionalization::impl::sync(indices); |
5997 | return ::std::tuple<at::Tensor &,at::Tensor &>(values, indices); |
5998 | } |
5999 | } |
6000 | |
6001 | ::std::tuple<at::Tensor &,at::Tensor &> median_out_names_dim_values(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Dimname dim, bool keepdim, at::Tensor & values, at::Tensor & indices) { |
6002 | if (false) { |
6003 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
6004 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
6005 | // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
6006 | auto self_meta = to_meta(self); |
6007 | auto values_meta = to_meta(values); |
6008 | auto indices_meta = to_meta(indices); |
6009 | at::AutoDispatchSkipFunctionalize func_guard; |
6010 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
6011 | at::_ops::median_names_dim_values::call(self_meta, dim, keepdim, values_meta, indices_meta); |
6012 | } |
6013 | |
6014 | at::Tensor self_; |
6015 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
6016 | at::functionalization::impl::sync(self); |
6017 | self_ = at::functionalization::impl::from_functional_tensor(self); |
6018 | } else { |
6019 | self_ = self; |
6020 | } |
6021 | |
6022 | at::Tensor values_; |
6023 | if (at::functionalization::impl::isFunctionalTensor(values)) { |
6024 | at::functionalization::impl::sync(values); |
6025 | values_ = at::functionalization::impl::from_functional_tensor(values); |
6026 | } else { |
6027 | values_ = values; |
6028 | } |
6029 | |
6030 | at::Tensor indices_; |
6031 | if (at::functionalization::impl::isFunctionalTensor(indices)) { |
6032 | at::functionalization::impl::sync(indices); |
6033 | indices_ = at::functionalization::impl::from_functional_tensor(indices); |
6034 | } else { |
6035 | indices_ = indices; |
6036 | } |
6037 | if (!(true && at::functionalization::impl::isFunctionalTensor(values) && at::functionalization::impl::isFunctionalTensor(indices))) { |
6038 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
6039 | // case 1: trying to mutate a non functional tensor with a functional tensor is an error |
6040 | TORCH_INTERNAL_ASSERT(false, |
6041 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
6042 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
6043 | } else { |
6044 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
6045 | at::AutoDispatchSkipFunctionalize guard; |
6046 | ::std::tuple<at::Tensor,at::Tensor> tmp_output = at::_ops::median_names_dim_values::call(self_, dim, keepdim, values_, indices_); |
6047 | return ::std::tuple<at::Tensor &,at::Tensor &>(values, indices); |
6048 | } |
6049 | } else { |
6050 | ::std::tuple<at::Tensor,at::Tensor> tmp_output; |
6051 | { |
6052 | at::AutoDispatchSkipFunctionalize guard; |
6053 | tmp_output = at::_ops::median_names_dim::call(self_, dim, keepdim); |
6054 | } |
6055 | at::functionalization::impl::replace_(values, std::get<0>(tmp_output)); |
6056 | at::functionalization::impl::commit_update(values); |
6057 | at::functionalization::impl::sync(values); |
6058 | at::functionalization::impl::replace_(indices, std::get<1>(tmp_output)); |
6059 | at::functionalization::impl::commit_update(indices); |
6060 | at::functionalization::impl::sync(indices); |
6061 | return ::std::tuple<at::Tensor &,at::Tensor &>(values, indices); |
6062 | } |
6063 | } |
6064 | |
6065 | at::Tensor & amin_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef dim, bool keepdim, at::Tensor & out) { |
6066 | if (false) { |
6067 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
6068 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
6069 | // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
6070 | auto self_meta = to_meta(self); |
6071 | auto out_meta = to_meta(out); |
6072 | at::AutoDispatchSkipFunctionalize func_guard; |
6073 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
6074 | at::_ops::amin_out::call(self_meta, dim, keepdim, out_meta); |
6075 | } |
6076 | |
6077 | at::Tensor self_; |
6078 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
6079 | at::functionalization::impl::sync(self); |
6080 | self_ = at::functionalization::impl::from_functional_tensor(self); |
6081 | } else { |
6082 | self_ = self; |
6083 | } |
6084 | |
6085 | at::Tensor out_; |
6086 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
6087 | at::functionalization::impl::sync(out); |
6088 | out_ = at::functionalization::impl::from_functional_tensor(out); |
6089 | } else { |
6090 | out_ = out; |
6091 | } |
6092 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
6093 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
6094 | // case 1: trying to mutate a non-functional tensor with a functional tensor is an error |
6095 | TORCH_INTERNAL_ASSERT(false, |
6096 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
6097 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
6098 | } else { |
6099 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
6100 | at::AutoDispatchSkipFunctionalize guard; |
6101 | at::Tensor tmp_output = at::_ops::amin_out::call(self_, dim, keepdim, out_); |
6102 | return out; |
6103 | } |
6104 | } else { |
6105 | at::Tensor tmp_output; |
6106 | { |
6107 | at::AutoDispatchSkipFunctionalize guard; |
6108 | tmp_output = at::_ops::amin::call(self_, dim, keepdim); |
6109 | } |
6110 | at::functionalization::impl::replace_(out, tmp_output); |
6111 | at::functionalization::impl::commit_update(out); |
6112 | at::functionalization::impl::sync(out); |
6113 | return out; |
6114 | } |
6115 | } |
6116 | |
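// Editorial note: mkldnn_rnn_layer_backward_out_out below shows the pattern at scale:
// seven outputs are unwrapped and committed back individually, and the optional gradient
// inputs (grad_output, grad_hy, grad_cy) are unwrapped the same way through their
// c10::optional<at::Tensor> handling.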
6117 | ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &> mkldnn_rnn_layer_backward_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & weight1, const at::Tensor & weight2, const at::Tensor & weight3, const at::Tensor & weight4, const at::Tensor & hx_, const at::Tensor & cx_tmp, const at::Tensor & output, const at::Tensor & hy_, const at::Tensor & cy_, const c10::optional<at::Tensor> & grad_output, const c10::optional<at::Tensor> & grad_hy, const c10::optional<at::Tensor> & grad_cy, bool reverse, int64_t mode, int64_t hidden_size, int64_t num_layers, bool has_biases, bool train, bool bidirectional, at::IntArrayRef batch_sizes, bool batch_first, const at::Tensor & workspace, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, at::Tensor & out3, at::Tensor & out4, at::Tensor & out5, at::Tensor & out6) { |
6118 | if (false) { |
6119 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
6120 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
6121 | // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
6122 | auto input_meta = to_meta(input); |
6123 | auto weight1_meta = to_meta(weight1); |
6124 | auto weight2_meta = to_meta(weight2); |
6125 | auto weight3_meta = to_meta(weight3); |
6126 | auto weight4_meta = to_meta(weight4); |
6127 | auto hx__meta = to_meta(hx_); |
6128 | auto cx_tmp_meta = to_meta(cx_tmp); |
6129 | auto output_meta = to_meta(output); |
6130 | auto hy__meta = to_meta(hy_); |
6131 | auto cy__meta = to_meta(cy_); |
6132 | auto grad_output_meta = to_meta(grad_output); |
6133 | auto grad_hy_meta = to_meta(grad_hy); |
6134 | auto grad_cy_meta = to_meta(grad_cy); |
6135 | auto workspace_meta = to_meta(workspace); |
6136 | auto out0_meta = to_meta(out0); |
6137 | auto out1_meta = to_meta(out1); |
6138 | auto out2_meta = to_meta(out2); |
6139 | auto out3_meta = to_meta(out3); |
6140 | auto out4_meta = to_meta(out4); |
6141 | auto out5_meta = to_meta(out5); |
6142 | auto out6_meta = to_meta(out6); |
6143 | at::AutoDispatchSkipFunctionalize func_guard; |
6144 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
6145 | at::_ops::mkldnn_rnn_layer_backward_out::call(input_meta, weight1_meta, weight2_meta, weight3_meta, weight4_meta, hx__meta, cx_tmp_meta, output_meta, hy__meta, cy__meta, grad_output_meta, grad_hy_meta, grad_cy_meta, reverse, mode, hidden_size, num_layers, has_biases, train, bidirectional, batch_sizes, batch_first, workspace_meta, out0_meta, out1_meta, out2_meta, out3_meta, out4_meta, out5_meta, out6_meta); |
6146 | } |
6147 | |
6148 | at::Tensor input_; |
6149 | if (at::functionalization::impl::isFunctionalTensor(input)) { |
6150 | at::functionalization::impl::sync(input); |
6151 | input_ = at::functionalization::impl::from_functional_tensor(input); |
6152 | } else { |
6153 | input_ = input; |
6154 | } |
6155 | |
6156 | at::Tensor weight1_; |
6157 | if (at::functionalization::impl::isFunctionalTensor(weight1)) { |
6158 | at::functionalization::impl::sync(weight1); |
6159 | weight1_ = at::functionalization::impl::from_functional_tensor(weight1); |
6160 | } else { |
6161 | weight1_ = weight1; |
6162 | } |
6163 | |
6164 | at::Tensor weight2_; |
6165 | if (at::functionalization::impl::isFunctionalTensor(weight2)) { |
6166 | at::functionalization::impl::sync(weight2); |
6167 | weight2_ = at::functionalization::impl::from_functional_tensor(weight2); |
6168 | } else { |
6169 | weight2_ = weight2; |
6170 | } |
6171 | |
6172 | at::Tensor weight3_; |
6173 | if (at::functionalization::impl::isFunctionalTensor(weight3)) { |
6174 | at::functionalization::impl::sync(weight3); |
6175 | weight3_ = at::functionalization::impl::from_functional_tensor(weight3); |
6176 | } else { |
6177 | weight3_ = weight3; |
6178 | } |
6179 | |
6180 | at::Tensor weight4_; |
6181 | if (at::functionalization::impl::isFunctionalTensor(weight4)) { |
6182 | at::functionalization::impl::sync(weight4); |
6183 | weight4_ = at::functionalization::impl::from_functional_tensor(weight4); |
6184 | } else { |
6185 | weight4_ = weight4; |
6186 | } |
6187 | |
6188 | at::Tensor hx__; |
6189 | if (at::functionalization::impl::isFunctionalTensor(hx_)) { |
6190 | at::functionalization::impl::sync(hx_); |
6191 | hx__ = at::functionalization::impl::from_functional_tensor(hx_); |
6192 | } else { |
6193 | hx__ = hx_; |
6194 | } |
6195 | |
6196 | at::Tensor cx_tmp_; |
6197 | if (at::functionalization::impl::isFunctionalTensor(cx_tmp)) { |
6198 | at::functionalization::impl::sync(cx_tmp); |
6199 | cx_tmp_ = at::functionalization::impl::from_functional_tensor(cx_tmp); |
6200 | } else { |
6201 | cx_tmp_ = cx_tmp; |
6202 | } |
6203 | |
6204 | at::Tensor output_; |
6205 | if (at::functionalization::impl::isFunctionalTensor(output)) { |
6206 | at::functionalization::impl::sync(output); |
6207 | output_ = at::functionalization::impl::from_functional_tensor(output); |
6208 | } else { |
6209 | output_ = output; |
6210 | } |
6211 | |
6212 | at::Tensor hy__; |
6213 | if (at::functionalization::impl::isFunctionalTensor(hy_)) { |
6214 | at::functionalization::impl::sync(hy_); |
6215 | hy__ = at::functionalization::impl::from_functional_tensor(hy_); |
6216 | } else { |
6217 | hy__ = hy_; |
6218 | } |
6219 | |
6220 | at::Tensor cy__; |
6221 | if (at::functionalization::impl::isFunctionalTensor(cy_)) { |
6222 | at::functionalization::impl::sync(cy_); |
6223 | cy__ = at::functionalization::impl::from_functional_tensor(cy_); |
6224 | } else { |
6225 | cy__ = cy_; |
6226 | } |
6227 | |
6228 | c10::optional<at::Tensor> grad_output_; |
6229 | if (at::functionalization::impl::isFunctionalTensor(grad_output)) { |
6230 | at::functionalization::impl::sync(grad_output); |
6231 | grad_output_ = at::functionalization::impl::from_functional_tensor(grad_output); |
6232 | } else { |
6233 | grad_output_ = grad_output; |
6234 | } |
6235 | |
6236 | c10::optional<at::Tensor> grad_hy_; |
6237 | if (at::functionalization::impl::isFunctionalTensor(grad_hy)) { |
6238 | at::functionalization::impl::sync(grad_hy); |
6239 | grad_hy_ = at::functionalization::impl::from_functional_tensor(grad_hy); |
6240 | } else { |
6241 | grad_hy_ = grad_hy; |
6242 | } |
6243 | |
6244 | c10::optional<at::Tensor> grad_cy_; |
6245 | if (at::functionalization::impl::isFunctionalTensor(grad_cy)) { |
6246 | at::functionalization::impl::sync(grad_cy); |
6247 | grad_cy_ = at::functionalization::impl::from_functional_tensor(grad_cy); |
6248 | } else { |
6249 | grad_cy_ = grad_cy; |
6250 | } |
6251 | |
6252 | at::Tensor workspace_; |
6253 | if (at::functionalization::impl::isFunctionalTensor(workspace)) { |
6254 | at::functionalization::impl::sync(workspace); |
6255 | workspace_ = at::functionalization::impl::from_functional_tensor(workspace); |
6256 | } else { |
6257 | workspace_ = workspace; |
6258 | } |
6259 | |
6260 | at::Tensor out0_; |
6261 | if (at::functionalization::impl::isFunctionalTensor(out0)) { |
6262 | at::functionalization::impl::sync(out0); |
6263 | out0_ = at::functionalization::impl::from_functional_tensor(out0); |
6264 | } else { |
6265 | out0_ = out0; |
6266 | } |
6267 | |
6268 | at::Tensor out1_; |
6269 | if (at::functionalization::impl::isFunctionalTensor(out1)) { |
6270 | at::functionalization::impl::sync(out1); |
6271 | out1_ = at::functionalization::impl::from_functional_tensor(out1); |
6272 | } else { |
6273 | out1_ = out1; |
6274 | } |
6275 | |
6276 | at::Tensor out2_; |
6277 | if (at::functionalization::impl::isFunctionalTensor(out2)) { |
6278 | at::functionalization::impl::sync(out2); |
6279 | out2_ = at::functionalization::impl::from_functional_tensor(out2); |
6280 | } else { |
6281 | out2_ = out2; |
6282 | } |
6283 | |
6284 | at::Tensor out3_; |
6285 | if (at::functionalization::impl::isFunctionalTensor(out3)) { |
6286 | at::functionalization::impl::sync(out3); |
6287 | out3_ = at::functionalization::impl::from_functional_tensor(out3); |
6288 | } else { |
6289 | out3_ = out3; |
6290 | } |
6291 | |
6292 | at::Tensor out4_; |
6293 | if (at::functionalization::impl::isFunctionalTensor(out4)) { |
6294 | at::functionalization::impl::sync(out4); |
6295 | out4_ = at::functionalization::impl::from_functional_tensor(out4); |
6296 | } else { |
6297 | out4_ = out4; |
6298 | } |
6299 | |
6300 | at::Tensor out5_; |
6301 | if (at::functionalization::impl::isFunctionalTensor(out5)) { |
6302 | at::functionalization::impl::sync(out5); |
6303 | out5_ = at::functionalization::impl::from_functional_tensor(out5); |
6304 | } else { |
6305 | out5_ = out5; |
6306 | } |
6307 | |
6308 | at::Tensor out6_; |
6309 | if (at::functionalization::impl::isFunctionalTensor(out6)) { |
6310 | at::functionalization::impl::sync(out6); |
6311 | out6_ = at::functionalization::impl::from_functional_tensor(out6); |
6312 | } else { |
6313 | out6_ = out6; |
6314 | } |
6315 | if (!(true && at::functionalization::impl::isFunctionalTensor(out0) && at::functionalization::impl::isFunctionalTensor(out1) && at::functionalization::impl::isFunctionalTensor(out2) && at::functionalization::impl::isFunctionalTensor(out3) && at::functionalization::impl::isFunctionalTensor(out4) && at::functionalization::impl::isFunctionalTensor(out5) && at::functionalization::impl::isFunctionalTensor(out6))) { |
6316 | if ((false || at::functionalization::impl::isFunctionalTensor(input) || at::functionalization::impl::isFunctionalTensor(weight1) || at::functionalization::impl::isFunctionalTensor(weight2) || at::functionalization::impl::isFunctionalTensor(weight3) || at::functionalization::impl::isFunctionalTensor(weight4) || at::functionalization::impl::isFunctionalTensor(hx_) || at::functionalization::impl::isFunctionalTensor(cx_tmp) || at::functionalization::impl::isFunctionalTensor(output) || at::functionalization::impl::isFunctionalTensor(hy_) || at::functionalization::impl::isFunctionalTensor(cy_) || at::functionalization::impl::isFunctionalTensor(grad_output) || at::functionalization::impl::isFunctionalTensor(grad_hy) || at::functionalization::impl::isFunctionalTensor(grad_cy) || at::functionalization::impl::isFunctionalTensor(workspace))) { |
6317 | // case 1: trying to mutate a non-functional tensor with a functional tensor is an error |
6318 | TORCH_INTERNAL_ASSERT(false, |
6319 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
6320 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
6321 | } else { |
6322 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
6323 | at::AutoDispatchSkipFunctionalize guard; |
6324 | ::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor> tmp_output = at::_ops::mkldnn_rnn_layer_backward_out::call(input_, weight1_, weight2_, weight3_, weight4_, hx__, cx_tmp_, output_, hy__, cy__, grad_output_, grad_hy_, grad_cy_, reverse, mode, hidden_size, num_layers, has_biases, train, bidirectional, batch_sizes, batch_first, workspace_, out0_, out1_, out2_, out3_, out4_, out5_, out6_); |
6325 | return ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &>(out0, out1, out2, out3, out4, out5, out6); |
6326 | } |
6327 | } else { |
6328 | ::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor> tmp_output; |
6329 | { |
6330 | at::AutoDispatchSkipFunctionalize guard; |
6331 | tmp_output = at::_ops::mkldnn_rnn_layer_backward::call(input_, weight1_, weight2_, weight3_, weight4_, hx__, cx_tmp_, output_, hy__, cy__, grad_output_, grad_hy_, grad_cy_, reverse, mode, hidden_size, num_layers, has_biases, train, bidirectional, batch_sizes, batch_first, workspace_); |
6332 | } |
6333 | at::functionalization::impl::replace_(out0, std::get<0>(tmp_output)); |
6334 | at::functionalization::impl::commit_update(out0); |
6335 | at::functionalization::impl::sync(out0); |
6336 | at::functionalization::impl::replace_(out1, std::get<1>(tmp_output)); |
6337 | at::functionalization::impl::commit_update(out1); |
6338 | at::functionalization::impl::sync(out1); |
6339 | at::functionalization::impl::replace_(out2, std::get<2>(tmp_output)); |
6340 | at::functionalization::impl::commit_update(out2); |
6341 | at::functionalization::impl::sync(out2); |
6342 | at::functionalization::impl::replace_(out3, std::get<3>(tmp_output)); |
6343 | at::functionalization::impl::commit_update(out3); |
6344 | at::functionalization::impl::sync(out3); |
6345 | at::functionalization::impl::replace_(out4, std::get<4>(tmp_output)); |
6346 | at::functionalization::impl::commit_update(out4); |
6347 | at::functionalization::impl::sync(out4); |
6348 | at::functionalization::impl::replace_(out5, std::get<5>(tmp_output)); |
6349 | at::functionalization::impl::commit_update(out5); |
6350 | at::functionalization::impl::sync(out5); |
6351 | at::functionalization::impl::replace_(out6, std::get<6>(tmp_output)); |
6352 | at::functionalization::impl::commit_update(out6); |
6353 | at::functionalization::impl::sync(out6); |
6354 | return ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &>(out0, out1, out2, out3, out4, out5, out6); |
6355 | } |
6356 | } |
6357 | |
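// Editorial note: non-tensor arguments (the SymIntArrayRef padding, the IntArrayRef
// stride/dilation, and the int/bool flags below) are forwarded untouched; only Tensor,
// c10::optional<Tensor>, and TensorList arguments take part in the unwrapping these
// kernels perform.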
6358 | at::Tensor & miopen_convolution_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, c10::SymIntArrayRef padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic, at::Tensor & out) { |
6359 | if (false) { |
6360 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
6361 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
6362 | // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
6363 | auto self_meta = to_meta(self); |
6364 | auto weight_meta = to_meta(weight); |
6365 | auto bias_meta = to_meta(bias); |
6366 | auto out_meta = to_meta(out); |
6367 | at::AutoDispatchSkipFunctionalize func_guard; |
6368 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
6369 | at::_ops::miopen_convolution_out::call(self_meta, weight_meta, bias_meta, padding, stride, dilation, groups, benchmark, deterministic, out_meta); |
6370 | } |
6371 | |
6372 | at::Tensor self_; |
6373 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
6374 | at::functionalization::impl::sync(self); |
6375 | self_ = at::functionalization::impl::from_functional_tensor(self); |
6376 | } else { |
6377 | self_ = self; |
6378 | } |
6379 | |
6380 | at::Tensor weight_; |
6381 | if (at::functionalization::impl::isFunctionalTensor(weight)) { |
6382 | at::functionalization::impl::sync(weight); |
6383 | weight_ = at::functionalization::impl::from_functional_tensor(weight); |
6384 | } else { |
6385 | weight_ = weight; |
6386 | } |
6387 | |
6388 | c10::optional<at::Tensor> bias_; |
6389 | if (at::functionalization::impl::isFunctionalTensor(bias)) { |
6390 | at::functionalization::impl::sync(bias); |
6391 | bias_ = at::functionalization::impl::from_functional_tensor(bias); |
6392 | } else { |
6393 | bias_ = bias; |
6394 | } |
6395 | |
6396 | at::Tensor out_; |
6397 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
6398 | at::functionalization::impl::sync(out); |
6399 | out_ = at::functionalization::impl::from_functional_tensor(out); |
6400 | } else { |
6401 | out_ = out; |
6402 | } |
6403 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
6404 | if ((false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(weight) || at::functionalization::impl::isFunctionalTensor(bias))) { |
6405 | // case 1: trying to mutate a non-functional tensor with a functional tensor is an error |
6406 | TORCH_INTERNAL_ASSERT(false, |
6407 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
6408 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
6409 | } else { |
6410 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
6411 | at::AutoDispatchSkipFunctionalize guard; |
6412 | at::Tensor tmp_output = at::_ops::miopen_convolution_out::call(self_, weight_, bias_, padding, stride, dilation, groups, benchmark, deterministic, out_); |
6413 | return out; |
6414 | } |
6415 | } else { |
6416 | at::Tensor tmp_output; |
6417 | { |
6418 | at::AutoDispatchSkipFunctionalize guard; |
6419 | tmp_output = at::_ops::miopen_convolution::call(self_, weight_, bias_, padding, stride, dilation, groups, benchmark, deterministic); |
6420 | } |
6421 | at::functionalization::impl::replace_(out, tmp_output); |
6422 | at::functionalization::impl::commit_update(out); |
6423 | at::functionalization::impl::sync(out); |
6424 | return out; |
6425 | } |
6426 | } |
6427 | |
6428 | at::Tensor & miopen_depthwise_convolution_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, c10::SymIntArrayRef padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic, at::Tensor & out) { |
6429 | if (false) { |
6430 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
6431 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
6432 | // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
6433 | auto self_meta = to_meta(self); |
6434 | auto weight_meta = to_meta(weight); |
6435 | auto bias_meta = to_meta(bias); |
6436 | auto out_meta = to_meta(out); |
6437 | at::AutoDispatchSkipFunctionalize func_guard; |
6438 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
6439 | at::_ops::miopen_depthwise_convolution_out::call(self_meta, weight_meta, bias_meta, padding, stride, dilation, groups, benchmark, deterministic, out_meta); |
6440 | } |
6441 | |
6442 | at::Tensor self_; |
6443 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
6444 | at::functionalization::impl::sync(self); |
6445 | self_ = at::functionalization::impl::from_functional_tensor(self); |
6446 | } else { |
6447 | self_ = self; |
6448 | } |
6449 | |
6450 | at::Tensor weight_; |
6451 | if (at::functionalization::impl::isFunctionalTensor(weight)) { |
6452 | at::functionalization::impl::sync(weight); |
6453 | weight_ = at::functionalization::impl::from_functional_tensor(weight); |
6454 | } else { |
6455 | weight_ = weight; |
6456 | } |
6457 | |
6458 | c10::optional<at::Tensor> bias_; |
6459 | if (at::functionalization::impl::isFunctionalTensor(bias)) { |
6460 | at::functionalization::impl::sync(bias); |
6461 | bias_ = at::functionalization::impl::from_functional_tensor(bias); |
6462 | } else { |
6463 | bias_ = bias; |
6464 | } |
6465 | |
6466 | at::Tensor out_; |
6467 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
6468 | at::functionalization::impl::sync(out); |
6469 | out_ = at::functionalization::impl::from_functional_tensor(out); |
6470 | } else { |
6471 | out_ = out; |
6472 | } |
6473 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
6474 | if ((false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(weight) || at::functionalization::impl::isFunctionalTensor(bias))) { |
6475 | // case 1: trying to mutate a non-functional tensor with a functional tensor is an error |
6476 | TORCH_INTERNAL_ASSERT(false, |
6477 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
6478 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
6479 | } else { |
6480 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
6481 | at::AutoDispatchSkipFunctionalize guard; |
6482 | at::Tensor tmp_output = at::_ops::miopen_depthwise_convolution_out::call(self_, weight_, bias_, padding, stride, dilation, groups, benchmark, deterministic, out_); |
6483 | return out; |
6484 | } |
6485 | } else { |
6486 | at::Tensor tmp_output; |
6487 | { |
6488 | at::AutoDispatchSkipFunctionalize guard; |
6489 | tmp_output = at::_ops::miopen_depthwise_convolution::call(self_, weight_, bias_, padding, stride, dilation, groups, benchmark, deterministic); |
6490 | } |
6491 | at::functionalization::impl::replace_(out, tmp_output); |
6492 | at::functionalization::impl::commit_update(out); |
6493 | at::functionalization::impl::sync(out); |
6494 | return out; |
6495 | } |
6496 | } |
6497 | |
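// Editorial note: miopen_rnn_backward_out_out below returns void and takes an
// at::TensorList output (out3). TensorList arguments are materialized as
// ::std::vector<at::Tensor>, either unwrapped from their functional wrappers or copied
// out with .vec(), and because the schema has no returns the kernel simply ends after
// committing its updates.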
6498 | void miopen_rnn_backward_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, at::TensorList weight, int64_t weight_stride0, const at::Tensor & weight_buf, const at::Tensor & hx, const c10::optional<at::Tensor> & cx, const at::Tensor & output, const c10::optional<at::Tensor> & grad_output, const c10::optional<at::Tensor> & grad_hy, const c10::optional<at::Tensor> & grad_cy, int64_t mode, int64_t hidden_size, int64_t num_layers, bool batch_first, double dropout, bool train, bool bidirectional, at::IntArrayRef batch_sizes, const c10::optional<at::Tensor> & dropout_state, const at::Tensor & reserve, ::std::array<bool,4> output_mask, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, at::TensorList out3) { |
6499 | if (false) { |
6500 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
6501 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
6502 | // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
6503 | auto input_meta = to_meta(input); |
6504 | auto weight_meta = to_meta(weight); |
6505 | auto weight_buf_meta = to_meta(weight_buf); |
6506 | auto hx_meta = to_meta(hx); |
6507 | auto cx_meta = to_meta(cx); |
6508 | auto output_meta = to_meta(output); |
6509 | auto grad_output_meta = to_meta(grad_output); |
6510 | auto grad_hy_meta = to_meta(grad_hy); |
6511 | auto grad_cy_meta = to_meta(grad_cy); |
6512 | auto dropout_state_meta = to_meta(dropout_state); |
6513 | auto reserve_meta = to_meta(reserve); |
6514 | auto out0_meta = to_meta(out0); |
6515 | auto out1_meta = to_meta(out1); |
6516 | auto out2_meta = to_meta(out2); |
6517 | auto out3_meta = to_meta(out3); |
6518 | at::AutoDispatchSkipFunctionalize func_guard; |
6519 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
6520 | at::_ops::miopen_rnn_backward_out::call(input_meta, weight_meta, weight_stride0, weight_buf_meta, hx_meta, cx_meta, output_meta, grad_output_meta, grad_hy_meta, grad_cy_meta, mode, hidden_size, num_layers, batch_first, dropout, train, bidirectional, batch_sizes, dropout_state_meta, reserve_meta, output_mask, out0_meta, out1_meta, out2_meta, out3_meta); |
6521 | } |
6522 | |
6523 | at::Tensor input_; |
6524 | if (at::functionalization::impl::isFunctionalTensor(input)) { |
6525 | at::functionalization::impl::sync(input); |
6526 | input_ = at::functionalization::impl::from_functional_tensor(input); |
6527 | } else { |
6528 | input_ = input; |
6529 | } |
6530 | |
6531 | ::std::vector<at::Tensor> weight_; |
6532 | if (at::functionalization::impl::isFunctionalTensor(weight)) { |
6533 | at::functionalization::impl::sync(weight); |
6534 | weight_ = at::functionalization::impl::from_functional_tensor(weight); |
6535 | } else { |
6536 | weight_ = weight.vec(); |
6537 | } |
6538 | |
6539 | at::Tensor weight_buf_; |
6540 | if (at::functionalization::impl::isFunctionalTensor(weight_buf)) { |
6541 | at::functionalization::impl::sync(weight_buf); |
6542 | weight_buf_ = at::functionalization::impl::from_functional_tensor(weight_buf); |
6543 | } else { |
6544 | weight_buf_ = weight_buf; |
6545 | } |
6546 | |
6547 | at::Tensor hx_; |
6548 | if (at::functionalization::impl::isFunctionalTensor(hx)) { |
6549 | at::functionalization::impl::sync(hx); |
6550 | hx_ = at::functionalization::impl::from_functional_tensor(hx); |
6551 | } else { |
6552 | hx_ = hx; |
6553 | } |
6554 | |
6555 | c10::optional<at::Tensor> cx_; |
6556 | if (at::functionalization::impl::isFunctionalTensor(cx)) { |
6557 | at::functionalization::impl::sync(cx); |
6558 | cx_ = at::functionalization::impl::from_functional_tensor(cx); |
6559 | } else { |
6560 | cx_ = cx; |
6561 | } |
6562 | |
6563 | at::Tensor output_; |
6564 | if (at::functionalization::impl::isFunctionalTensor(output)) { |
6565 | at::functionalization::impl::sync(output); |
6566 | output_ = at::functionalization::impl::from_functional_tensor(output); |
6567 | } else { |
6568 | output_ = output; |
6569 | } |
6570 | |
6571 | c10::optional<at::Tensor> grad_output_; |
6572 | if (at::functionalization::impl::isFunctionalTensor(grad_output)) { |
6573 | at::functionalization::impl::sync(grad_output); |
6574 | grad_output_ = at::functionalization::impl::from_functional_tensor(grad_output); |
6575 | } else { |
6576 | grad_output_ = grad_output; |
6577 | } |
6578 | |
6579 | c10::optional<at::Tensor> grad_hy_; |
6580 | if (at::functionalization::impl::isFunctionalTensor(grad_hy)) { |
6581 | at::functionalization::impl::sync(grad_hy); |
6582 | grad_hy_ = at::functionalization::impl::from_functional_tensor(grad_hy); |
6583 | } else { |
6584 | grad_hy_ = grad_hy; |
6585 | } |
6586 | |
6587 | c10::optional<at::Tensor> grad_cy_; |
6588 | if (at::functionalization::impl::isFunctionalTensor(grad_cy)) { |
6589 | at::functionalization::impl::sync(grad_cy); |
6590 | grad_cy_ = at::functionalization::impl::from_functional_tensor(grad_cy); |
6591 | } else { |
6592 | grad_cy_ = grad_cy; |
6593 | } |
6594 | |
6595 | c10::optional<at::Tensor> dropout_state_; |
6596 | if (at::functionalization::impl::isFunctionalTensor(dropout_state)) { |
6597 | at::functionalization::impl::sync(dropout_state); |
6598 | dropout_state_ = at::functionalization::impl::from_functional_tensor(dropout_state); |
6599 | } else { |
6600 | dropout_state_ = dropout_state; |
6601 | } |
6602 | |
6603 | at::Tensor reserve_; |
6604 | if (at::functionalization::impl::isFunctionalTensor(reserve)) { |
6605 | at::functionalization::impl::sync(reserve); |
6606 | reserve_ = at::functionalization::impl::from_functional_tensor(reserve); |
6607 | } else { |
6608 | reserve_ = reserve; |
6609 | } |
6610 | |
6611 | at::Tensor out0_; |
6612 | if (at::functionalization::impl::isFunctionalTensor(out0)) { |
6613 | at::functionalization::impl::sync(out0); |
6614 | out0_ = at::functionalization::impl::from_functional_tensor(out0); |
6615 | } else { |
6616 | out0_ = out0; |
6617 | } |
6618 | |
6619 | at::Tensor out1_; |
6620 | if (at::functionalization::impl::isFunctionalTensor(out1)) { |
6621 | at::functionalization::impl::sync(out1); |
6622 | out1_ = at::functionalization::impl::from_functional_tensor(out1); |
6623 | } else { |
6624 | out1_ = out1; |
6625 | } |
6626 | |
6627 | at::Tensor out2_; |
6628 | if (at::functionalization::impl::isFunctionalTensor(out2)) { |
6629 | at::functionalization::impl::sync(out2); |
6630 | out2_ = at::functionalization::impl::from_functional_tensor(out2); |
6631 | } else { |
6632 | out2_ = out2; |
6633 | } |
6634 | |
6635 | ::std::vector<at::Tensor> out3_; |
6636 | if (at::functionalization::impl::isFunctionalTensor(out3)) { |
6637 | at::functionalization::impl::sync(out3); |
6638 | out3_ = at::functionalization::impl::from_functional_tensor(out3); |
6639 | } else { |
6640 | out3_ = out3.vec(); |
6641 | } |
6642 | if (!(true && at::functionalization::impl::isFunctionalTensor(out0) && at::functionalization::impl::isFunctionalTensor(out1) && at::functionalization::impl::isFunctionalTensor(out2) && at::functionalization::impl::isFunctionalTensor(out3))) { |
6643 | if ((false || at::functionalization::impl::isFunctionalTensor(input) || at::functionalization::impl::isFunctionalTensor(weight) || at::functionalization::impl::isFunctionalTensor(weight_buf) || at::functionalization::impl::isFunctionalTensor(hx) || at::functionalization::impl::isFunctionalTensor(cx) || at::functionalization::impl::isFunctionalTensor(output) || at::functionalization::impl::isFunctionalTensor(grad_output) || at::functionalization::impl::isFunctionalTensor(grad_hy) || at::functionalization::impl::isFunctionalTensor(grad_cy) || at::functionalization::impl::isFunctionalTensor(dropout_state) || at::functionalization::impl::isFunctionalTensor(reserve))) { |
6644 | // case 1: trying to mutate a non-functional tensor with a functional tensor is an error |
6645 | TORCH_INTERNAL_ASSERT(false, |
6646 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
6647 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
6648 | } else { |
6649 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
6650 | at::AutoDispatchSkipFunctionalize guard; |
6651 | at::_ops::miopen_rnn_backward_out::call(input_, weight_, weight_stride0, weight_buf_, hx_, cx_, output_, grad_output_, grad_hy_, grad_cy_, mode, hidden_size, num_layers, batch_first, dropout, train, bidirectional, batch_sizes, dropout_state_, reserve_, output_mask, out0_, out1_, out2_, out3_); |
6653 | } |
6654 | } else { |
6655 | ::std::tuple<at::Tensor,at::Tensor,at::Tensor,::std::vector<at::Tensor>> tmp_output; |
6656 | { |
6657 | at::AutoDispatchSkipFunctionalize guard; |
6658 | tmp_output = at::_ops::miopen_rnn_backward::call(input_, weight_, weight_stride0, weight_buf_, hx_, cx_, output_, grad_output_, grad_hy_, grad_cy_, mode, hidden_size, num_layers, batch_first, dropout, train, bidirectional, batch_sizes, dropout_state_, reserve_, output_mask); |
6659 | } |
6660 | at::functionalization::impl::replace_(out0, std::get<0>(tmp_output)); |
6661 | at::functionalization::impl::commit_update(out0); |
6662 | at::functionalization::impl::sync(out0); |
6663 | at::functionalization::impl::replace_(out1, std::get<1>(tmp_output)); |
6664 | at::functionalization::impl::commit_update(out1); |
6665 | at::functionalization::impl::sync(out1); |
6666 | at::functionalization::impl::replace_(out2, std::get<2>(tmp_output)); |
6667 | at::functionalization::impl::commit_update(out2); |
6668 | at::functionalization::impl::sync(out2); |
6669 | at::functionalization::impl::replace_(out3, std::get<3>(tmp_output)); |
6670 | at::functionalization::impl::commit_update(out3); |
6671 | at::functionalization::impl::sync(out3); |
6672 | |
6673 | } |
6674 | } |
6675 | |
6676 | ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> native_batch_norm_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, const c10::optional<at::Tensor> & running_mean, const c10::optional<at::Tensor> & running_var, bool training, double momentum, double eps, at::Tensor & out, at::Tensor & save_mean, at::Tensor & save_invstd) { |
6677 | if (false) { |
6678 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
6679 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
6680 | // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
6681 | auto input_meta = to_meta(input); |
6682 | auto weight_meta = to_meta(weight); |
6683 | auto bias_meta = to_meta(bias); |
6684 | auto running_mean_meta = to_meta(running_mean); |
6685 | auto running_var_meta = to_meta(running_var); |
6686 | auto out_meta = to_meta(out); |
6687 | auto save_mean_meta = to_meta(save_mean); |
6688 | auto save_invstd_meta = to_meta(save_invstd); |
6689 | at::AutoDispatchSkipFunctionalize func_guard; |
6690 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
6691 | at::_ops::native_batch_norm_out::call(input_meta, weight_meta, bias_meta, running_mean_meta, running_var_meta, training, momentum, eps, out_meta, save_mean_meta, save_invstd_meta); |
6692 | } |
6693 | |
6694 | at::Tensor input_; |
6695 | if (at::functionalization::impl::isFunctionalTensor(input)) { |
6696 | at::functionalization::impl::sync(input); |
6697 | input_ = at::functionalization::impl::from_functional_tensor(input); |
6698 | } else { |
6699 | input_ = input; |
6700 | } |
6701 | |
6702 | c10::optional<at::Tensor> weight_; |
6703 | if (at::functionalization::impl::isFunctionalTensor(weight)) { |
6704 | at::functionalization::impl::sync(weight); |
6705 | weight_ = at::functionalization::impl::from_functional_tensor(weight); |
6706 | } else { |
6707 | weight_ = weight; |
6708 | } |
6709 | |
6710 | c10::optional<at::Tensor> bias_; |
6711 | if (at::functionalization::impl::isFunctionalTensor(bias)) { |
6712 | at::functionalization::impl::sync(bias); |
6713 | bias_ = at::functionalization::impl::from_functional_tensor(bias); |
6714 | } else { |
6715 | bias_ = bias; |
6716 | } |
6717 | |
6718 | c10::optional<at::Tensor> running_mean_; |
6719 | if (at::functionalization::impl::isFunctionalTensor(running_mean)) { |
6720 | at::functionalization::impl::sync(running_mean); |
6721 | running_mean_ = at::functionalization::impl::from_functional_tensor(running_mean); |
6722 | } else { |
6723 | running_mean_ = running_mean; |
6724 | } |
6725 | |
6726 | c10::optional<at::Tensor> running_var_; |
6727 | if (at::functionalization::impl::isFunctionalTensor(running_var)) { |
6728 | at::functionalization::impl::sync(running_var); |
6729 | running_var_ = at::functionalization::impl::from_functional_tensor(running_var); |
6730 | } else { |
6731 | running_var_ = running_var; |
6732 | } |
6733 | |
6734 | at::Tensor out_; |
6735 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
6736 | at::functionalization::impl::sync(out); |
6737 | out_ = at::functionalization::impl::from_functional_tensor(out); |
6738 | } else { |
6739 | out_ = out; |
6740 | } |
6741 | |
6742 | at::Tensor save_mean_; |
6743 | if (at::functionalization::impl::isFunctionalTensor(save_mean)) { |
6744 | at::functionalization::impl::sync(save_mean); |
6745 | save_mean_ = at::functionalization::impl::from_functional_tensor(save_mean); |
6746 | } else { |
6747 | save_mean_ = save_mean; |
6748 | } |
6749 | |
6750 | at::Tensor save_invstd_; |
6751 | if (at::functionalization::impl::isFunctionalTensor(save_invstd)) { |
6752 | at::functionalization::impl::sync(save_invstd); |
6753 | save_invstd_ = at::functionalization::impl::from_functional_tensor(save_invstd); |
6754 | } else { |
6755 | save_invstd_ = save_invstd; |
6756 | } |
6757 | if (!(true && at::functionalization::impl::isFunctionalTensor(out) && at::functionalization::impl::isFunctionalTensor(save_mean) && at::functionalization::impl::isFunctionalTensor(save_invstd))) { |
6758 | if ((false || at::functionalization::impl::isFunctionalTensor(input) || at::functionalization::impl::isFunctionalTensor(weight) || at::functionalization::impl::isFunctionalTensor(bias) || at::functionalization::impl::isFunctionalTensor(running_mean) || at::functionalization::impl::isFunctionalTensor(running_var))) { |
6759 | // case 1: trying to mutate a non-functional tensor with a functional tensor is an error |
6760 | TORCH_INTERNAL_ASSERT(false, |
6761 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
6762 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
6763 | } else { |
6764 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
6765 | at::AutoDispatchSkipFunctionalize guard; |
6766 | ::std::tuple<at::Tensor,at::Tensor,at::Tensor> tmp_output = at::_ops::native_batch_norm_out::call(input_, weight_, bias_, running_mean_, running_var_, training, momentum, eps, out_, save_mean_, save_invstd_); |
6767 | return ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &>(out, save_mean, save_invstd); |
6768 | } |
6769 | } else { |
6770 | ::std::tuple<at::Tensor,at::Tensor,at::Tensor> tmp_output; |
6771 | { |
6772 | at::AutoDispatchSkipFunctionalize guard; |
6773 | tmp_output = at::_ops::native_batch_norm::call(input_, weight_, bias_, running_mean_, running_var_, training, momentum, eps); |
6774 | } |
6775 | at::functionalization::impl::replace_(out, std::get<0>(tmp_output)); |
6776 | at::functionalization::impl::commit_update(out); |
6777 | at::functionalization::impl::sync(out); |
6778 | at::functionalization::impl::replace_(save_mean, std::get<1>(tmp_output)); |
6779 | at::functionalization::impl::commit_update(save_mean); |
6780 | at::functionalization::impl::sync(save_mean); |
6781 | at::functionalization::impl::replace_(save_invstd, std::get<2>(tmp_output)); |
6782 | at::functionalization::impl::commit_update(save_invstd); |
6783 | at::functionalization::impl::sync(save_invstd); |
6784 | return ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &>(out, save_mean, save_invstd); |
6785 | } |
6786 | } |
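// Illustrative sketch only (not emitted by torchgen): every out= wrapper in this file follows the
// same shape as native_batch_norm_out_out above -- sync and unwrap each functional argument, then
// either redispatch to the original out= overload (when `out` is not a functional tensor) or run
// the purely functional variant and write the result back via replace_/commit_update/sync.
// Assuming tensors are wrapped manually with the helpers declared in FunctionalTensorWrapper.h,
// the surrounding usage would look roughly like:
//   at::Tensor t = at::randn({2, 3});
//   at::Tensor ft = at::functionalization::impl::to_functional_tensor(t);
//   // ... dispatch ops on `ft` with the Functionalize key enabled ...
//   at::functionalization::impl::sync(ft);
//   at::Tensor result = at::functionalization::impl::from_functional_tensor(ft);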
6787 | |
6788 | at::Tensor & batch_norm_elemt_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, const at::Tensor & mean, const at::Tensor & invstd, double eps, at::Tensor & out) { |
6789 | if (false) { |
6790 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
6791 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
6792 | // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
6793 | auto input_meta = to_meta(input); |
6794 | auto weight_meta = to_meta(weight); |
6795 | auto bias_meta = to_meta(bias); |
6796 | auto mean_meta = to_meta(mean); |
6797 | auto invstd_meta = to_meta(invstd); |
6798 | auto out_meta = to_meta(out); |
6799 | at::AutoDispatchSkipFunctionalize func_guard; |
6800 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
6801 | at::_ops::batch_norm_elemt_out::call(input_meta, weight_meta, bias_meta, mean_meta, invstd_meta, eps, out_meta); |
6802 | } |
6803 | |
6804 | at::Tensor input_; |
6805 | if (at::functionalization::impl::isFunctionalTensor(input)) { |
6806 | at::functionalization::impl::sync(input); |
6807 | input_ = at::functionalization::impl::from_functional_tensor(input); |
6808 | } else { |
6809 | input_ = input; |
6810 | } |
6811 | |
6812 | c10::optional<at::Tensor> weight_; |
6813 | if (at::functionalization::impl::isFunctionalTensor(weight)) { |
6814 | at::functionalization::impl::sync(weight); |
6815 | weight_ = at::functionalization::impl::from_functional_tensor(weight); |
6816 | } else { |
6817 | weight_ = weight; |
6818 | } |
6819 | |
6820 | c10::optional<at::Tensor> bias_; |
6821 | if (at::functionalization::impl::isFunctionalTensor(bias)) { |
6822 | at::functionalization::impl::sync(bias); |
6823 | bias_ = at::functionalization::impl::from_functional_tensor(bias); |
6824 | } else { |
6825 | bias_ = bias; |
6826 | } |
6827 | |
6828 | at::Tensor mean_; |
6829 | if (at::functionalization::impl::isFunctionalTensor(mean)) { |
6830 | at::functionalization::impl::sync(mean); |
6831 | mean_ = at::functionalization::impl::from_functional_tensor(mean); |
6832 | } else { |
6833 | mean_ = mean; |
6834 | } |
6835 | |
6836 | at::Tensor invstd_; |
6837 | if (at::functionalization::impl::isFunctionalTensor(invstd)) { |
6838 | at::functionalization::impl::sync(invstd); |
6839 | invstd_ = at::functionalization::impl::from_functional_tensor(invstd); |
6840 | } else { |
6841 | invstd_ = invstd; |
6842 | } |
6843 | |
6844 | at::Tensor out_; |
6845 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
6846 | at::functionalization::impl::sync(out); |
6847 | out_ = at::functionalization::impl::from_functional_tensor(out); |
6848 | } else { |
6849 | out_ = out; |
6850 | } |
6851 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
6852 | if ((false || at::functionalization::impl::isFunctionalTensor(input) || at::functionalization::impl::isFunctionalTensor(weight) || at::functionalization::impl::isFunctionalTensor(bias) || at::functionalization::impl::isFunctionalTensor(mean) || at::functionalization::impl::isFunctionalTensor(invstd))) { |
6853 | // case 1: trying to mutate a non-functional tensor with a functional tensor is an error |
6854 | TORCH_INTERNAL_ASSERT(false, |
6855 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
6856 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
6857 | } else { |
6858 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
6859 | at::AutoDispatchSkipFunctionalize guard; |
6860 | at::Tensor tmp_output = at::_ops::batch_norm_elemt_out::call(input_, weight_, bias_, mean_, invstd_, eps, out_); |
6861 | return out; |
6862 | } |
6863 | } else { |
6864 | at::Tensor tmp_output; |
6865 | { |
6866 | at::AutoDispatchSkipFunctionalize guard; |
6867 | tmp_output = at::_ops::batch_norm_elemt::call(input_, weight_, bias_, mean_, invstd_, eps); |
6868 | } |
6869 | at::functionalization::impl::replace_(out, tmp_output); |
6870 | at::functionalization::impl::commit_update(out); |
6871 | at::functionalization::impl::sync(out); |
6872 | return out; |
6873 | } |
6874 | } |
6875 | |
6876 | ::std::tuple<at::Tensor &,at::Tensor &> batch_norm_update_stats_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const c10::optional<at::Tensor> & running_mean, const c10::optional<at::Tensor> & running_var, double momentum, at::Tensor & out0, at::Tensor & out1) { |
6877 | if (false) { |
6878 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
6879 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
6880 | // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
6881 | auto input_meta = to_meta(input); |
6882 | auto running_mean_meta = to_meta(running_mean); |
6883 | auto running_var_meta = to_meta(running_var); |
6884 | auto out0_meta = to_meta(out0); |
6885 | auto out1_meta = to_meta(out1); |
6886 | at::AutoDispatchSkipFunctionalize func_guard; |
6887 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
6888 | at::_ops::batch_norm_update_stats_out::call(input_meta, running_mean_meta, running_var_meta, momentum, out0_meta, out1_meta); |
6889 | } |
6890 | |
6891 | at::Tensor input_; |
6892 | if (at::functionalization::impl::isFunctionalTensor(input)) { |
6893 | at::functionalization::impl::sync(input); |
6894 | input_ = at::functionalization::impl::from_functional_tensor(input); |
6895 | } else { |
6896 | input_ = input; |
6897 | } |
6898 | |
6899 | c10::optional<at::Tensor> running_mean_; |
6900 | if (at::functionalization::impl::isFunctionalTensor(running_mean)) { |
6901 | at::functionalization::impl::sync(running_mean); |
6902 | running_mean_ = at::functionalization::impl::from_functional_tensor(running_mean); |
6903 | } else { |
6904 | running_mean_ = running_mean; |
6905 | } |
6906 | |
6907 | c10::optional<at::Tensor> running_var_; |
6908 | if (at::functionalization::impl::isFunctionalTensor(running_var)) { |
6909 | at::functionalization::impl::sync(running_var); |
6910 | running_var_ = at::functionalization::impl::from_functional_tensor(running_var); |
6911 | } else { |
6912 | running_var_ = running_var; |
6913 | } |
6914 | |
6915 | at::Tensor out0_; |
6916 | if (at::functionalization::impl::isFunctionalTensor(out0)) { |
6917 | at::functionalization::impl::sync(out0); |
6918 | out0_ = at::functionalization::impl::from_functional_tensor(out0); |
6919 | } else { |
6920 | out0_ = out0; |
6921 | } |
6922 | |
6923 | at::Tensor out1_; |
6924 | if (at::functionalization::impl::isFunctionalTensor(out1)) { |
6925 | at::functionalization::impl::sync(out1); |
6926 | out1_ = at::functionalization::impl::from_functional_tensor(out1); |
6927 | } else { |
6928 | out1_ = out1; |
6929 | } |
6930 | if (!(true && at::functionalization::impl::isFunctionalTensor(out0) && at::functionalization::impl::isFunctionalTensor(out1))) { |
6931 | if ((false || at::functionalization::impl::isFunctionalTensor(input) || at::functionalization::impl::isFunctionalTensor(running_mean) || at::functionalization::impl::isFunctionalTensor(running_var))) { |
6932 | // case 1: trying to mutate a non-functional tensor with a functional tensor is an error |
6933 | TORCH_INTERNAL_ASSERT(false, |
6934 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
6935 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
6936 | } else { |
6937 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
6938 | at::AutoDispatchSkipFunctionalize guard; |
6939 | ::std::tuple<at::Tensor,at::Tensor> tmp_output = at::_ops::batch_norm_update_stats_out::call(input_, running_mean_, running_var_, momentum, out0_, out1_); |
6940 | return ::std::tuple<at::Tensor &,at::Tensor &>(out0, out1); |
6941 | } |
6942 | } else { |
6943 | ::std::tuple<at::Tensor,at::Tensor> tmp_output; |
6944 | { |
6945 | at::AutoDispatchSkipFunctionalize guard; |
6946 | tmp_output = at::_ops::batch_norm_update_stats::call(input_, running_mean_, running_var_, momentum); |
6947 | } |
6948 | at::functionalization::impl::replace_(out0, std::get<0>(tmp_output)); |
6949 | at::functionalization::impl::commit_update(out0); |
6950 | at::functionalization::impl::sync(out0); |
6951 | at::functionalization::impl::replace_(out1, std::get<1>(tmp_output)); |
6952 | at::functionalization::impl::commit_update(out1); |
6953 | at::functionalization::impl::sync(out1); |
6954 | return ::std::tuple<at::Tensor &,at::Tensor &>(out0, out1); |
6955 | } |
6956 | } |
6957 | |
6958 | at::Tensor & _nnpack_spatial_convolution_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, c10::SymIntArrayRef padding, at::IntArrayRef stride, at::Tensor & out) { |
6959 | if (false) { |
6960 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
6961 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
6962 | // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
6963 | auto input_meta = to_meta(input); |
6964 | auto weight_meta = to_meta(weight); |
6965 | auto bias_meta = to_meta(bias); |
6966 | auto out_meta = to_meta(out); |
6967 | at::AutoDispatchSkipFunctionalize func_guard; |
6968 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
6969 | at::_ops::_nnpack_spatial_convolution_out::call(input_meta, weight_meta, bias_meta, padding, stride, out_meta); |
6970 | } |
6971 | |
6972 | at::Tensor input_; |
6973 | if (at::functionalization::impl::isFunctionalTensor(input)) { |
6974 | at::functionalization::impl::sync(input); |
6975 | input_ = at::functionalization::impl::from_functional_tensor(input); |
6976 | } else { |
6977 | input_ = input; |
6978 | } |
6979 | |
6980 | at::Tensor weight_; |
6981 | if (at::functionalization::impl::isFunctionalTensor(weight)) { |
6982 | at::functionalization::impl::sync(weight); |
6983 | weight_ = at::functionalization::impl::from_functional_tensor(weight); |
6984 | } else { |
6985 | weight_ = weight; |
6986 | } |
6987 | |
6988 | c10::optional<at::Tensor> bias_; |
6989 | if (at::functionalization::impl::isFunctionalTensor(bias)) { |
6990 | at::functionalization::impl::sync(bias); |
6991 | bias_ = at::functionalization::impl::from_functional_tensor(bias); |
6992 | } else { |
6993 | bias_ = bias; |
6994 | } |
6995 | |
6996 | at::Tensor out_; |
6997 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
6998 | at::functionalization::impl::sync(out); |
6999 | out_ = at::functionalization::impl::from_functional_tensor(out); |
7000 | } else { |
7001 | out_ = out; |
7002 | } |
7003 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
7004 | if ((false || at::functionalization::impl::isFunctionalTensor(input) || at::functionalization::impl::isFunctionalTensor(weight) || at::functionalization::impl::isFunctionalTensor(bias))) { |
7005 | // case 1: trying to mutate a non-functional tensor with a functional tensor is an error |
7006 | TORCH_INTERNAL_ASSERT(false, |
7007 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
7008 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
7009 | } else { |
7010 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
7011 | at::AutoDispatchSkipFunctionalize guard; |
7012 | at::Tensor tmp_output = at::_ops::_nnpack_spatial_convolution_out::call(input_, weight_, bias_, padding, stride, out_); |
7013 | return out; |
7014 | } |
7015 | } else { |
7016 | at::Tensor tmp_output; |
7017 | { |
7018 | at::AutoDispatchSkipFunctionalize guard; |
7019 | tmp_output = at::_ops::_nnpack_spatial_convolution::call(input_, weight_, bias_, padding, stride); |
7020 | } |
7021 | at::functionalization::impl::replace_(out, tmp_output); |
7022 | at::functionalization::impl::commit_update(out); |
7023 | at::functionalization::impl::sync(out); |
7024 | return out; |
7025 | } |
7026 | } |
7027 | |
7028 | at::Tensor & ones_like_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::optional<at::MemoryFormat> memory_format, at::Tensor & out) { |
7029 | if (false) { |
7030 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
7031 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
7032 | // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
7033 | auto self_meta = to_meta(self); |
7034 | auto out_meta = to_meta(out); |
7035 | at::AutoDispatchSkipFunctionalize func_guard; |
7036 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
7037 | at::_ops::ones_like_out::call(self_meta, memory_format, out_meta); |
7038 | } |
7039 | |
7040 | at::Tensor self_; |
7041 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
7042 | at::functionalization::impl::sync(self); |
7043 | self_ = at::functionalization::impl::from_functional_tensor(self); |
7044 | } else { |
7045 | self_ = self; |
7046 | } |
7047 | |
7048 | at::Tensor out_; |
7049 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
7050 | at::functionalization::impl::sync(out); |
7051 | out_ = at::functionalization::impl::from_functional_tensor(out); |
7052 | } else { |
7053 | out_ = out; |
7054 | } |
7055 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
7056 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
7057 | // case 1: trying to mutate a non-functional tensor with a functional tensor is an error |
7058 | TORCH_INTERNAL_ASSERT(false, |
7059 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
7060 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
7061 | } else { |
7062 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
7063 | at::AutoDispatchSkipFunctionalize guard; |
7064 | at::Tensor tmp_output = at::_ops::ones_like_out::call(self_, memory_format, out_); |
7065 | return out; |
7066 | } |
7067 | } else { |
7068 | at::Tensor tmp_output; |
7069 | { |
7070 | at::AutoDispatchSkipFunctionalize guard; |
7071 | tmp_output = at::_ops::ones_like::call(self_, out_.scalar_type(), out_.layout(), out_.device(), c10::nullopt, memory_format); |
7072 | } |
7073 | at::functionalization::impl::replace_(out, tmp_output); |
7074 | at::functionalization::impl::commit_update(out); |
7075 | at::functionalization::impl::sync(out); |
7076 | return out; |
7077 | } |
7078 | } |
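// Sketch only: ones_like above and the factory-style out= kernels that follow (randint, randperm,
// range) cannot rebuild the result from the non-out arguments alone -- the output dtype, layout,
// and device live on the `out` buffer -- so the generated code recovers them from `out_` and
// passes c10::nullopt for pin_memory. A rough eager-mode equivalent (an illustration, not
// generated code) would be:
//   at::Tensor ref = at::ones_like(self, out.options(), memory_format);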
7079 | |
7080 | at::Tensor & channel_shuffle_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t groups, at::Tensor & out) { |
7081 | if (false) { |
7082 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
7083 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
7084 | // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
7085 | auto self_meta = to_meta(self); |
7086 | auto out_meta = to_meta(out); |
7087 | at::AutoDispatchSkipFunctionalize func_guard; |
7088 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
7089 | at::_ops::channel_shuffle_out::call(self_meta, groups, out_meta); |
7090 | } |
7091 | |
7092 | at::Tensor self_; |
7093 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
7094 | at::functionalization::impl::sync(self); |
7095 | self_ = at::functionalization::impl::from_functional_tensor(self); |
7096 | } else { |
7097 | self_ = self; |
7098 | } |
7099 | |
7100 | at::Tensor out_; |
7101 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
7102 | at::functionalization::impl::sync(out); |
7103 | out_ = at::functionalization::impl::from_functional_tensor(out); |
7104 | } else { |
7105 | out_ = out; |
7106 | } |
7107 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
7108 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
7109 | // case 1: trying to mutate a non-functional tensor with a functional tensor is an error |
7110 | TORCH_INTERNAL_ASSERT(false, |
7111 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
7112 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
7113 | } else { |
7114 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
7115 | at::AutoDispatchSkipFunctionalize guard; |
7116 | at::Tensor tmp_output = at::_ops::channel_shuffle_out::call(self_, groups, out_); |
7117 | return out; |
7118 | } |
7119 | } else { |
7120 | at::Tensor tmp_output; |
7121 | { |
7122 | at::AutoDispatchSkipFunctionalize guard; |
7123 | tmp_output = at::_ops::channel_shuffle::call(self_, groups); |
7124 | } |
7125 | at::functionalization::impl::replace_(out, tmp_output); |
7126 | at::functionalization::impl::commit_update(out); |
7127 | at::functionalization::impl::sync(out); |
7128 | return out; |
7129 | } |
7130 | } |
7131 | |
7132 | at::Tensor & randint_out_out(c10::DispatchKeySet dispatchKeySet, int64_t high, c10::SymIntArrayRef size, at::Tensor & out) { |
7133 | if (false) { |
7134 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
7135 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
7136 | // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
7137 | auto out_meta = to_meta(out); |
7138 | at::AutoDispatchSkipFunctionalize func_guard; |
7139 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
7140 | at::_ops::randint_out::call(high, size, out_meta); |
7141 | } |
7142 | |
7143 | at::Tensor out_; |
7144 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
7145 | at::functionalization::impl::sync(out); |
7146 | out_ = at::functionalization::impl::from_functional_tensor(out); |
7147 | } else { |
7148 | out_ = out; |
7149 | } |
7150 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
7151 | if ((false)) { |
7152 | // case 1: trying to mutate a non-functional tensor with a functional tensor is an error |
7153 | TORCH_INTERNAL_ASSERT(false, |
7154 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
7155 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
7156 | } else { |
7157 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
7158 | at::AutoDispatchSkipFunctionalize guard; |
7159 | at::Tensor tmp_output = at::_ops::randint_out::call(high, size, out_); |
7160 | return out; |
7161 | } |
7162 | } else { |
7163 | at::Tensor tmp_output; |
7164 | { |
7165 | at::AutoDispatchSkipFunctionalize guard; |
7166 | tmp_output = at::_ops::randint::call(high, size, out_.scalar_type(), out_.layout(), out_.device(), c10::nullopt); |
7167 | } |
7168 | at::functionalization::impl::replace_(out, tmp_output); |
7169 | at::functionalization::impl::commit_update(out); |
7170 | at::functionalization::impl::sync(out); |
7171 | return out; |
7172 | } |
7173 | } |
7174 | |
7175 | at::Tensor & randint_out_generator_out(c10::DispatchKeySet dispatchKeySet, int64_t high, c10::SymIntArrayRef size, c10::optional<at::Generator> generator, at::Tensor & out) { |
7176 | if (false) { |
7177 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
7178 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
7179 | // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
7180 | auto out_meta = to_meta(out); |
7181 | at::AutoDispatchSkipFunctionalize func_guard; |
7182 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
7183 | at::_ops::randint_generator_out::call(high, size, generator, out_meta); |
7184 | } |
7185 | |
7186 | at::Tensor out_; |
7187 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
7188 | at::functionalization::impl::sync(out); |
7189 | out_ = at::functionalization::impl::from_functional_tensor(out); |
7190 | } else { |
7191 | out_ = out; |
7192 | } |
7193 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
7194 | if ((false)) { |
7195 | // case 1: trying to mutate a non-functional tensor with a functional tensor is an error |
7196 | TORCH_INTERNAL_ASSERT(false, |
7197 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
7198 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
7199 | } else { |
7200 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
7201 | at::AutoDispatchSkipFunctionalize guard; |
7202 | at::Tensor tmp_output = at::_ops::randint_generator_out::call(high, size, generator, out_); |
7203 | return out; |
7204 | } |
7205 | } else { |
7206 | at::Tensor tmp_output; |
7207 | { |
7208 | at::AutoDispatchSkipFunctionalize guard; |
7209 | tmp_output = at::_ops::randint_generator::call(high, size, generator, out_.scalar_type(), out_.layout(), out_.device(), c10::nullopt); |
7210 | } |
7211 | at::functionalization::impl::replace_(out, tmp_output); |
7212 | at::functionalization::impl::commit_update(out); |
7213 | at::functionalization::impl::sync(out); |
7214 | return out; |
7215 | } |
7216 | } |
7217 | |
7218 | at::Tensor & randint_out_low_out(c10::DispatchKeySet dispatchKeySet, int64_t low, int64_t high, c10::SymIntArrayRef size, at::Tensor & out) { |
7219 | if (false) { |
7220 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
7221 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
7222 | // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
7223 | auto out_meta = to_meta(out); |
7224 | at::AutoDispatchSkipFunctionalize func_guard; |
7225 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
7226 | at::_ops::randint_low_out::call(low, high, size, out_meta); |
7227 | } |
7228 | |
7229 | at::Tensor out_; |
7230 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
7231 | at::functionalization::impl::sync(out); |
7232 | out_ = at::functionalization::impl::from_functional_tensor(out); |
7233 | } else { |
7234 | out_ = out; |
7235 | } |
7236 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
7237 | if ((false)) { |
7238 | // case 1: trying to mutate a non-functional tensor with a functional tensor is an error |
7239 | TORCH_INTERNAL_ASSERT(false, |
7240 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
7241 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
7242 | } else { |
7243 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
7244 | at::AutoDispatchSkipFunctionalize guard; |
7245 | at::Tensor tmp_output = at::_ops::randint_low_out::call(low, high, size, out_); |
7246 | return out; |
7247 | } |
7248 | } else { |
7249 | at::Tensor tmp_output; |
7250 | { |
7251 | at::AutoDispatchSkipFunctionalize guard; |
7252 | tmp_output = at::_ops::randint_low::call(low, high, size, out_.scalar_type(), out_.layout(), out_.device(), c10::nullopt); |
7253 | } |
7254 | at::functionalization::impl::replace_(out, tmp_output); |
7255 | at::functionalization::impl::commit_update(out); |
7256 | at::functionalization::impl::sync(out); |
7257 | return out; |
7258 | } |
7259 | } |
7260 | |
7261 | at::Tensor & randint_out_low_generator_out(c10::DispatchKeySet dispatchKeySet, int64_t low, int64_t high, c10::SymIntArrayRef size, c10::optional<at::Generator> generator, at::Tensor & out) { |
7262 | if (false) { |
7263 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
7264 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
7265 | // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
7266 | auto out_meta = to_meta(out); |
7267 | at::AutoDispatchSkipFunctionalize func_guard; |
7268 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
7269 | at::_ops::randint_low_generator_out::call(low, high, size, generator, out_meta); |
7270 | } |
7271 | |
7272 | at::Tensor out_; |
7273 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
7274 | at::functionalization::impl::sync(out); |
7275 | out_ = at::functionalization::impl::from_functional_tensor(out); |
7276 | } else { |
7277 | out_ = out; |
7278 | } |
7279 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
7280 | if ((false)) { |
7281 | // case 1: trying to mutate a non-functional tensor with a functional tensor is an error |
7282 | TORCH_INTERNAL_ASSERT(false, |
7283 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
7284 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
7285 | } else { |
7286 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
7287 | at::AutoDispatchSkipFunctionalize guard; |
7288 | at::Tensor tmp_output = at::_ops::randint_low_generator_out::call(low, high, size, generator, out_); |
7289 | return out; |
7290 | } |
7291 | } else { |
7292 | at::Tensor tmp_output; |
7293 | { |
7294 | at::AutoDispatchSkipFunctionalize guard; |
7295 | tmp_output = at::_ops::randint_low_generator::call(low, high, size, generator, out_.scalar_type(), out_.layout(), out_.device(), c10::nullopt); |
7296 | } |
7297 | at::functionalization::impl::replace_(out, tmp_output); |
7298 | at::functionalization::impl::commit_update(out); |
7299 | at::functionalization::impl::sync(out); |
7300 | return out; |
7301 | } |
7302 | } |
7303 | |
7304 | at::Tensor & randperm_out_out(c10::DispatchKeySet dispatchKeySet, int64_t n, at::Tensor & out) { |
7305 | if (false) { |
7306 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
7307 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
7308 | // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
7309 | auto out_meta = to_meta(out); |
7310 | at::AutoDispatchSkipFunctionalize func_guard; |
7311 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
7312 | at::_ops::randperm_out::call(n, out_meta); |
7313 | } |
7314 | |
7315 | at::Tensor out_; |
7316 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
7317 | at::functionalization::impl::sync(out); |
7318 | out_ = at::functionalization::impl::from_functional_tensor(out); |
7319 | } else { |
7320 | out_ = out; |
7321 | } |
7322 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
7323 | if ((false)) { |
7324 | // case 1: trying to mutate a non-functional tensor with a functional tensor is an error |
7325 | TORCH_INTERNAL_ASSERT(false, |
7326 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
7327 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
7328 | } else { |
7329 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
7330 | at::AutoDispatchSkipFunctionalize guard; |
7331 | at::Tensor tmp_output = at::_ops::randperm_out::call(n, out_); |
7332 | return out; |
7333 | } |
7334 | } else { |
7335 | at::Tensor tmp_output; |
7336 | { |
7337 | at::AutoDispatchSkipFunctionalize guard; |
7338 | tmp_output = at::_ops::randperm::call(n, out_.scalar_type(), out_.layout(), out_.device(), c10::nullopt); |
7339 | } |
7340 | at::functionalization::impl::replace_(out, tmp_output); |
7341 | at::functionalization::impl::commit_update(out); |
7342 | at::functionalization::impl::sync(out); |
7343 | return out; |
7344 | } |
7345 | } |
7346 | |
7347 | at::Tensor & randperm_out_generator_out(c10::DispatchKeySet dispatchKeySet, int64_t n, c10::optional<at::Generator> generator, at::Tensor & out) { |
7348 | if (false) { |
7349 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
7350 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
7351 | // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
7352 | auto out_meta = to_meta(out); |
7353 | at::AutoDispatchSkipFunctionalize func_guard; |
7354 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
7355 | at::_ops::randperm_generator_out::call(n, generator, out_meta); |
7356 | } |
7357 | |
7358 | at::Tensor out_; |
7359 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
7360 | at::functionalization::impl::sync(out); |
7361 | out_ = at::functionalization::impl::from_functional_tensor(out); |
7362 | } else { |
7363 | out_ = out; |
7364 | } |
7365 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
7366 | if ((false)) { |
7367 | // case 1: trying to mutate a non-functional tensor with a functional tensor is an error |
7368 | TORCH_INTERNAL_ASSERT(false, |
7369 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
7370 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
7371 | } else { |
7372 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
7373 | at::AutoDispatchSkipFunctionalize guard; |
7374 | at::Tensor tmp_output = at::_ops::randperm_generator_out::call(n, generator, out_); |
7375 | return out; |
7376 | } |
7377 | } else { |
7378 | at::Tensor tmp_output; |
7379 | { |
7380 | at::AutoDispatchSkipFunctionalize guard; |
7381 | tmp_output = at::_ops::randperm_generator::call(n, generator, out_.scalar_type(), out_.layout(), out_.device(), c10::nullopt); |
7382 | } |
7383 | at::functionalization::impl::replace_(out, tmp_output); |
7384 | at::functionalization::impl::commit_update(out); |
7385 | at::functionalization::impl::sync(out); |
7386 | return out; |
7387 | } |
7388 | } |
7389 | |
7390 | at::Tensor & range_out_out(c10::DispatchKeySet dispatchKeySet, const at::Scalar & start, const at::Scalar & end, const at::Scalar & step, at::Tensor & out) { |
7391 | if (false) { |
7392 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
7393 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
7394 | // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
7395 | auto out_meta = to_meta(out); |
7396 | at::AutoDispatchSkipFunctionalize func_guard; |
7397 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
7398 | at::_ops::range_out::call(start, end, step, out_meta); |
7399 | } |
7400 | |
7401 | at::Tensor out_; |
7402 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
7403 | at::functionalization::impl::sync(out); |
7404 | out_ = at::functionalization::impl::from_functional_tensor(out); |
7405 | } else { |
7406 | out_ = out; |
7407 | } |
7408 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
7409 | if ((false)) { |
7410 | // case 1: trying to mutate a non-functional tensor with a functional tensor is an error |
7411 | TORCH_INTERNAL_ASSERT(false, |
7412 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
7413 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
7414 | } else { |
7415 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
7416 | at::AutoDispatchSkipFunctionalize guard; |
7417 | at::Tensor tmp_output = at::_ops::range_out::call(start, end, step, out_); |
7418 | return out; |
7419 | } |
7420 | } else { |
7421 | at::Tensor tmp_output; |
7422 | { |
7423 | at::AutoDispatchSkipFunctionalize guard; |
7424 | tmp_output = at::_ops::range_step::call(start, end, step, out_.scalar_type(), out_.layout(), out_.device(), c10::nullopt); |
7425 | } |
7426 | at::functionalization::impl::replace_(out, tmp_output); |
7427 | at::functionalization::impl::commit_update(out); |
7428 | at::functionalization::impl::sync(out); |
7429 | return out; |
7430 | } |
7431 | } |
7432 | |
7433 | at::Tensor & range_out_out_(c10::DispatchKeySet dispatchKeySet, const at::Scalar & start, const at::Scalar & end, at::Tensor & out) { |
7434 | if (false) { |
7435 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
7436 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
7437 | // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
7438 | auto out_meta = to_meta(out); |
7439 | at::AutoDispatchSkipFunctionalize func_guard; |
7440 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
7441 | at::_ops::range_out_::call(start, end, out_meta); |
7442 | } |
7443 | |
7444 | at::Tensor out_; |
7445 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
7446 | at::functionalization::impl::sync(out); |
7447 | out_ = at::functionalization::impl::from_functional_tensor(out); |
7448 | } else { |
7449 | out_ = out; |
7450 | } |
7451 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
7452 | if ((false)) { |
7453 | // case 1: trying to mutate a non-functional tensor with a functional tensor is an error |
7454 | TORCH_INTERNAL_ASSERT(false, |
7455 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
7456 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
7457 | } else { |
7458 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
7459 | at::AutoDispatchSkipFunctionalize guard; |
7460 | at::Tensor tmp_output = at::_ops::range_out_::call(start, end, out_); |
7461 | return out; |
7462 | } |
7463 | } else { |
7464 | at::Tensor tmp_output; |
7465 | { |
7466 | at::AutoDispatchSkipFunctionalize guard; |
7467 | tmp_output = at::_ops::range::call(start, end, out_.scalar_type(), out_.layout(), out_.device(), c10::nullopt); |
7468 | } |
7469 | at::functionalization::impl::replace_(out, tmp_output); |
7470 | at::functionalization::impl::commit_update(out); |
7471 | at::functionalization::impl::sync(out); |
7472 | return out; |
7473 | } |
7474 | } |
7475 | |
7476 | at::Tensor & reciprocal_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) { |
7477 | if (false) { |
7478 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
7479 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
7480 | // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
7481 | auto self_meta = to_meta(self); |
7482 | auto out_meta = to_meta(out); |
7483 | at::AutoDispatchSkipFunctionalize func_guard; |
7484 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
7485 | at::_ops::reciprocal_out::call(self_meta, out_meta); |
7486 | } |
7487 | |
7488 | at::Tensor self_; |
7489 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
7490 | at::functionalization::impl::sync(self); |
7491 | self_ = at::functionalization::impl::from_functional_tensor(self); |
7492 | } else { |
7493 | self_ = self; |
7494 | } |
7495 | |
7496 | at::Tensor out_; |
7497 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
7498 | at::functionalization::impl::sync(out); |
7499 | out_ = at::functionalization::impl::from_functional_tensor(out); |
7500 | } else { |
7501 | out_ = out; |
7502 | } |
7503 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
7504 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
7505 | // case 1: trying to mutate a non-functional tensor with a functional tensor is an error |
7506 | TORCH_INTERNAL_ASSERT(false, |
7507 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
7508 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
7509 | } else { |
7510 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
7511 | at::AutoDispatchSkipFunctionalize guard; |
7512 | at::Tensor tmp_output = at::_ops::reciprocal_out::call(self_, out_); |
7513 | return out; |
7514 | } |
7515 | } else { |
7516 | at::Tensor tmp_output; |
7517 | { |
7518 | at::AutoDispatchSkipFunctionalize guard; |
7519 | tmp_output = at::_ops::reciprocal::call(self_); |
7520 | } |
7521 | at::functionalization::impl::replace_(out, tmp_output); |
7522 | at::functionalization::impl::commit_update(out); |
7523 | at::functionalization::impl::sync(out); |
7524 | return out; |
7525 | } |
7526 | } |
7527 | |
7528 | at::Tensor & reciprocal_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) { |
7529 | if (true) { |
7530 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
7531 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
7532 | // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
7533 | auto self_meta = to_meta(self); |
7534 | at::AutoDispatchSkipFunctionalize func_guard; |
7535 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
7536 | at::_ops::reciprocal_::call(self_meta); |
7537 | } |
7538 | |
7539 | at::Tensor self_; |
7540 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
7541 | at::functionalization::impl::sync(self); |
7542 | self_ = at::functionalization::impl::from_functional_tensor(self); |
7543 | } else { |
7544 | self_ = self; |
7545 | } |
7546 | if (!(true && at::functionalization::impl::isFunctionalTensor(self))) { |
7547 | if ((false)) { |
7548 | // case 1: trying to mutate a non-functional tensor with a functional tensor is an error |
7549 | TORCH_INTERNAL_ASSERT(false, |
7550 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
7551 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
7552 | } else { |
7553 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
7554 | at::AutoDispatchSkipFunctionalize guard; |
7555 | at::Tensor tmp_output = at::_ops::reciprocal_::call(self_); |
7556 | return self; |
7557 | } |
7558 | } else { |
7559 | at::Tensor tmp_output; |
7560 | { |
7561 | at::AutoDispatchSkipFunctionalize guard; |
7562 | tmp_output = at::_ops::reciprocal::call(self_); |
7563 | } |
7564 | at::functionalization::impl::replace_(self, tmp_output); |
7565 | at::functionalization::impl::commit_update(self); |
7566 | at::functionalization::impl::sync(self); |
7567 | return self; |
7568 | } |
7569 | } |
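// Rough sketch (assumptions noted inline): for pure in-place ops such as reciprocal_ above and
// silu_ below, the meta pre-check runs unconditionally (the `if (true)` guard), and under
// functionalization no real in-place write happens -- the functional result is swapped into the
// wrapper via replace_/commit_update/sync. Assuming `t` was wrapped as a functional tensor:
//   at::Tensor t = at::functionalization::impl::to_functional_tensor(at::rand({4}));
//   t.reciprocal_();                          // recorded as a value replacement, not a mutation
//   at::functionalization::impl::sync(t);     // materializes the updated value into the wrapper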
7570 | |
7571 | at::Tensor & gelu_backward_out_grad_input(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, c10::string_view approximate, at::Tensor & grad_input) { |
7572 | if (false) { |
7573 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
7574 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
7575 | // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
7576 | auto grad_output_meta = to_meta(grad_output); |
7577 | auto self_meta = to_meta(self); |
7578 | auto grad_input_meta = to_meta(grad_input); |
7579 | at::AutoDispatchSkipFunctionalize func_guard; |
7580 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
7581 | at::_ops::gelu_backward_grad_input::call(grad_output_meta, self_meta, approximate, grad_input_meta); |
7582 | } |
7583 | |
7584 | at::Tensor grad_output_; |
7585 | if (at::functionalization::impl::isFunctionalTensor(grad_output)) { |
7586 | at::functionalization::impl::sync(grad_output); |
7587 | grad_output_ = at::functionalization::impl::from_functional_tensor(grad_output); |
7588 | } else { |
7589 | grad_output_ = grad_output; |
7590 | } |
7591 | |
7592 | at::Tensor self_; |
7593 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
7594 | at::functionalization::impl::sync(self); |
7595 | self_ = at::functionalization::impl::from_functional_tensor(self); |
7596 | } else { |
7597 | self_ = self; |
7598 | } |
7599 | |
7600 | at::Tensor grad_input_; |
7601 | if (at::functionalization::impl::isFunctionalTensor(grad_input)) { |
7602 | at::functionalization::impl::sync(grad_input); |
7603 | grad_input_ = at::functionalization::impl::from_functional_tensor(grad_input); |
7604 | } else { |
7605 | grad_input_ = grad_input; |
7606 | } |
7607 | if (!(true && at::functionalization::impl::isFunctionalTensor(grad_input))) { |
7608 | if ((false || at::functionalization::impl::isFunctionalTensor(grad_output) || at::functionalization::impl::isFunctionalTensor(self))) { |
7609 | // case 1: trying to mutate a non-functional tensor with a functional tensor is an error |
7610 | TORCH_INTERNAL_ASSERT(false, |
7611 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
7612 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
7613 | } else { |
7614 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
7615 | at::AutoDispatchSkipFunctionalize guard; |
7616 | at::Tensor tmp_output = at::_ops::gelu_backward_grad_input::call(grad_output_, self_, approximate, grad_input_); |
7617 | return grad_input; |
7618 | } |
7619 | } else { |
7620 | at::Tensor tmp_output; |
7621 | { |
7622 | at::AutoDispatchSkipFunctionalize guard; |
7623 | tmp_output = at::_ops::gelu_backward::call(grad_output_, self_, approximate); |
7624 | } |
7625 | at::functionalization::impl::replace_(grad_input, tmp_output); |
7626 | at::functionalization::impl::commit_update(grad_input); |
7627 | at::functionalization::impl::sync(grad_input); |
7628 | return grad_input; |
7629 | } |
7630 | } |
7631 | |
7632 | at::Tensor & hardshrink_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & lambd, at::Tensor & out) { |
7633 | if (false) { |
7634 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
7635 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
7636 | // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
7637 | auto self_meta = to_meta(self); |
7638 | auto out_meta = to_meta(out); |
7639 | at::AutoDispatchSkipFunctionalize func_guard; |
7640 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
7641 | at::_ops::hardshrink_out::call(self_meta, lambd, out_meta); |
7642 | } |
7643 | |
7644 | at::Tensor self_; |
7645 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
7646 | at::functionalization::impl::sync(self); |
7647 | self_ = at::functionalization::impl::from_functional_tensor(self); |
7648 | } else { |
7649 | self_ = self; |
7650 | } |
7651 | |
7652 | at::Tensor out_; |
7653 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
7654 | at::functionalization::impl::sync(out); |
7655 | out_ = at::functionalization::impl::from_functional_tensor(out); |
7656 | } else { |
7657 | out_ = out; |
7658 | } |
7659 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
7660 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
7661 | // case 1: trying to mutate a non-functional tensor with a functional tensor is an error |
7662 | TORCH_INTERNAL_ASSERT(false, |
7663 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
7664 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
7665 | } else { |
7666 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
7667 | at::AutoDispatchSkipFunctionalize guard; |
7668 | at::Tensor tmp_output = at::_ops::hardshrink_out::call(self_, lambd, out_); |
7669 | return out; |
7670 | } |
7671 | } else { |
7672 | at::Tensor tmp_output; |
7673 | { |
7674 | at::AutoDispatchSkipFunctionalize guard; |
7675 | tmp_output = at::_ops::hardshrink::call(self_, lambd); |
7676 | } |
7677 | at::functionalization::impl::replace_(out, tmp_output); |
7678 | at::functionalization::impl::commit_update(out); |
7679 | at::functionalization::impl::sync(out); |
7680 | return out; |
7681 | } |
7682 | } |
7683 | |
7684 | at::Tensor & hardshrink_backward_out_grad_input(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_out, const at::Tensor & self, const at::Scalar & lambd, at::Tensor & grad_input) { |
7685 | if (false) { |
7686 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
7687 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
7688 | // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
7689 | auto grad_out_meta = to_meta(grad_out); |
7690 | auto self_meta = to_meta(self); |
7691 | auto grad_input_meta = to_meta(grad_input); |
7692 | at::AutoDispatchSkipFunctionalize func_guard; |
7693 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
7694 | at::_ops::hardshrink_backward_grad_input::call(grad_out_meta, self_meta, lambd, grad_input_meta); |
7695 | } |
7696 | |
7697 | at::Tensor grad_out_; |
7698 | if (at::functionalization::impl::isFunctionalTensor(grad_out)) { |
7699 | at::functionalization::impl::sync(grad_out); |
7700 | grad_out_ = at::functionalization::impl::from_functional_tensor(grad_out); |
7701 | } else { |
7702 | grad_out_ = grad_out; |
7703 | } |
7704 | |
7705 | at::Tensor self_; |
7706 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
7707 | at::functionalization::impl::sync(self); |
7708 | self_ = at::functionalization::impl::from_functional_tensor(self); |
7709 | } else { |
7710 | self_ = self; |
7711 | } |
7712 | |
7713 | at::Tensor grad_input_; |
7714 | if (at::functionalization::impl::isFunctionalTensor(grad_input)) { |
7715 | at::functionalization::impl::sync(grad_input); |
7716 | grad_input_ = at::functionalization::impl::from_functional_tensor(grad_input); |
7717 | } else { |
7718 | grad_input_ = grad_input; |
7719 | } |
7720 | if (!(true && at::functionalization::impl::isFunctionalTensor(grad_input))) { |
7721 | if ((false || at::functionalization::impl::isFunctionalTensor(grad_out) || at::functionalization::impl::isFunctionalTensor(self))) { |
7722 | // case 1: trying to mutate a non-functional tensor with a functional tensor is an error |
7723 | TORCH_INTERNAL_ASSERT(false, |
7724 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
7725 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
7726 | } else { |
7727 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
7728 | at::AutoDispatchSkipFunctionalize guard; |
7729 | at::Tensor tmp_output = at::_ops::hardshrink_backward_grad_input::call(grad_out_, self_, lambd, grad_input_); |
7730 | return grad_input; |
7731 | } |
7732 | } else { |
7733 | at::Tensor tmp_output; |
7734 | { |
7735 | at::AutoDispatchSkipFunctionalize guard; |
7736 | tmp_output = at::_ops::hardshrink_backward::call(grad_out_, self_, lambd); |
7737 | } |
7738 | at::functionalization::impl::replace_(grad_input, tmp_output); |
7739 | at::functionalization::impl::commit_update(grad_input); |
7740 | at::functionalization::impl::sync(grad_input); |
7741 | return grad_input; |
7742 | } |
7743 | } |
7744 | |
7745 | at::Tensor & silu_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) { |
7746 | if (false) { |
7747 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
7748 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
7749 | // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
7750 | auto self_meta = to_meta(self); |
7751 | auto out_meta = to_meta(out); |
7752 | at::AutoDispatchSkipFunctionalize func_guard; |
7753 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
7754 | at::_ops::silu_out::call(self_meta, out_meta); |
7755 | } |
7756 | |
7757 | at::Tensor self_; |
7758 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
7759 | at::functionalization::impl::sync(self); |
7760 | self_ = at::functionalization::impl::from_functional_tensor(self); |
7761 | } else { |
7762 | self_ = self; |
7763 | } |
7764 | |
7765 | at::Tensor out_; |
7766 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
7767 | at::functionalization::impl::sync(out); |
7768 | out_ = at::functionalization::impl::from_functional_tensor(out); |
7769 | } else { |
7770 | out_ = out; |
7771 | } |
7772 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
7773 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
7774 | // case 1: trying to mutate a non-functional tensor with a functional tensor is an error |
7775 | TORCH_INTERNAL_ASSERT(false, |
7776 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
7777 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
7778 | } else { |
7779 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
7780 | at::AutoDispatchSkipFunctionalize guard; |
7781 | at::Tensor tmp_output = at::_ops::silu_out::call(self_, out_); |
7782 | return out; |
7783 | } |
7784 | } else { |
7785 | at::Tensor tmp_output; |
7786 | { |
7787 | at::AutoDispatchSkipFunctionalize guard; |
7788 | tmp_output = at::_ops::silu::call(self_); |
7789 | } |
7790 | at::functionalization::impl::replace_(out, tmp_output); |
7791 | at::functionalization::impl::commit_update(out); |
7792 | at::functionalization::impl::sync(out); |
7793 | return out; |
7794 | } |
7795 | } |
7796 | |
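// The meta-tensor pre-check is gated at code-generation time: it is emitted as
// `if (false)` for out= overloads such as silu.out above, and as `if (true)` for
// in-place overloads such as silu_ below, since only in-place ops are currently
// assumed to all support meta tensors.
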
7797 | at::Tensor & silu_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) { |
7798 | if (true) { |
7799 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
7800 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
7801 | // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
7802 | auto self_meta = to_meta(self); |
7803 | at::AutoDispatchSkipFunctionalize func_guard; |
7804 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
7805 | at::_ops::silu_::call(self_meta); |
7806 | } |
7807 | |
7808 | at::Tensor self_; |
7809 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
7810 | at::functionalization::impl::sync(self); |
7811 | self_ = at::functionalization::impl::from_functional_tensor(self); |
7812 | } else { |
7813 | self_ = self; |
7814 | } |
7815 | if (!(true && at::functionalization::impl::isFunctionalTensor(self))) { |
7816 | if ((false)) { |
7817 | // case 1: trying to mutate a non functional tensor with a functional tensor is an error |
7818 | TORCH_INTERNAL_ASSERT(false, |
7819 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
7820 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
7821 | } else { |
7822 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
7823 | at::AutoDispatchSkipFunctionalize guard; |
7824 | at::Tensor tmp_output = at::_ops::silu_::call(self_); |
7825 | return self; |
7826 | } |
7827 | } else { |
7828 | at::Tensor tmp_output; |
7829 | { |
7830 | at::AutoDispatchSkipFunctionalize guard; |
7831 | tmp_output = at::_ops::silu::call(self_); |
7832 | } |
7833 | at::functionalization::impl::replace_(self, tmp_output); |
7834 | at::functionalization::impl::commit_update(self); |
7835 | at::functionalization::impl::sync(self); |
7836 | return self; |
7837 | } |
7838 | } |
7839 | |
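// As a rough, hypothetical illustration (user-level sketch, not part of this
// generated file): when a program runs under functionalization (for example via
// torch.func.functionalize in Python), an in-place call such as
//
//   at::Tensor x = at::randn({4});
//   x.silu_();   // routed through silu_ above once x is a functional tensor
//
// is recorded as `tmp_output = at::_ops::silu::call(x_)` followed by a write-back
// into x's wrapper, so downstream passes only ever observe the functional silu op.
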
7840 | at::Tensor & silu_backward_out_grad_input(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, at::Tensor & grad_input) { |
7841 | if (false) { |
7842 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
7843 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
7844 | // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
7845 | auto grad_output_meta = to_meta(grad_output); |
7846 | auto self_meta = to_meta(self); |
7847 | auto grad_input_meta = to_meta(grad_input); |
7848 | at::AutoDispatchSkipFunctionalize func_guard; |
7849 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
7850 | at::_ops::silu_backward_grad_input::call(grad_output_meta, self_meta, grad_input_meta); |
7851 | } |
7852 | |
7853 | at::Tensor grad_output_; |
7854 | if (at::functionalization::impl::isFunctionalTensor(grad_output)) { |
7855 | at::functionalization::impl::sync(grad_output); |
7856 | grad_output_ = at::functionalization::impl::from_functional_tensor(grad_output); |
7857 | } else { |
7858 | grad_output_ = grad_output; |
7859 | } |
7860 | |
7861 | at::Tensor self_; |
7862 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
7863 | at::functionalization::impl::sync(self); |
7864 | self_ = at::functionalization::impl::from_functional_tensor(self); |
7865 | } else { |
7866 | self_ = self; |
7867 | } |
7868 | |
7869 | at::Tensor grad_input_; |
7870 | if (at::functionalization::impl::isFunctionalTensor(grad_input)) { |
7871 | at::functionalization::impl::sync(grad_input); |
7872 | grad_input_ = at::functionalization::impl::from_functional_tensor(grad_input); |
7873 | } else { |
7874 | grad_input_ = grad_input; |
7875 | } |
7876 | if (!(true && at::functionalization::impl::isFunctionalTensor(grad_input))) { |
7877 | if ((false || at::functionalization::impl::isFunctionalTensor(grad_output) || at::functionalization::impl::isFunctionalTensor(self))) { |
7878 | // case 1: trying to mutate a non functional tensor with a functional tensor is an error |
7879 | TORCH_INTERNAL_ASSERT(false, |
7880 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
7881 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
7882 | } else { |
7883 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
7884 | at::AutoDispatchSkipFunctionalize guard; |
7885 | at::Tensor tmp_output = at::_ops::silu_backward_grad_input::call(grad_output_, self_, grad_input_); |
7886 | return grad_input; |
7887 | } |
7888 | } else { |
7889 | at::Tensor tmp_output; |
7890 | { |
7891 | at::AutoDispatchSkipFunctionalize guard; |
7892 | tmp_output = at::_ops::silu_backward::call(grad_output_, self_); |
7893 | } |
7894 | at::functionalization::impl::replace_(grad_input, tmp_output); |
7895 | at::functionalization::impl::commit_update(grad_input); |
7896 | at::functionalization::impl::sync(grad_input); |
7897 | return grad_input; |
7898 | } |
7899 | } |
7900 | |
7901 | at::Tensor & sin_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) { |
7902 | if (false) { |
7903 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
7904 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
7905 | // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
7906 | auto self_meta = to_meta(self); |
7907 | auto out_meta = to_meta(out); |
7908 | at::AutoDispatchSkipFunctionalize func_guard; |
7909 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
7910 | at::_ops::sin_out::call(self_meta, out_meta); |
7911 | } |
7912 | |
7913 | at::Tensor self_; |
7914 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
7915 | at::functionalization::impl::sync(self); |
7916 | self_ = at::functionalization::impl::from_functional_tensor(self); |
7917 | } else { |
7918 | self_ = self; |
7919 | } |
7920 | |
7921 | at::Tensor out_; |
7922 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
7923 | at::functionalization::impl::sync(out); |
7924 | out_ = at::functionalization::impl::from_functional_tensor(out); |
7925 | } else { |
7926 | out_ = out; |
7927 | } |
7928 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
7929 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
7930 | // case 1: trying to mutate a non functional tensor with a functional tensor is an error |
7931 | TORCH_INTERNAL_ASSERT(false, |
7932 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
7933 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
7934 | } else { |
7935 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
7936 | at::AutoDispatchSkipFunctionalize guard; |
7937 | at::Tensor tmp_output = at::_ops::sin_out::call(self_, out_); |
7938 | return out; |
7939 | } |
7940 | } else { |
7941 | at::Tensor tmp_output; |
7942 | { |
7943 | at::AutoDispatchSkipFunctionalize guard; |
7944 | tmp_output = at::_ops::sin::call(self_); |
7945 | } |
7946 | at::functionalization::impl::replace_(out, tmp_output); |
7947 | at::functionalization::impl::commit_update(out); |
7948 | at::functionalization::impl::sync(out); |
7949 | return out; |
7950 | } |
7951 | } |
7952 | |
7953 | at::Tensor & sin_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) { |
7954 | if (true) { |
7955 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
7956 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
7957 | // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
7958 | auto self_meta = to_meta(self); |
7959 | at::AutoDispatchSkipFunctionalize func_guard; |
7960 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
7961 | at::_ops::sin_::call(self_meta); |
7962 | } |
7963 | |
7964 | at::Tensor self_; |
7965 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
7966 | at::functionalization::impl::sync(self); |
7967 | self_ = at::functionalization::impl::from_functional_tensor(self); |
7968 | } else { |
7969 | self_ = self; |
7970 | } |
7971 | if (!(true && at::functionalization::impl::isFunctionalTensor(self))) { |
7972 | if ((false)) { |
7973 | // case 1: trying to mutate a non functional tensor with a functional tensor is an error |
7974 | TORCH_INTERNAL_ASSERT(false, |
7975 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
7976 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
7977 | } else { |
7978 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
7979 | at::AutoDispatchSkipFunctionalize guard; |
7980 | at::Tensor tmp_output = at::_ops::sin_::call(self_); |
7981 | return self; |
7982 | } |
7983 | } else { |
7984 | at::Tensor tmp_output; |
7985 | { |
7986 | at::AutoDispatchSkipFunctionalize guard; |
7987 | tmp_output = at::_ops::sin::call(self_); |
7988 | } |
7989 | at::functionalization::impl::replace_(self, tmp_output); |
7990 | at::functionalization::impl::commit_update(self); |
7991 | at::functionalization::impl::sync(self); |
7992 | return self; |
7993 | } |
7994 | } |
7995 | |
7996 | at::Tensor & _softmax_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, bool half_to_float, at::Tensor & out) { |
7997 | if (false) { |
7998 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
7999 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
8000 | // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
8001 | auto self_meta = to_meta(self); |
8002 | auto out_meta = to_meta(out); |
8003 | at::AutoDispatchSkipFunctionalize func_guard; |
8004 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
8005 | at::_ops::_softmax_out::call(self_meta, dim, half_to_float, out_meta); |
8006 | } |
8007 | |
8008 | at::Tensor self_; |
8009 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
8010 | at::functionalization::impl::sync(self); |
8011 | self_ = at::functionalization::impl::from_functional_tensor(self); |
8012 | } else { |
8013 | self_ = self; |
8014 | } |
8015 | |
8016 | at::Tensor out_; |
8017 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
8018 | at::functionalization::impl::sync(out); |
8019 | out_ = at::functionalization::impl::from_functional_tensor(out); |
8020 | } else { |
8021 | out_ = out; |
8022 | } |
8023 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
8024 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
8025 | // case 1: trying to mutate a non functional tensor with a functional tensor is an error |
8026 | TORCH_INTERNAL_ASSERT(false, |
8027 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
8028 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
8029 | } else { |
8030 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
8031 | at::AutoDispatchSkipFunctionalize guard; |
8032 | at::Tensor tmp_output = at::_ops::_softmax_out::call(self_, dim, half_to_float, out_); |
8033 | return out; |
8034 | } |
8035 | } else { |
8036 | at::Tensor tmp_output; |
8037 | { |
8038 | at::AutoDispatchSkipFunctionalize guard; |
8039 | tmp_output = at::_ops::_softmax::call(self_, dim, half_to_float); |
8040 | } |
8041 | at::functionalization::impl::replace_(out, tmp_output); |
8042 | at::functionalization::impl::commit_update(out); |
8043 | at::functionalization::impl::sync(out); |
8044 | return out; |
8045 | } |
8046 | } |
8047 | |
8048 | at::Tensor & sspaddmm_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta, const at::Scalar & alpha, at::Tensor & out) { |
8049 | if (false) { |
8050 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
8051 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
8052 | // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
8053 | auto self_meta = to_meta(self); |
8054 | auto mat1_meta = to_meta(mat1); |
8055 | auto mat2_meta = to_meta(mat2); |
8056 | auto out_meta = to_meta(out); |
8057 | at::AutoDispatchSkipFunctionalize func_guard; |
8058 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
8059 | at::_ops::sspaddmm_out::call(self_meta, mat1_meta, mat2_meta, beta, alpha, out_meta); |
8060 | } |
8061 | |
8062 | at::Tensor self_; |
8063 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
8064 | at::functionalization::impl::sync(self); |
8065 | self_ = at::functionalization::impl::from_functional_tensor(self); |
8066 | } else { |
8067 | self_ = self; |
8068 | } |
8069 | |
8070 | at::Tensor mat1_; |
8071 | if (at::functionalization::impl::isFunctionalTensor(mat1)) { |
8072 | at::functionalization::impl::sync(mat1); |
8073 | mat1_ = at::functionalization::impl::from_functional_tensor(mat1); |
8074 | } else { |
8075 | mat1_ = mat1; |
8076 | } |
8077 | |
8078 | at::Tensor mat2_; |
8079 | if (at::functionalization::impl::isFunctionalTensor(mat2)) { |
8080 | at::functionalization::impl::sync(mat2); |
8081 | mat2_ = at::functionalization::impl::from_functional_tensor(mat2); |
8082 | } else { |
8083 | mat2_ = mat2; |
8084 | } |
8085 | |
8086 | at::Tensor out_; |
8087 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
8088 | at::functionalization::impl::sync(out); |
8089 | out_ = at::functionalization::impl::from_functional_tensor(out); |
8090 | } else { |
8091 | out_ = out; |
8092 | } |
8093 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
8094 | if ((false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(mat1) || at::functionalization::impl::isFunctionalTensor(mat2))) { |
8095 | // case 1: trying to mutate a non functional tensor with a functional tensor is an error |
8096 | TORCH_INTERNAL_ASSERT(false, |
8097 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
8098 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
8099 | } else { |
8100 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
8101 | at::AutoDispatchSkipFunctionalize guard; |
8102 | at::Tensor tmp_output = at::_ops::sspaddmm_out::call(self_, mat1_, mat2_, beta, alpha, out_); |
8103 | return out; |
8104 | } |
8105 | } else { |
8106 | at::Tensor tmp_output; |
8107 | { |
8108 | at::AutoDispatchSkipFunctionalize guard; |
8109 | tmp_output = at::_ops::sspaddmm::call(self_, mat1_, mat2_, beta, alpha); |
8110 | } |
8111 | at::functionalization::impl::replace_(out, tmp_output); |
8112 | at::functionalization::impl::commit_update(out); |
8113 | at::functionalization::impl::sync(out); |
8114 | return out; |
8115 | } |
8116 | } |
8117 | |
8118 | at::Tensor & _stack_out_out(c10::DispatchKeySet dispatchKeySet, at::TensorList tensors, int64_t dim, at::Tensor & out) { |
8119 | if (false) { |
8120 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
8121 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
8122 | // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
8123 | auto tensors_meta = to_meta(tensors); |
8124 | auto out_meta = to_meta(out); |
8125 | at::AutoDispatchSkipFunctionalize func_guard; |
8126 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
8127 | at::_ops::_stack_out::call(tensors_meta, dim, out_meta); |
8128 | } |
8129 | |
8130 | ::std::vector<at::Tensor> tensors_; |
8131 | if (at::functionalization::impl::isFunctionalTensor(tensors)) { |
8132 | at::functionalization::impl::sync(tensors); |
8133 | tensors_ = at::functionalization::impl::from_functional_tensor(tensors); |
8134 | } else { |
8135 | tensors_ = tensors.vec(); |
8136 | } |
8137 | |
8138 | at::Tensor out_; |
8139 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
8140 | at::functionalization::impl::sync(out); |
8141 | out_ = at::functionalization::impl::from_functional_tensor(out); |
8142 | } else { |
8143 | out_ = out; |
8144 | } |
8145 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
8146 | if ((false || at::functionalization::impl::isFunctionalTensor(tensors))) { |
8147 | // case 1: trying to mutate a non functional tensor with a functional tensor is an error |
8148 | TORCH_INTERNAL_ASSERT(false, |
8149 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
8150 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
8151 | } else { |
8152 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
8153 | at::AutoDispatchSkipFunctionalize guard; |
8154 | at::Tensor tmp_output = at::_ops::_stack_out::call(tensors_, dim, out_); |
8155 | return out; |
8156 | } |
8157 | } else { |
8158 | at::Tensor tmp_output; |
8159 | { |
8160 | at::AutoDispatchSkipFunctionalize guard; |
8161 | tmp_output = at::_ops::_stack::call(tensors_, dim); |
8162 | } |
8163 | at::functionalization::impl::replace_(out, tmp_output); |
8164 | at::functionalization::impl::commit_update(out); |
8165 | at::functionalization::impl::sync(out); |
8166 | return out; |
8167 | } |
8168 | } |
8169 | |
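// For TensorList arguments such as `tensors` above, the unwrapped inputs need an
// owning container, so the kernel materializes a ::std::vector<at::Tensor> either
// from from_functional_tensor(tensors) or from a plain copy via tensors.vec().
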
8170 | at::Tensor & hstack_out_out(c10::DispatchKeySet dispatchKeySet, at::TensorList tensors, at::Tensor & out) { |
8171 | if (false) { |
8172 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
8173 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
8174 | // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
8175 | auto tensors_meta = to_meta(tensors); |
8176 | auto out_meta = to_meta(out); |
8177 | at::AutoDispatchSkipFunctionalize func_guard; |
8178 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
8179 | at::_ops::hstack_out::call(tensors_meta, out_meta); |
8180 | } |
8181 | |
8182 | ::std::vector<at::Tensor> tensors_; |
8183 | if (at::functionalization::impl::isFunctionalTensor(tensors)) { |
8184 | at::functionalization::impl::sync(tensors); |
8185 | tensors_ = at::functionalization::impl::from_functional_tensor(tensors); |
8186 | } else { |
8187 | tensors_ = tensors.vec(); |
8188 | } |
8189 | |
8190 | at::Tensor out_; |
8191 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
8192 | at::functionalization::impl::sync(out); |
8193 | out_ = at::functionalization::impl::from_functional_tensor(out); |
8194 | } else { |
8195 | out_ = out; |
8196 | } |
8197 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
8198 | if ((false || at::functionalization::impl::isFunctionalTensor(tensors))) { |
8199 | // case 1: trying to mutate a non functional tensor with a functional tensor is an error |
8200 | TORCH_INTERNAL_ASSERT(false, |
8201 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
8202 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
8203 | } else { |
8204 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
8205 | at::AutoDispatchSkipFunctionalize guard; |
8206 | at::Tensor tmp_output = at::_ops::hstack_out::call(tensors_, out_); |
8207 | return out; |
8208 | } |
8209 | } else { |
8210 | at::Tensor tmp_output; |
8211 | { |
8212 | at::AutoDispatchSkipFunctionalize guard; |
8213 | tmp_output = at::_ops::hstack::call(tensors_); |
8214 | } |
8215 | at::functionalization::impl::replace_(out, tmp_output); |
8216 | at::functionalization::impl::commit_update(out); |
8217 | at::functionalization::impl::sync(out); |
8218 | return out; |
8219 | } |
8220 | } |
8221 | |
8222 | at::Tensor & dstack_out_out(c10::DispatchKeySet dispatchKeySet, at::TensorList tensors, at::Tensor & out) { |
8223 | if (false) { |
8224 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
8225 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
8226 | // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
8227 | auto tensors_meta = to_meta(tensors); |
8228 | auto out_meta = to_meta(out); |
8229 | at::AutoDispatchSkipFunctionalize func_guard; |
8230 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
8231 | at::_ops::dstack_out::call(tensors_meta, out_meta); |
8232 | } |
8233 | |
8234 | ::std::vector<at::Tensor> tensors_; |
8235 | if (at::functionalization::impl::isFunctionalTensor(tensors)) { |
8236 | at::functionalization::impl::sync(tensors); |
8237 | tensors_ = at::functionalization::impl::from_functional_tensor(tensors); |
8238 | } else { |
8239 | tensors_ = tensors.vec(); |
8240 | } |
8241 | |
8242 | at::Tensor out_; |
8243 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
8244 | at::functionalization::impl::sync(out); |
8245 | out_ = at::functionalization::impl::from_functional_tensor(out); |
8246 | } else { |
8247 | out_ = out; |
8248 | } |
8249 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
8250 | if ((false || at::functionalization::impl::isFunctionalTensor(tensors))) { |
8251 | // case 1: trying to mutate a non functional tensor with a functional tensor is an error |
8252 | TORCH_INTERNAL_ASSERT(false, |
8253 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
8254 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
8255 | } else { |
8256 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
8257 | at::AutoDispatchSkipFunctionalize guard; |
8258 | at::Tensor tmp_output = at::_ops::dstack_out::call(tensors_, out_); |
8259 | return out; |
8260 | } |
8261 | } else { |
8262 | at::Tensor tmp_output; |
8263 | { |
8264 | at::AutoDispatchSkipFunctionalize guard; |
8265 | tmp_output = at::_ops::dstack::call(tensors_); |
8266 | } |
8267 | at::functionalization::impl::replace_(out, tmp_output); |
8268 | at::functionalization::impl::commit_update(out); |
8269 | at::functionalization::impl::sync(out); |
8270 | return out; |
8271 | } |
8272 | } |
8273 | |
8274 | at::Tensor & sum_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::optional<at::ScalarType> dtype, at::Tensor & out) { |
8275 | if (false) { |
8276 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
8277 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
8278 | // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
8279 | auto self_meta = to_meta(self); |
8280 | auto out_meta = to_meta(out); |
8281 | at::AutoDispatchSkipFunctionalize func_guard; |
8282 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
8283 | at::_ops::sum_out::call(self_meta, dtype, out_meta); |
8284 | } |
8285 | |
8286 | at::Tensor self_; |
8287 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
8288 | at::functionalization::impl::sync(self); |
8289 | self_ = at::functionalization::impl::from_functional_tensor(self); |
8290 | } else { |
8291 | self_ = self; |
8292 | } |
8293 | |
8294 | at::Tensor out_; |
8295 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
8296 | at::functionalization::impl::sync(out); |
8297 | out_ = at::functionalization::impl::from_functional_tensor(out); |
8298 | } else { |
8299 | out_ = out; |
8300 | } |
8301 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
8302 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
8303 | // case 1: trying to mutate a non functional tensor with a functional tensor is an error |
8304 | TORCH_INTERNAL_ASSERT(false, |
8305 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
8306 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
8307 | } else { |
8308 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
8309 | at::AutoDispatchSkipFunctionalize guard; |
8310 | at::Tensor tmp_output = at::_ops::sum_out::call(self_, dtype, out_); |
8311 | return out; |
8312 | } |
8313 | } else { |
8314 | at::Tensor tmp_output; |
8315 | { |
8316 | at::AutoDispatchSkipFunctionalize guard; |
8317 | tmp_output = at::_ops::sum::call(self_, dtype); |
8318 | } |
8319 | at::functionalization::impl::replace_(out, tmp_output); |
8320 | at::functionalization::impl::commit_update(out); |
8321 | at::functionalization::impl::sync(out); |
8322 | return out; |
8323 | } |
8324 | } |
8325 | |
8326 | at::Tensor & sum_out_IntList_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::OptionalIntArrayRef dim, bool keepdim, c10::optional<at::ScalarType> dtype, at::Tensor & out) { |
8327 | if (false) { |
8328 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
8329 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
8330 | // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
8331 | auto self_meta = to_meta(self); |
8332 | auto out_meta = to_meta(out); |
8333 | at::AutoDispatchSkipFunctionalize func_guard; |
8334 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
8335 | at::_ops::sum_IntList_out::call(self_meta, dim, keepdim, dtype, out_meta); |
8336 | } |
8337 | |
8338 | at::Tensor self_; |
8339 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
8340 | at::functionalization::impl::sync(self); |
8341 | self_ = at::functionalization::impl::from_functional_tensor(self); |
8342 | } else { |
8343 | self_ = self; |
8344 | } |
8345 | |
8346 | at::Tensor out_; |
8347 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
8348 | at::functionalization::impl::sync(out); |
8349 | out_ = at::functionalization::impl::from_functional_tensor(out); |
8350 | } else { |
8351 | out_ = out; |
8352 | } |
8353 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
8354 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
8355 | // case 1: trying to mutate a non functional tensor with a functional tensor is an error |
8356 | TORCH_INTERNAL_ASSERT(false, |
8357 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
8358 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
8359 | } else { |
8360 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
8361 | at::AutoDispatchSkipFunctionalize guard; |
8362 | at::Tensor tmp_output = at::_ops::sum_IntList_out::call(self_, dim, keepdim, dtype, out_); |
8363 | return out; |
8364 | } |
8365 | } else { |
8366 | at::Tensor tmp_output; |
8367 | { |
8368 | at::AutoDispatchSkipFunctionalize guard; |
8369 | tmp_output = at::_ops::sum_dim_IntList::call(self_, dim, keepdim, dtype); |
8370 | } |
8371 | at::functionalization::impl::replace_(out, tmp_output); |
8372 | at::functionalization::impl::commit_update(out); |
8373 | at::functionalization::impl::sync(out); |
8374 | return out; |
8375 | } |
8376 | } |
8377 | |
8378 | at::Tensor & sum_out_DimnameList_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::DimnameList dim, bool keepdim, c10::optional<at::ScalarType> dtype, at::Tensor & out) { |
8379 | if (false) { |
8380 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
8381 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
8382 | // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
8383 | auto self_meta = to_meta(self); |
8384 | auto out_meta = to_meta(out); |
8385 | at::AutoDispatchSkipFunctionalize func_guard; |
8386 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
8387 | at::_ops::sum_DimnameList_out::call(self_meta, dim, keepdim, dtype, out_meta); |
8388 | } |
8389 | |
8390 | at::Tensor self_; |
8391 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
8392 | at::functionalization::impl::sync(self); |
8393 | self_ = at::functionalization::impl::from_functional_tensor(self); |
8394 | } else { |
8395 | self_ = self; |
8396 | } |
8397 | |
8398 | at::Tensor out_; |
8399 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
8400 | at::functionalization::impl::sync(out); |
8401 | out_ = at::functionalization::impl::from_functional_tensor(out); |
8402 | } else { |
8403 | out_ = out; |
8404 | } |
8405 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
8406 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
8407 | // case 1: trying to mutate a non functional tensor with a functional tensor is an error |
8408 | TORCH_INTERNAL_ASSERT(false, |
8409 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
8410 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
8411 | } else { |
8412 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
8413 | at::AutoDispatchSkipFunctionalize guard; |
8414 | at::Tensor tmp_output = at::_ops::sum_DimnameList_out::call(self_, dim, keepdim, dtype, out_); |
8415 | return out; |
8416 | } |
8417 | } else { |
8418 | at::Tensor tmp_output; |
8419 | { |
8420 | at::AutoDispatchSkipFunctionalize guard; |
8421 | tmp_output = at::_ops::sum_dim_DimnameList::call(self_, dim, keepdim, dtype); |
8422 | } |
8423 | at::functionalization::impl::replace_(out, tmp_output); |
8424 | at::functionalization::impl::commit_update(out); |
8425 | at::functionalization::impl::sync(out); |
8426 | return out; |
8427 | } |
8428 | } |
8429 | |
8430 | at::Tensor & std_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::OptionalIntArrayRef dim, bool unbiased, bool keepdim, at::Tensor & out) { |
8431 | if (false) { |
8432 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
8433 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
8434 | // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
8435 | auto self_meta = to_meta(self); |
8436 | auto out_meta = to_meta(out); |
8437 | at::AutoDispatchSkipFunctionalize func_guard; |
8438 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
8439 | at::_ops::std_out::call(self_meta, dim, unbiased, keepdim, out_meta); |
8440 | } |
8441 | |
8442 | at::Tensor self_; |
8443 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
8444 | at::functionalization::impl::sync(self); |
8445 | self_ = at::functionalization::impl::from_functional_tensor(self); |
8446 | } else { |
8447 | self_ = self; |
8448 | } |
8449 | |
8450 | at::Tensor out_; |
8451 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
8452 | at::functionalization::impl::sync(out); |
8453 | out_ = at::functionalization::impl::from_functional_tensor(out); |
8454 | } else { |
8455 | out_ = out; |
8456 | } |
8457 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
8458 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
8459 | // case 1: trying to mutate a non functional tensor with a functional tensor is an error |
8460 | TORCH_INTERNAL_ASSERT(false, |
8461 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
8462 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
8463 | } else { |
8464 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
8465 | at::AutoDispatchSkipFunctionalize guard; |
8466 | at::Tensor tmp_output = at::_ops::std_out::call(self_, dim, unbiased, keepdim, out_); |
8467 | return out; |
8468 | } |
8469 | } else { |
8470 | at::Tensor tmp_output; |
8471 | { |
8472 | at::AutoDispatchSkipFunctionalize guard; |
8473 | tmp_output = at::_ops::std_dim::call(self_, dim, unbiased, keepdim); |
8474 | } |
8475 | at::functionalization::impl::replace_(out, tmp_output); |
8476 | at::functionalization::impl::commit_update(out); |
8477 | at::functionalization::impl::sync(out); |
8478 | return out; |
8479 | } |
8480 | } |
8481 | |
8482 | at::Tensor & std_out_correction_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::OptionalIntArrayRef dim, c10::optional<int64_t> correction, bool keepdim, at::Tensor & out) { |
8483 | if (false) { |
8484 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
8485 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
8486 | // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
8487 | auto self_meta = to_meta(self); |
8488 | auto out_meta = to_meta(out); |
8489 | at::AutoDispatchSkipFunctionalize func_guard; |
8490 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
8491 | at::_ops::std_correction_out::call(self_meta, dim, correction, keepdim, out_meta); |
8492 | } |
8493 | |
8494 | at::Tensor self_; |
8495 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
8496 | at::functionalization::impl::sync(self); |
8497 | self_ = at::functionalization::impl::from_functional_tensor(self); |
8498 | } else { |
8499 | self_ = self; |
8500 | } |
8501 | |
8502 | at::Tensor out_; |
8503 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
8504 | at::functionalization::impl::sync(out); |
8505 | out_ = at::functionalization::impl::from_functional_tensor(out); |
8506 | } else { |
8507 | out_ = out; |
8508 | } |
8509 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
8510 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
8511 | // case 1: trying to mutate a non functional tensor with a functional tensor is an error |
8512 | TORCH_INTERNAL_ASSERT(false, |
8513 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
8514 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
8515 | } else { |
8516 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
8517 | at::AutoDispatchSkipFunctionalize guard; |
8518 | at::Tensor tmp_output = at::_ops::std_correction_out::call(self_, dim, correction, keepdim, out_); |
8519 | return out; |
8520 | } |
8521 | } else { |
8522 | at::Tensor tmp_output; |
8523 | { |
8524 | at::AutoDispatchSkipFunctionalize guard; |
8525 | tmp_output = at::_ops::std_correction::call(self_, dim, correction, keepdim); |
8526 | } |
8527 | at::functionalization::impl::replace_(out, tmp_output); |
8528 | at::functionalization::impl::commit_update(out); |
8529 | at::functionalization::impl::sync(out); |
8530 | return out; |
8531 | } |
8532 | } |
8533 | |
8534 | at::Tensor & std_out_names_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::DimnameList dim, bool unbiased, bool keepdim, at::Tensor & out) { |
8535 | if (false) { |
8536 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
8537 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
8538 | // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
8539 | auto self_meta = to_meta(self); |
8540 | auto out_meta = to_meta(out); |
8541 | at::AutoDispatchSkipFunctionalize func_guard; |
8542 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
8543 | at::_ops::std_names_out::call(self_meta, dim, unbiased, keepdim, out_meta); |
8544 | } |
8545 | |
8546 | at::Tensor self_; |
8547 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
8548 | at::functionalization::impl::sync(self); |
8549 | self_ = at::functionalization::impl::from_functional_tensor(self); |
8550 | } else { |
8551 | self_ = self; |
8552 | } |
8553 | |
8554 | at::Tensor out_; |
8555 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
8556 | at::functionalization::impl::sync(out); |
8557 | out_ = at::functionalization::impl::from_functional_tensor(out); |
8558 | } else { |
8559 | out_ = out; |
8560 | } |
8561 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
8562 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
8563 | // case 1: trying to mutate a non functional tensor with a functional tensor is an error |
8564 | TORCH_INTERNAL_ASSERT(false, |
8565 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
8566 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
8567 | } else { |
8568 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
8569 | at::AutoDispatchSkipFunctionalize guard; |
8570 | at::Tensor tmp_output = at::_ops::std_names_out::call(self_, dim, unbiased, keepdim, out_); |
8571 | return out; |
8572 | } |
8573 | } else { |
8574 | at::Tensor tmp_output; |
8575 | { |
8576 | at::AutoDispatchSkipFunctionalize guard; |
8577 | tmp_output = at::_ops::std_names_dim::call(self_, dim, unbiased, keepdim); |
8578 | } |
8579 | at::functionalization::impl::replace_(out, tmp_output); |
8580 | at::functionalization::impl::commit_update(out); |
8581 | at::functionalization::impl::sync(out); |
8582 | return out; |
8583 | } |
8584 | } |
8585 | |
8586 | at::Tensor & std_out_correction_names_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::DimnameList dim, c10::optional<int64_t> correction, bool keepdim, at::Tensor & out) { |
8587 | if (false) { |
8588 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
8589 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
8590 | // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
8591 | auto self_meta = to_meta(self); |
8592 | auto out_meta = to_meta(out); |
8593 | at::AutoDispatchSkipFunctionalize func_guard; |
8594 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
8595 | at::_ops::std_correction_names_out::call(self_meta, dim, correction, keepdim, out_meta); |
8596 | } |
8597 | |
8598 | at::Tensor self_; |
8599 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
8600 | at::functionalization::impl::sync(self); |
8601 | self_ = at::functionalization::impl::from_functional_tensor(self); |
8602 | } else { |
8603 | self_ = self; |
8604 | } |
8605 | |
8606 | at::Tensor out_; |
8607 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
8608 | at::functionalization::impl::sync(out); |
8609 | out_ = at::functionalization::impl::from_functional_tensor(out); |
8610 | } else { |
8611 | out_ = out; |
8612 | } |
8613 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
8614 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
8615 | // case 1: trying to mutate a non functional tensor with a functional tensor is an error |
8616 | TORCH_INTERNAL_ASSERT(false, |
8617 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
8618 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
8619 | } else { |
8620 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
8621 | at::AutoDispatchSkipFunctionalize guard; |
8622 | at::Tensor tmp_output = at::_ops::std_correction_names_out::call(self_, dim, correction, keepdim, out_); |
8623 | return out; |
8624 | } |
8625 | } else { |
8626 | at::Tensor tmp_output; |
8627 | { |
8628 | at::AutoDispatchSkipFunctionalize guard; |
8629 | tmp_output = at::_ops::std_correction_names::call(self_, dim, correction, keepdim); |
8630 | } |
8631 | at::functionalization::impl::replace_(out, tmp_output); |
8632 | at::functionalization::impl::commit_update(out); |
8633 | at::functionalization::impl::sync(out); |
8634 | return out; |
8635 | } |
8636 | } |
8637 | |
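// Each std.* out= overload above maps back to its corresponding functional
// overload when functionalizing: std.out -> std.dim, std.correction_out ->
// std.correction, std.names_out -> std.names_dim, and std.correction_names_out ->
// std.correction_names, mirroring the _ops calls in the write-back branches.
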
8638 | at::Tensor & tan_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) { |
8639 | if (false) { |
8640 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
8641 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
8642 | // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
8643 | auto self_meta = to_meta(self); |
8644 | auto out_meta = to_meta(out); |
8645 | at::AutoDispatchSkipFunctionalize func_guard; |
8646 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
8647 | at::_ops::tan_out::call(self_meta, out_meta); |
8648 | } |
8649 | |
8650 | at::Tensor self_; |
8651 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
8652 | at::functionalization::impl::sync(self); |
8653 | self_ = at::functionalization::impl::from_functional_tensor(self); |
8654 | } else { |
8655 | self_ = self; |
8656 | } |
8657 | |
8658 | at::Tensor out_; |
8659 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
8660 | at::functionalization::impl::sync(out); |
8661 | out_ = at::functionalization::impl::from_functional_tensor(out); |
8662 | } else { |
8663 | out_ = out; |
8664 | } |
8665 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
8666 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
8667 | // case 1: trying to mutate a non functional tensor with a functional tensor is an error |
8668 | TORCH_INTERNAL_ASSERT(false, |
8669 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
8670 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
8671 | } else { |
8672 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
8673 | at::AutoDispatchSkipFunctionalize guard; |
8674 | at::Tensor tmp_output = at::_ops::tan_out::call(self_, out_); |
8675 | return out; |
8676 | } |
8677 | } else { |
8678 | at::Tensor tmp_output; |
8679 | { |
8680 | at::AutoDispatchSkipFunctionalize guard; |
8681 | tmp_output = at::_ops::tan::call(self_); |
8682 | } |
8683 | at::functionalization::impl::replace_(out, tmp_output); |
8684 | at::functionalization::impl::commit_update(out); |
8685 | at::functionalization::impl::sync(out); |
8686 | return out; |
8687 | } |
8688 | } |
8689 | |
8690 | at::Tensor & tan_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) { |
8691 | if (true) { |
8692 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
8693 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
8694 | // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
8695 | auto self_meta = to_meta(self); |
8696 | at::AutoDispatchSkipFunctionalize func_guard; |
8697 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
8698 | at::_ops::tan_::call(self_meta); |
8699 | } |
8700 | |
8701 | at::Tensor self_; |
8702 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
8703 | at::functionalization::impl::sync(self); |
8704 | self_ = at::functionalization::impl::from_functional_tensor(self); |
8705 | } else { |
8706 | self_ = self; |
8707 | } |
8708 | if (!(true && at::functionalization::impl::isFunctionalTensor(self))) { |
8709 | if ((false)) { |
8710 | // case 1: trying to mutate a non functional tensor with a functional tensor is an error |
8711 | TORCH_INTERNAL_ASSERT(false, |
8712 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
8713 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
8714 | } else { |
8715 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
8716 | at::AutoDispatchSkipFunctionalize guard; |
8717 | at::Tensor tmp_output = at::_ops::tan_::call(self_); |
8718 | return self; |
8719 | } |
8720 | } else { |
8721 | at::Tensor tmp_output; |
8722 | { |
8723 | at::AutoDispatchSkipFunctionalize guard; |
8724 | tmp_output = at::_ops::tan::call(self_); |
8725 | } |
8726 | at::functionalization::impl::replace_(self, tmp_output); |
8727 | at::functionalization::impl::commit_update(self); |
8728 | at::functionalization::impl::sync(self); |
8729 | return self; |
8730 | } |
8731 | } |
8732 | |
8733 | at::Tensor & tensordot_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::IntArrayRef dims_self, at::IntArrayRef dims_other, at::Tensor & out) { |
8734 | if (false) { |
8735 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
8736 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
8737 | // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
8738 | auto self_meta = to_meta(self); |
8739 | auto other_meta = to_meta(other); |
8740 | auto out_meta = to_meta(out); |
8741 | at::AutoDispatchSkipFunctionalize func_guard; |
8742 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
8743 | at::_ops::tensordot_out::call(self_meta, other_meta, dims_self, dims_other, out_meta); |
8744 | } |
8745 | |
8746 | at::Tensor self_; |
8747 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
8748 | at::functionalization::impl::sync(self); |
8749 | self_ = at::functionalization::impl::from_functional_tensor(self); |
8750 | } else { |
8751 | self_ = self; |
8752 | } |
8753 | |
8754 | at::Tensor other_; |
8755 | if (at::functionalization::impl::isFunctionalTensor(other)) { |
8756 | at::functionalization::impl::sync(other); |
8757 | other_ = at::functionalization::impl::from_functional_tensor(other); |
8758 | } else { |
8759 | other_ = other; |
8760 | } |
8761 | |
8762 | at::Tensor out_; |
8763 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
8764 | at::functionalization::impl::sync(out); |
8765 | out_ = at::functionalization::impl::from_functional_tensor(out); |
8766 | } else { |
8767 | out_ = out; |
8768 | } |
8769 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
8770 | if ((false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(other))) { |
8771 | // case 1: trying to mutate a non functional tensor with a functional tensor is an error |
8772 | TORCH_INTERNAL_ASSERT(false, |
8773 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
8774 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
8775 | } else { |
8776 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
8777 | at::AutoDispatchSkipFunctionalize guard; |
8778 | at::Tensor tmp_output = at::_ops::tensordot_out::call(self_, other_, dims_self, dims_other, out_); |
8779 | return out; |
8780 | } |
8781 | } else { |
8782 | at::Tensor tmp_output; |
8783 | { |
8784 | at::AutoDispatchSkipFunctionalize guard; |
8785 | tmp_output = at::_ops::tensordot::call(self_, other_, dims_self, dims_other); |
8786 | } |
8787 | at::functionalization::impl::replace_(out, tmp_output); |
8788 | at::functionalization::impl::commit_update(out); |
8789 | at::functionalization::impl::sync(out); |
8790 | return out; |
8791 | } |
8792 | } |
8793 | |
8794 | at::Tensor & threshold_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & threshold, const at::Scalar & value, at::Tensor & out) { |
8795 | if (false) { |
8796 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
8797 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
8798 | // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
8799 | auto self_meta = to_meta(self); |
8800 | auto out_meta = to_meta(out); |
8801 | at::AutoDispatchSkipFunctionalize func_guard; |
8802 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
8803 | at::_ops::threshold_out::call(self_meta, threshold, value, out_meta); |
8804 | } |
8805 | |
8806 | at::Tensor self_; |
8807 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
8808 | at::functionalization::impl::sync(self); |
8809 | self_ = at::functionalization::impl::from_functional_tensor(self); |
8810 | } else { |
8811 | self_ = self; |
8812 | } |
8813 | |
8814 | at::Tensor out_; |
8815 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
8816 | at::functionalization::impl::sync(out); |
8817 | out_ = at::functionalization::impl::from_functional_tensor(out); |
8818 | } else { |
8819 | out_ = out; |
8820 | } |
8821 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
8822 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
8823 | // case 1: trying to mutate a non functional tensor with a functional tensor is an error |
8824 | TORCH_INTERNAL_ASSERT(false, |
8825 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
8826 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
8827 | } else { |
8828 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
8829 | at::AutoDispatchSkipFunctionalize guard; |
8830 | at::Tensor tmp_output = at::_ops::threshold_out::call(self_, threshold, value, out_); |
8831 | return out; |
8832 | } |
8833 | } else { |
8834 | at::Tensor tmp_output; |
8835 | { |
8836 | at::AutoDispatchSkipFunctionalize guard; |
8837 | tmp_output = at::_ops::threshold::call(self_, threshold, value); |
8838 | } |
8839 | at::functionalization::impl::replace_(out, tmp_output); |
8840 | at::functionalization::impl::commit_update(out); |
8841 | at::functionalization::impl::sync(out); |
8842 | return out; |
8843 | } |
8844 | } |
8845 | |
8846 | at::Tensor & threshold_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & threshold, const at::Scalar & value) { |
8847 | if (true) { |
8848 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
8849 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
8850 | // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
8851 | auto self_meta = to_meta(self); |
8852 | at::AutoDispatchSkipFunctionalize func_guard; |
8853 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
8854 | at::_ops::threshold_::call(self_meta, threshold, value); |
8855 | } |
8856 | |
8857 | at::Tensor self_; |
8858 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
8859 | at::functionalization::impl::sync(self); |
8860 | self_ = at::functionalization::impl::from_functional_tensor(self); |
8861 | } else { |
8862 | self_ = self; |
8863 | } |
8864 | if (!(true && at::functionalization::impl::isFunctionalTensor(self))) { |
8865 | if ((false)) { |
8866 | // case 1: trying to mutate a non functional tensor with a functional tensor is an error |
8867 | TORCH_INTERNAL_ASSERT(false, |
8868 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
8869 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
8870 | } else { |
8871 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
8872 | at::AutoDispatchSkipFunctionalize guard; |
8873 | at::Tensor tmp_output = at::_ops::threshold_::call(self_, threshold, value); |
8874 | return self; |
8875 | } |
8876 | } else { |
8877 | at::Tensor tmp_output; |
8878 | { |
8879 | at::AutoDispatchSkipFunctionalize guard; |
8880 | tmp_output = at::_ops::threshold::call(self_, threshold, value); |
8881 | } |
8882 | at::functionalization::impl::replace_(self, tmp_output); |
8883 | at::functionalization::impl::commit_update(self); |
8884 | at::functionalization::impl::sync(self); |
8885 | return self; |
8886 | } |
8887 | } |
8888 | |
8889 | at::Tensor & _nested_tensor_strides_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) { |
8890 | if (false) { |
8891 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
8892 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
8893 | // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
8894 | auto self_meta = to_meta(self); |
8895 | auto out_meta = to_meta(out); |
8896 | at::AutoDispatchSkipFunctionalize func_guard; |
8897 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
8898 | at::_ops::_nested_tensor_strides_out::call(self_meta, out_meta); |
8899 | } |
8900 | |
8901 | at::Tensor self_; |
8902 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
8903 | at::functionalization::impl::sync(self); |
8904 | self_ = at::functionalization::impl::from_functional_tensor(self); |
8905 | } else { |
8906 | self_ = self; |
8907 | } |
8908 | |
8909 | at::Tensor out_; |
8910 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
8911 | at::functionalization::impl::sync(out); |
8912 | out_ = at::functionalization::impl::from_functional_tensor(out); |
8913 | } else { |
8914 | out_ = out; |
8915 | } |
8916 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
8917 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
8918 | // case 1: trying to mutate a non functional tensor with a functional tensor is an error |
8919 | TORCH_INTERNAL_ASSERT(false, |
8920 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
8921 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
8922 | } else { |
8923 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
8924 | at::AutoDispatchSkipFunctionalize guard; |
8925 | at::Tensor tmp_output = at::_ops::_nested_tensor_strides_out::call(self_, out_); |
8926 | return out; |
8927 | } |
8928 | } else { |
8929 | at::Tensor tmp_output; |
8930 | { |
8931 | at::AutoDispatchSkipFunctionalize guard; |
8932 | tmp_output = at::_ops::_nested_tensor_strides::call(self_); |
8933 | } |
8934 | at::functionalization::impl::replace_(out, tmp_output); |
8935 | at::functionalization::impl::commit_update(out); |
8936 | at::functionalization::impl::sync(out); |
8937 | return out; |
8938 | } |
8939 | } |
8940 | |
8941 | at::Tensor & fix_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) { |
8942 | if (false) { |
8943 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
8944 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
    // (We can only do this for inplace ops today though, because they technically all support meta tensors).
8946 | auto self_meta = to_meta(self); |
8947 | auto out_meta = to_meta(out); |
8948 | at::AutoDispatchSkipFunctionalize func_guard; |
8949 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
8950 | at::_ops::fix_out::call(self_meta, out_meta); |
8951 | } |
8952 | |
8953 | at::Tensor self_; |
8954 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
8955 | at::functionalization::impl::sync(self); |
8956 | self_ = at::functionalization::impl::from_functional_tensor(self); |
8957 | } else { |
8958 | self_ = self; |
8959 | } |
8960 | |
8961 | at::Tensor out_; |
8962 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
8963 | at::functionalization::impl::sync(out); |
8964 | out_ = at::functionalization::impl::from_functional_tensor(out); |
8965 | } else { |
8966 | out_ = out; |
8967 | } |
8968 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
8969 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
      // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
8971 | TORCH_INTERNAL_ASSERT(false, |
8972 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
8973 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
8974 | } else { |
8975 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
8976 | at::AutoDispatchSkipFunctionalize guard; |
8977 | at::Tensor tmp_output = at::_ops::fix_out::call(self_, out_); |
      return out;
8979 | } |
8980 | } else { |
8981 | at::Tensor tmp_output; |
8982 | { |
8983 | at::AutoDispatchSkipFunctionalize guard; |
8984 | tmp_output = at::_ops::fix::call(self_); |
8985 | } |
8986 | at::functionalization::impl::replace_(out, tmp_output); |
8987 | at::functionalization::impl::commit_update(out); |
8988 | at::functionalization::impl::sync(out); |
8989 | return out; |
8990 | } |
8991 | } |
8992 | |
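// Note: the constant `if (true)` / `if (false)` at the top of each wrapper gates the
// meta-tensor pre-check described in the comment it guards. In-place ops such as fix_
// below keep it enabled: the original mutable op is first run on to_meta() copies purely
// to surface shape errors eagerly, with AutoDispatchSkipFunctionalize and
// exclude_keys_for_meta_dispatch keeping that probe out of the functionalization pass.
// The out= wrappers use `false`, matching the "inplace ops only" caveat in the generated
// comment above the check.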
8993 | at::Tensor & fix_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) { |
8994 | if (true) { |
8995 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
8996 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
    // (We can only do this for inplace ops today though, because they technically all support meta tensors).
8998 | auto self_meta = to_meta(self); |
8999 | at::AutoDispatchSkipFunctionalize func_guard; |
9000 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
9001 | at::_ops::fix_::call(self_meta); |
9002 | } |
9003 | |
9004 | at::Tensor self_; |
9005 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
9006 | at::functionalization::impl::sync(self); |
9007 | self_ = at::functionalization::impl::from_functional_tensor(self); |
9008 | } else { |
9009 | self_ = self; |
9010 | } |
9011 | if (!(true && at::functionalization::impl::isFunctionalTensor(self))) { |
9012 | if ((false)) { |
      // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
9014 | TORCH_INTERNAL_ASSERT(false, |
9015 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
9016 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
9017 | } else { |
9018 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
9019 | at::AutoDispatchSkipFunctionalize guard; |
9020 | at::Tensor tmp_output = at::_ops::fix_::call(self_); |
      return self;
9022 | } |
9023 | } else { |
9024 | at::Tensor tmp_output; |
9025 | { |
9026 | at::AutoDispatchSkipFunctionalize guard; |
9027 | tmp_output = at::_ops::fix::call(self_); |
9028 | } |
9029 | at::functionalization::impl::replace_(self, tmp_output); |
9030 | at::functionalization::impl::commit_update(self); |
9031 | at::functionalization::impl::sync(self); |
9032 | return self; |
9033 | } |
9034 | } |
9035 | |
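// Note: unique_consecutive_out_out below is the multi-output form of the same pattern:
// the functional variant returns a tuple, and each element is written back into its
// matching out argument, roughly
//   tmp_output = at::_ops::unique_consecutive::call(self_, return_inverse, return_counts, dim);
//   replace_(out0, std::get<0>(tmp_output)); commit_update(out0); sync(out0);
//   // ...and likewise for out1 and out2.
// The write-back path is only taken when out0, out1 and out2 are all functional tensors.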
9036 | ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> unique_consecutive_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, bool return_inverse, bool return_counts, c10::optional<int64_t> dim, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) { |
9037 | if (false) { |
9038 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
9039 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
    // (We can only do this for inplace ops today though, because they technically all support meta tensors).
9041 | auto self_meta = to_meta(self); |
9042 | auto out0_meta = to_meta(out0); |
9043 | auto out1_meta = to_meta(out1); |
9044 | auto out2_meta = to_meta(out2); |
9045 | at::AutoDispatchSkipFunctionalize func_guard; |
9046 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
9047 | at::_ops::unique_consecutive_out::call(self_meta, return_inverse, return_counts, dim, out0_meta, out1_meta, out2_meta); |
9048 | } |
9049 | |
9050 | at::Tensor self_; |
9051 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
9052 | at::functionalization::impl::sync(self); |
9053 | self_ = at::functionalization::impl::from_functional_tensor(self); |
9054 | } else { |
9055 | self_ = self; |
9056 | } |
9057 | |
9058 | at::Tensor out0_; |
9059 | if (at::functionalization::impl::isFunctionalTensor(out0)) { |
9060 | at::functionalization::impl::sync(out0); |
9061 | out0_ = at::functionalization::impl::from_functional_tensor(out0); |
9062 | } else { |
9063 | out0_ = out0; |
9064 | } |
9065 | |
9066 | at::Tensor out1_; |
9067 | if (at::functionalization::impl::isFunctionalTensor(out1)) { |
9068 | at::functionalization::impl::sync(out1); |
9069 | out1_ = at::functionalization::impl::from_functional_tensor(out1); |
9070 | } else { |
9071 | out1_ = out1; |
9072 | } |
9073 | |
9074 | at::Tensor out2_; |
9075 | if (at::functionalization::impl::isFunctionalTensor(out2)) { |
9076 | at::functionalization::impl::sync(out2); |
9077 | out2_ = at::functionalization::impl::from_functional_tensor(out2); |
9078 | } else { |
9079 | out2_ = out2; |
9080 | } |
9081 | if (!(true && at::functionalization::impl::isFunctionalTensor(out0) && at::functionalization::impl::isFunctionalTensor(out1) && at::functionalization::impl::isFunctionalTensor(out2))) { |
9082 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
      // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
9084 | TORCH_INTERNAL_ASSERT(false, |
9085 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
9086 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
9087 | } else { |
9088 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
9089 | at::AutoDispatchSkipFunctionalize guard; |
9090 | ::std::tuple<at::Tensor,at::Tensor,at::Tensor> tmp_output = at::_ops::unique_consecutive_out::call(self_, return_inverse, return_counts, dim, out0_, out1_, out2_); |
      return ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &>(out0, out1, out2);
9092 | } |
9093 | } else { |
9094 | ::std::tuple<at::Tensor,at::Tensor,at::Tensor> tmp_output; |
9095 | { |
9096 | at::AutoDispatchSkipFunctionalize guard; |
9097 | tmp_output = at::_ops::unique_consecutive::call(self_, return_inverse, return_counts, dim); |
9098 | } |
9099 | at::functionalization::impl::replace_(out0, std::get<0>(tmp_output)); |
9100 | at::functionalization::impl::commit_update(out0); |
9101 | at::functionalization::impl::sync(out0); |
9102 | at::functionalization::impl::replace_(out1, std::get<1>(tmp_output)); |
9103 | at::functionalization::impl::commit_update(out1); |
9104 | at::functionalization::impl::sync(out1); |
9105 | at::functionalization::impl::replace_(out2, std::get<2>(tmp_output)); |
9106 | at::functionalization::impl::commit_update(out2); |
9107 | at::functionalization::impl::sync(out2); |
9108 | return ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &>(out0, out1, out2); |
9109 | } |
9110 | } |
9111 | |
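// Note: the four var.* wrappers below differ only in which functional overload they
// forward to on the write-back path: var_out_out -> at::_ops::var_dim,
// var_out_correction_out -> at::_ops::var_correction, var_out_names_out ->
// at::_ops::var_names_dim, var_out_correction_names_out -> at::_ops::var_correction_names.
// The unwrap / write-back boilerplate around those calls is identical in all four.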
9112 | at::Tensor & var_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::OptionalIntArrayRef dim, bool unbiased, bool keepdim, at::Tensor & out) { |
9113 | if (false) { |
9114 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
9115 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
    // (We can only do this for inplace ops today though, because they technically all support meta tensors).
9117 | auto self_meta = to_meta(self); |
9118 | auto out_meta = to_meta(out); |
9119 | at::AutoDispatchSkipFunctionalize func_guard; |
9120 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
9121 | at::_ops::var_out::call(self_meta, dim, unbiased, keepdim, out_meta); |
9122 | } |
9123 | |
9124 | at::Tensor self_; |
9125 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
9126 | at::functionalization::impl::sync(self); |
9127 | self_ = at::functionalization::impl::from_functional_tensor(self); |
9128 | } else { |
9129 | self_ = self; |
9130 | } |
9131 | |
9132 | at::Tensor out_; |
9133 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
9134 | at::functionalization::impl::sync(out); |
9135 | out_ = at::functionalization::impl::from_functional_tensor(out); |
9136 | } else { |
9137 | out_ = out; |
9138 | } |
9139 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
9140 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
      // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
9142 | TORCH_INTERNAL_ASSERT(false, |
9143 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
9144 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
9145 | } else { |
9146 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
9147 | at::AutoDispatchSkipFunctionalize guard; |
9148 | at::Tensor tmp_output = at::_ops::var_out::call(self_, dim, unbiased, keepdim, out_); |
      return out;
9150 | } |
9151 | } else { |
9152 | at::Tensor tmp_output; |
9153 | { |
9154 | at::AutoDispatchSkipFunctionalize guard; |
9155 | tmp_output = at::_ops::var_dim::call(self_, dim, unbiased, keepdim); |
9156 | } |
9157 | at::functionalization::impl::replace_(out, tmp_output); |
9158 | at::functionalization::impl::commit_update(out); |
9159 | at::functionalization::impl::sync(out); |
9160 | return out; |
9161 | } |
9162 | } |
9163 | |
9164 | at::Tensor & var_out_correction_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::OptionalIntArrayRef dim, c10::optional<int64_t> correction, bool keepdim, at::Tensor & out) { |
9165 | if (false) { |
9166 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
9167 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
    // (We can only do this for inplace ops today though, because they technically all support meta tensors).
9169 | auto self_meta = to_meta(self); |
9170 | auto out_meta = to_meta(out); |
9171 | at::AutoDispatchSkipFunctionalize func_guard; |
9172 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
9173 | at::_ops::var_correction_out::call(self_meta, dim, correction, keepdim, out_meta); |
9174 | } |
9175 | |
9176 | at::Tensor self_; |
9177 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
9178 | at::functionalization::impl::sync(self); |
9179 | self_ = at::functionalization::impl::from_functional_tensor(self); |
9180 | } else { |
9181 | self_ = self; |
9182 | } |
9183 | |
9184 | at::Tensor out_; |
9185 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
9186 | at::functionalization::impl::sync(out); |
9187 | out_ = at::functionalization::impl::from_functional_tensor(out); |
9188 | } else { |
9189 | out_ = out; |
9190 | } |
9191 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
9192 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
      // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
9194 | TORCH_INTERNAL_ASSERT(false, |
9195 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
9196 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
9197 | } else { |
9198 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
9199 | at::AutoDispatchSkipFunctionalize guard; |
9200 | at::Tensor tmp_output = at::_ops::var_correction_out::call(self_, dim, correction, keepdim, out_); |
      return out;
9202 | } |
9203 | } else { |
9204 | at::Tensor tmp_output; |
9205 | { |
9206 | at::AutoDispatchSkipFunctionalize guard; |
9207 | tmp_output = at::_ops::var_correction::call(self_, dim, correction, keepdim); |
9208 | } |
9209 | at::functionalization::impl::replace_(out, tmp_output); |
9210 | at::functionalization::impl::commit_update(out); |
9211 | at::functionalization::impl::sync(out); |
9212 | return out; |
9213 | } |
9214 | } |
9215 | |
9216 | at::Tensor & var_out_names_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::DimnameList dim, bool unbiased, bool keepdim, at::Tensor & out) { |
9217 | if (false) { |
9218 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
9219 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
    // (We can only do this for inplace ops today though, because they technically all support meta tensors).
9221 | auto self_meta = to_meta(self); |
9222 | auto out_meta = to_meta(out); |
9223 | at::AutoDispatchSkipFunctionalize func_guard; |
9224 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
9225 | at::_ops::var_names_out::call(self_meta, dim, unbiased, keepdim, out_meta); |
9226 | } |
9227 | |
9228 | at::Tensor self_; |
9229 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
9230 | at::functionalization::impl::sync(self); |
9231 | self_ = at::functionalization::impl::from_functional_tensor(self); |
9232 | } else { |
9233 | self_ = self; |
9234 | } |
9235 | |
9236 | at::Tensor out_; |
9237 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
9238 | at::functionalization::impl::sync(out); |
9239 | out_ = at::functionalization::impl::from_functional_tensor(out); |
9240 | } else { |
9241 | out_ = out; |
9242 | } |
9243 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
9244 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
      // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
9246 | TORCH_INTERNAL_ASSERT(false, |
9247 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
9248 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
9249 | } else { |
9250 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
9251 | at::AutoDispatchSkipFunctionalize guard; |
9252 | at::Tensor tmp_output = at::_ops::var_names_out::call(self_, dim, unbiased, keepdim, out_); |
      return out;
9254 | } |
9255 | } else { |
9256 | at::Tensor tmp_output; |
9257 | { |
9258 | at::AutoDispatchSkipFunctionalize guard; |
9259 | tmp_output = at::_ops::var_names_dim::call(self_, dim, unbiased, keepdim); |
9260 | } |
9261 | at::functionalization::impl::replace_(out, tmp_output); |
9262 | at::functionalization::impl::commit_update(out); |
9263 | at::functionalization::impl::sync(out); |
9264 | return out; |
9265 | } |
9266 | } |
9267 | |
9268 | at::Tensor & var_out_correction_names_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::DimnameList dim, c10::optional<int64_t> correction, bool keepdim, at::Tensor & out) { |
9269 | if (false) { |
9270 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
9271 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
    // (We can only do this for inplace ops today though, because they technically all support meta tensors).
9273 | auto self_meta = to_meta(self); |
9274 | auto out_meta = to_meta(out); |
9275 | at::AutoDispatchSkipFunctionalize func_guard; |
9276 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
9277 | at::_ops::var_correction_names_out::call(self_meta, dim, correction, keepdim, out_meta); |
9278 | } |
9279 | |
9280 | at::Tensor self_; |
9281 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
9282 | at::functionalization::impl::sync(self); |
9283 | self_ = at::functionalization::impl::from_functional_tensor(self); |
9284 | } else { |
9285 | self_ = self; |
9286 | } |
9287 | |
9288 | at::Tensor out_; |
9289 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
9290 | at::functionalization::impl::sync(out); |
9291 | out_ = at::functionalization::impl::from_functional_tensor(out); |
9292 | } else { |
9293 | out_ = out; |
9294 | } |
9295 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
9296 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
      // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
9298 | TORCH_INTERNAL_ASSERT(false, |
9299 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
9300 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
9301 | } else { |
9302 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
9303 | at::AutoDispatchSkipFunctionalize guard; |
9304 | at::Tensor tmp_output = at::_ops::var_correction_names_out::call(self_, dim, correction, keepdim, out_); |
      return out;
9306 | } |
9307 | } else { |
9308 | at::Tensor tmp_output; |
9309 | { |
9310 | at::AutoDispatchSkipFunctionalize guard; |
9311 | tmp_output = at::_ops::var_correction_names::call(self_, dim, correction, keepdim); |
9312 | } |
9313 | at::functionalization::impl::replace_(out, tmp_output); |
9314 | at::functionalization::impl::commit_update(out); |
9315 | at::functionalization::impl::sync(out); |
9316 | return out; |
9317 | } |
9318 | } |
9319 | |
9320 | at::Tensor & _standard_gamma_grad_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & output, at::Tensor & out) { |
9321 | if (false) { |
9322 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
9323 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
    // (We can only do this for inplace ops today though, because they technically all support meta tensors).
9325 | auto self_meta = to_meta(self); |
9326 | auto output_meta = to_meta(output); |
9327 | auto out_meta = to_meta(out); |
9328 | at::AutoDispatchSkipFunctionalize func_guard; |
9329 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
9330 | at::_ops::_standard_gamma_grad_out::call(self_meta, output_meta, out_meta); |
9331 | } |
9332 | |
9333 | at::Tensor self_; |
9334 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
9335 | at::functionalization::impl::sync(self); |
9336 | self_ = at::functionalization::impl::from_functional_tensor(self); |
9337 | } else { |
9338 | self_ = self; |
9339 | } |
9340 | |
9341 | at::Tensor output_; |
9342 | if (at::functionalization::impl::isFunctionalTensor(output)) { |
9343 | at::functionalization::impl::sync(output); |
9344 | output_ = at::functionalization::impl::from_functional_tensor(output); |
9345 | } else { |
9346 | output_ = output; |
9347 | } |
9348 | |
9349 | at::Tensor out_; |
9350 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
9351 | at::functionalization::impl::sync(out); |
9352 | out_ = at::functionalization::impl::from_functional_tensor(out); |
9353 | } else { |
9354 | out_ = out; |
9355 | } |
9356 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
9357 | if ((false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(output))) { |
      // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
9359 | TORCH_INTERNAL_ASSERT(false, |
9360 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
9361 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
9362 | } else { |
9363 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
9364 | at::AutoDispatchSkipFunctionalize guard; |
9365 | at::Tensor tmp_output = at::_ops::_standard_gamma_grad_out::call(self_, output_, out_); |
      return out;
9367 | } |
9368 | } else { |
9369 | at::Tensor tmp_output; |
9370 | { |
9371 | at::AutoDispatchSkipFunctionalize guard; |
9372 | tmp_output = at::_ops::_standard_gamma_grad::call(self_, output_); |
9373 | } |
9374 | at::functionalization::impl::replace_(out, tmp_output); |
9375 | at::functionalization::impl::commit_update(out); |
9376 | at::functionalization::impl::sync(out); |
9377 | return out; |
9378 | } |
9379 | } |
9380 | |
9381 | at::Tensor & poisson_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::optional<at::Generator> generator, at::Tensor & out) { |
9382 | if (false) { |
9383 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
9384 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
    // (We can only do this for inplace ops today though, because they technically all support meta tensors).
9386 | auto self_meta = to_meta(self); |
9387 | auto out_meta = to_meta(out); |
9388 | at::AutoDispatchSkipFunctionalize func_guard; |
9389 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
9390 | at::_ops::poisson_out::call(self_meta, generator, out_meta); |
9391 | } |
9392 | |
9393 | at::Tensor self_; |
9394 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
9395 | at::functionalization::impl::sync(self); |
9396 | self_ = at::functionalization::impl::from_functional_tensor(self); |
9397 | } else { |
9398 | self_ = self; |
9399 | } |
9400 | |
9401 | at::Tensor out_; |
9402 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
9403 | at::functionalization::impl::sync(out); |
9404 | out_ = at::functionalization::impl::from_functional_tensor(out); |
9405 | } else { |
9406 | out_ = out; |
9407 | } |
9408 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
9409 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
      // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
9411 | TORCH_INTERNAL_ASSERT(false, |
9412 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
9413 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
9414 | } else { |
9415 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
9416 | at::AutoDispatchSkipFunctionalize guard; |
9417 | at::Tensor tmp_output = at::_ops::poisson_out::call(self_, generator, out_); |
      return out;
9419 | } |
9420 | } else { |
9421 | at::Tensor tmp_output; |
9422 | { |
9423 | at::AutoDispatchSkipFunctionalize guard; |
9424 | tmp_output = at::_ops::poisson::call(self_, generator); |
9425 | } |
9426 | at::functionalization::impl::replace_(out, tmp_output); |
9427 | at::functionalization::impl::commit_update(out); |
9428 | at::functionalization::impl::sync(out); |
9429 | return out; |
9430 | } |
9431 | } |
9432 | |
9433 | at::Tensor & _sparse_csr_sum_out_dim_dtype_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef dim, bool keepdim, c10::optional<at::ScalarType> dtype, at::Tensor & out) { |
9434 | if (false) { |
9435 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
9436 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
    // (We can only do this for inplace ops today though, because they technically all support meta tensors).
9438 | auto self_meta = to_meta(self); |
9439 | auto out_meta = to_meta(out); |
9440 | at::AutoDispatchSkipFunctionalize func_guard; |
9441 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
9442 | at::_ops::_sparse_csr_sum_dim_dtype_out::call(self_meta, dim, keepdim, dtype, out_meta); |
9443 | } |
9444 | |
9445 | at::Tensor self_; |
9446 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
9447 | at::functionalization::impl::sync(self); |
9448 | self_ = at::functionalization::impl::from_functional_tensor(self); |
9449 | } else { |
9450 | self_ = self; |
9451 | } |
9452 | |
9453 | at::Tensor out_; |
9454 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
9455 | at::functionalization::impl::sync(out); |
9456 | out_ = at::functionalization::impl::from_functional_tensor(out); |
9457 | } else { |
9458 | out_ = out; |
9459 | } |
9460 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
9461 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
      // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
9463 | TORCH_INTERNAL_ASSERT(false, |
9464 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
9465 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
9466 | } else { |
9467 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
9468 | at::AutoDispatchSkipFunctionalize guard; |
9469 | at::Tensor tmp_output = at::_ops::_sparse_csr_sum_dim_dtype_out::call(self_, dim, keepdim, dtype, out_); |
      return out;
9471 | } |
9472 | } else { |
9473 | at::Tensor tmp_output; |
9474 | { |
9475 | at::AutoDispatchSkipFunctionalize guard; |
9476 | tmp_output = at::_ops::_sparse_csr_sum_dim_dtype::call(self_, dim, keepdim, dtype); |
9477 | } |
9478 | at::functionalization::impl::replace_(out, tmp_output); |
9479 | at::functionalization::impl::commit_update(out); |
9480 | at::functionalization::impl::sync(out); |
9481 | return out; |
9482 | } |
9483 | } |
9484 | |
9485 | at::Tensor & _sparse_softmax_backward_data_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & output, int64_t dim, const at::Tensor & self, at::Tensor & out) { |
9486 | if (false) { |
9487 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
9488 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
    // (We can only do this for inplace ops today though, because they technically all support meta tensors).
9490 | auto grad_output_meta = to_meta(grad_output); |
9491 | auto output_meta = to_meta(output); |
9492 | auto self_meta = to_meta(self); |
9493 | auto out_meta = to_meta(out); |
9494 | at::AutoDispatchSkipFunctionalize func_guard; |
9495 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
9496 | at::_ops::_sparse_softmax_backward_data_out::call(grad_output_meta, output_meta, dim, self_meta, out_meta); |
9497 | } |
9498 | |
9499 | at::Tensor grad_output_; |
9500 | if (at::functionalization::impl::isFunctionalTensor(grad_output)) { |
9501 | at::functionalization::impl::sync(grad_output); |
9502 | grad_output_ = at::functionalization::impl::from_functional_tensor(grad_output); |
9503 | } else { |
9504 | grad_output_ = grad_output; |
9505 | } |
9506 | |
9507 | at::Tensor output_; |
9508 | if (at::functionalization::impl::isFunctionalTensor(output)) { |
9509 | at::functionalization::impl::sync(output); |
9510 | output_ = at::functionalization::impl::from_functional_tensor(output); |
9511 | } else { |
9512 | output_ = output; |
9513 | } |
9514 | |
9515 | at::Tensor self_; |
9516 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
9517 | at::functionalization::impl::sync(self); |
9518 | self_ = at::functionalization::impl::from_functional_tensor(self); |
9519 | } else { |
9520 | self_ = self; |
9521 | } |
9522 | |
9523 | at::Tensor out_; |
9524 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
9525 | at::functionalization::impl::sync(out); |
9526 | out_ = at::functionalization::impl::from_functional_tensor(out); |
9527 | } else { |
9528 | out_ = out; |
9529 | } |
9530 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
9531 | if ((false || at::functionalization::impl::isFunctionalTensor(grad_output) || at::functionalization::impl::isFunctionalTensor(output) || at::functionalization::impl::isFunctionalTensor(self))) { |
      // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
9533 | TORCH_INTERNAL_ASSERT(false, |
9534 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
9535 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
9536 | } else { |
9537 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
9538 | at::AutoDispatchSkipFunctionalize guard; |
9539 | at::Tensor tmp_output = at::_ops::_sparse_softmax_backward_data_out::call(grad_output_, output_, dim, self_, out_); |
      return out;
9541 | } |
9542 | } else { |
9543 | at::Tensor tmp_output; |
9544 | { |
9545 | at::AutoDispatchSkipFunctionalize guard; |
9546 | tmp_output = at::_ops::_sparse_softmax_backward_data::call(grad_output_, output_, dim, self_); |
9547 | } |
9548 | at::functionalization::impl::replace_(out, tmp_output); |
9549 | at::functionalization::impl::commit_update(out); |
9550 | at::functionalization::impl::sync(out); |
9551 | return out; |
9552 | } |
9553 | } |
9554 | |
9555 | at::Tensor & _sparse_log_softmax_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, bool half_to_float, at::Tensor & out) { |
9556 | if (false) { |
9557 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
9558 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
    // (We can only do this for inplace ops today though, because they technically all support meta tensors).
9560 | auto self_meta = to_meta(self); |
9561 | auto out_meta = to_meta(out); |
9562 | at::AutoDispatchSkipFunctionalize func_guard; |
9563 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
9564 | at::_ops::_sparse_log_softmax_out::call(self_meta, dim, half_to_float, out_meta); |
9565 | } |
9566 | |
9567 | at::Tensor self_; |
9568 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
9569 | at::functionalization::impl::sync(self); |
9570 | self_ = at::functionalization::impl::from_functional_tensor(self); |
9571 | } else { |
9572 | self_ = self; |
9573 | } |
9574 | |
9575 | at::Tensor out_; |
9576 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
9577 | at::functionalization::impl::sync(out); |
9578 | out_ = at::functionalization::impl::from_functional_tensor(out); |
9579 | } else { |
9580 | out_ = out; |
9581 | } |
9582 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
9583 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
      // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
9585 | TORCH_INTERNAL_ASSERT(false, |
9586 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
9587 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
9588 | } else { |
9589 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
9590 | at::AutoDispatchSkipFunctionalize guard; |
9591 | at::Tensor tmp_output = at::_ops::_sparse_log_softmax_out::call(self_, dim, half_to_float, out_); |
      return out;
9593 | } |
9594 | } else { |
9595 | at::Tensor tmp_output; |
9596 | { |
9597 | at::AutoDispatchSkipFunctionalize guard; |
9598 | tmp_output = at::_ops::_sparse_log_softmax::call(self_, dim, half_to_float); |
9599 | } |
9600 | at::functionalization::impl::replace_(out, tmp_output); |
9601 | at::functionalization::impl::commit_update(out); |
9602 | at::functionalization::impl::sync(out); |
9603 | return out; |
9604 | } |
9605 | } |
9606 | |
9607 | at::Tensor & _sparse_log_softmax_backward_data_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & output, int64_t dim, const at::Tensor & self, at::Tensor & out) { |
9608 | if (false) { |
9609 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
9610 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
    // (We can only do this for inplace ops today though, because they technically all support meta tensors).
9612 | auto grad_output_meta = to_meta(grad_output); |
9613 | auto output_meta = to_meta(output); |
9614 | auto self_meta = to_meta(self); |
9615 | auto out_meta = to_meta(out); |
9616 | at::AutoDispatchSkipFunctionalize func_guard; |
9617 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
9618 | at::_ops::_sparse_log_softmax_backward_data_out::call(grad_output_meta, output_meta, dim, self_meta, out_meta); |
9619 | } |
9620 | |
9621 | at::Tensor grad_output_; |
9622 | if (at::functionalization::impl::isFunctionalTensor(grad_output)) { |
9623 | at::functionalization::impl::sync(grad_output); |
9624 | grad_output_ = at::functionalization::impl::from_functional_tensor(grad_output); |
9625 | } else { |
9626 | grad_output_ = grad_output; |
9627 | } |
9628 | |
9629 | at::Tensor output_; |
9630 | if (at::functionalization::impl::isFunctionalTensor(output)) { |
9631 | at::functionalization::impl::sync(output); |
9632 | output_ = at::functionalization::impl::from_functional_tensor(output); |
9633 | } else { |
9634 | output_ = output; |
9635 | } |
9636 | |
9637 | at::Tensor self_; |
9638 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
9639 | at::functionalization::impl::sync(self); |
9640 | self_ = at::functionalization::impl::from_functional_tensor(self); |
9641 | } else { |
9642 | self_ = self; |
9643 | } |
9644 | |
9645 | at::Tensor out_; |
9646 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
9647 | at::functionalization::impl::sync(out); |
9648 | out_ = at::functionalization::impl::from_functional_tensor(out); |
9649 | } else { |
9650 | out_ = out; |
9651 | } |
9652 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
9653 | if ((false || at::functionalization::impl::isFunctionalTensor(grad_output) || at::functionalization::impl::isFunctionalTensor(output) || at::functionalization::impl::isFunctionalTensor(self))) { |
      // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
9655 | TORCH_INTERNAL_ASSERT(false, |
9656 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
9657 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
9658 | } else { |
9659 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
9660 | at::AutoDispatchSkipFunctionalize guard; |
9661 | at::Tensor tmp_output = at::_ops::_sparse_log_softmax_backward_data_out::call(grad_output_, output_, dim, self_, out_); |
      return out;
9663 | } |
9664 | } else { |
9665 | at::Tensor tmp_output; |
9666 | { |
9667 | at::AutoDispatchSkipFunctionalize guard; |
9668 | tmp_output = at::_ops::_sparse_log_softmax_backward_data::call(grad_output_, output_, dim, self_); |
9669 | } |
9670 | at::functionalization::impl::replace_(out, tmp_output); |
9671 | at::functionalization::impl::commit_update(out); |
9672 | at::functionalization::impl::sync(out); |
9673 | return out; |
9674 | } |
9675 | } |
9676 | |
9677 | const at::Tensor & resize_as_sparse_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & the_template, const at::Tensor & out) { |
9678 | if (false) { |
9679 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
9680 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
    // (We can only do this for inplace ops today though, because they technically all support meta tensors).
9682 | auto self_meta = to_meta(self); |
9683 | auto the_template_meta = to_meta(the_template); |
9684 | auto out_meta = to_meta(out); |
9685 | at::AutoDispatchSkipFunctionalize func_guard; |
9686 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
9687 | at::_ops::resize_as_sparse_out::call(self_meta, the_template_meta, out_meta); |
9688 | } |
9689 | |
9690 | at::Tensor self_; |
9691 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
9692 | at::functionalization::impl::sync(self); |
9693 | self_ = at::functionalization::impl::from_functional_tensor(self); |
9694 | } else { |
9695 | self_ = self; |
9696 | } |
9697 | |
9698 | at::Tensor the_template_; |
9699 | if (at::functionalization::impl::isFunctionalTensor(the_template)) { |
9700 | at::functionalization::impl::sync(the_template); |
9701 | the_template_ = at::functionalization::impl::from_functional_tensor(the_template); |
9702 | } else { |
9703 | the_template_ = the_template; |
9704 | } |
9705 | |
9706 | at::Tensor out_; |
9707 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
9708 | at::functionalization::impl::sync(out); |
9709 | out_ = at::functionalization::impl::from_functional_tensor(out); |
9710 | } else { |
9711 | out_ = out; |
9712 | } |
9713 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
9714 | if ((false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(the_template))) { |
      // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
9716 | TORCH_INTERNAL_ASSERT(false, |
9717 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
9718 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
9719 | } else { |
9720 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
9721 | at::AutoDispatchSkipFunctionalize guard; |
9722 | at::Tensor tmp_output = at::_ops::resize_as_sparse_out::call(self_, the_template_, out_); |
      return out;
9724 | } |
9725 | } else { |
9726 | at::Tensor tmp_output; |
9727 | { |
9728 | at::AutoDispatchSkipFunctionalize guard; |
9729 | tmp_output = at::_ops::resize_as_sparse::call(self_, the_template_); |
9730 | } |
9731 | at::functionalization::impl::replace_(out, tmp_output); |
9732 | at::functionalization::impl::commit_update(out); |
9733 | at::functionalization::impl::sync(out); |
9734 | return out; |
9735 | } |
9736 | } |
9737 | |
9738 | const at::Tensor & resize_as_sparse_(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & the_template) { |
9739 | if (true) { |
9740 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
9741 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
    // (We can only do this for inplace ops today though, because they technically all support meta tensors).
9743 | auto self_meta = to_meta(self); |
9744 | auto the_template_meta = to_meta(the_template); |
9745 | at::AutoDispatchSkipFunctionalize func_guard; |
9746 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
9747 | at::_ops::resize_as_sparse_::call(self_meta, the_template_meta); |
9748 | } |
9749 | |
9750 | at::Tensor self_; |
9751 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
9752 | at::functionalization::impl::sync(self); |
9753 | self_ = at::functionalization::impl::from_functional_tensor(self); |
9754 | } else { |
9755 | self_ = self; |
9756 | } |
9757 | |
9758 | at::Tensor the_template_; |
9759 | if (at::functionalization::impl::isFunctionalTensor(the_template)) { |
9760 | at::functionalization::impl::sync(the_template); |
9761 | the_template_ = at::functionalization::impl::from_functional_tensor(the_template); |
9762 | } else { |
9763 | the_template_ = the_template; |
9764 | } |
9765 | if (!(true && at::functionalization::impl::isFunctionalTensor(self))) { |
9766 | if ((false || at::functionalization::impl::isFunctionalTensor(the_template))) { |
      // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
9768 | TORCH_INTERNAL_ASSERT(false, |
9769 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
9770 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
9771 | } else { |
9772 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
9773 | at::AutoDispatchSkipFunctionalize guard; |
9774 | at::Tensor tmp_output = at::_ops::resize_as_sparse_::call(self_, the_template_); |
      return self;
9776 | } |
9777 | } else { |
9778 | at::Tensor tmp_output; |
9779 | { |
9780 | at::AutoDispatchSkipFunctionalize guard; |
9781 | tmp_output = at::_ops::resize_as_sparse::call(self_, the_template_); |
9782 | } |
9783 | at::functionalization::impl::replace_(self, tmp_output); |
9784 | at::functionalization::impl::commit_update(self); |
9785 | at::functionalization::impl::sync(self); |
9786 | return self; |
9787 | } |
9788 | } |
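// Note: resize_as_sparse_ above receives `const at::Tensor & self` yet is still treated
// as an in-place op (presumably because the op schema declares self as mutated), so the
// result is written back through replace_/commit_update/sync on self, exactly as for the
// non-const in-place wrappers such as sub__Tensor below.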
9789 | |
9790 | at::Tensor & sub_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha, at::Tensor & out) { |
9791 | if (false) { |
9792 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
9793 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
    // (We can only do this for inplace ops today though, because they technically all support meta tensors).
9795 | auto self_meta = to_meta(self); |
9796 | auto other_meta = to_meta(other); |
9797 | auto out_meta = to_meta(out); |
9798 | at::AutoDispatchSkipFunctionalize func_guard; |
9799 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
9800 | at::_ops::sub_out::call(self_meta, other_meta, alpha, out_meta); |
9801 | } |
9802 | |
9803 | at::Tensor self_; |
9804 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
9805 | at::functionalization::impl::sync(self); |
9806 | self_ = at::functionalization::impl::from_functional_tensor(self); |
9807 | } else { |
9808 | self_ = self; |
9809 | } |
9810 | |
9811 | at::Tensor other_; |
9812 | if (at::functionalization::impl::isFunctionalTensor(other)) { |
9813 | at::functionalization::impl::sync(other); |
9814 | other_ = at::functionalization::impl::from_functional_tensor(other); |
9815 | } else { |
9816 | other_ = other; |
9817 | } |
9818 | |
9819 | at::Tensor out_; |
9820 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
9821 | at::functionalization::impl::sync(out); |
9822 | out_ = at::functionalization::impl::from_functional_tensor(out); |
9823 | } else { |
9824 | out_ = out; |
9825 | } |
9826 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
9827 | if ((false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(other))) { |
      // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
9829 | TORCH_INTERNAL_ASSERT(false, |
9830 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
9831 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
9832 | } else { |
9833 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
9834 | at::AutoDispatchSkipFunctionalize guard; |
9835 | at::Tensor tmp_output = at::_ops::sub_out::call(self_, other_, alpha, out_); |
      return out;
9837 | } |
9838 | } else { |
9839 | at::Tensor tmp_output; |
9840 | { |
9841 | at::AutoDispatchSkipFunctionalize guard; |
9842 | tmp_output = at::_ops::sub_Tensor::call(self_, other_, alpha); |
9843 | } |
9844 | at::functionalization::impl::replace_(out, tmp_output); |
9845 | at::functionalization::impl::commit_update(out); |
9846 | at::functionalization::impl::sync(out); |
9847 | return out; |
9848 | } |
9849 | } |
9850 | |
9851 | at::Tensor & sub__Tensor(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha) { |
9852 | if (true) { |
9853 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
9854 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
    // (We can only do this for inplace ops today though, because they technically all support meta tensors).
9856 | auto self_meta = to_meta(self); |
9857 | auto other_meta = to_meta(other); |
9858 | at::AutoDispatchSkipFunctionalize func_guard; |
9859 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
9860 | at::_ops::sub__Tensor::call(self_meta, other_meta, alpha); |
9861 | } |
9862 | |
9863 | at::Tensor self_; |
9864 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
9865 | at::functionalization::impl::sync(self); |
9866 | self_ = at::functionalization::impl::from_functional_tensor(self); |
9867 | } else { |
9868 | self_ = self; |
9869 | } |
9870 | |
9871 | at::Tensor other_; |
9872 | if (at::functionalization::impl::isFunctionalTensor(other)) { |
9873 | at::functionalization::impl::sync(other); |
9874 | other_ = at::functionalization::impl::from_functional_tensor(other); |
9875 | } else { |
9876 | other_ = other; |
9877 | } |
9878 | if (!(true && at::functionalization::impl::isFunctionalTensor(self))) { |
9879 | if ((false || at::functionalization::impl::isFunctionalTensor(other))) { |
      // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
9881 | TORCH_INTERNAL_ASSERT(false, |
9882 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
9883 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
9884 | } else { |
9885 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
9886 | at::AutoDispatchSkipFunctionalize guard; |
9887 | at::Tensor tmp_output = at::_ops::sub__Tensor::call(self_, other_, alpha); |
      return self;
9889 | } |
9890 | } else { |
9891 | at::Tensor tmp_output; |
9892 | { |
9893 | at::AutoDispatchSkipFunctionalize guard; |
9894 | tmp_output = at::_ops::sub_Tensor::call(self_, other_, alpha); |
9895 | } |
9896 | at::functionalization::impl::replace_(self, tmp_output); |
9897 | at::functionalization::impl::commit_update(self); |
9898 | at::functionalization::impl::sync(self); |
9899 | return self; |
9900 | } |
9901 | } |
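// Note: concretely, under functionalization an in-place self.sub_(other, alpha) ends up
// behaving like the sketch below (using the local names from the wrapper above):
//   tmp_output = at::_ops::sub_Tensor::call(self_, other_, alpha);   // pure computation
//   at::functionalization::impl::replace_(self, tmp_output);         // update the wrapper
//   at::functionalization::impl::commit_update(self);
//   at::functionalization::impl::sync(self);
// so later reads of `self` observe the subtracted value without the original storage
// having been mutated in place.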
9902 | |
9903 | at::Tensor & sub_out_Scalar_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other, const at::Scalar & alpha, at::Tensor & out) { |
9904 | if (false) { |
9905 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
9906 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
    // (We can only do this for inplace ops today though, because they technically all support meta tensors).
9908 | auto self_meta = to_meta(self); |
9909 | auto out_meta = to_meta(out); |
9910 | at::AutoDispatchSkipFunctionalize func_guard; |
9911 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
9912 | at::_ops::sub_Scalar_out::call(self_meta, other, alpha, out_meta); |
9913 | } |
9914 | |
9915 | at::Tensor self_; |
9916 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
9917 | at::functionalization::impl::sync(self); |
9918 | self_ = at::functionalization::impl::from_functional_tensor(self); |
9919 | } else { |
9920 | self_ = self; |
9921 | } |
9922 | |
9923 | at::Tensor out_; |
9924 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
9925 | at::functionalization::impl::sync(out); |
9926 | out_ = at::functionalization::impl::from_functional_tensor(out); |
9927 | } else { |
9928 | out_ = out; |
9929 | } |
9930 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
9931 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
      // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
9933 | TORCH_INTERNAL_ASSERT(false, |
9934 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
9935 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
9936 | } else { |
9937 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
9938 | at::AutoDispatchSkipFunctionalize guard; |
9939 | at::Tensor tmp_output = at::_ops::sub_Scalar_out::call(self_, other, alpha, out_); |
      return out;
9941 | } |
9942 | } else { |
9943 | at::Tensor tmp_output; |
9944 | { |
9945 | at::AutoDispatchSkipFunctionalize guard; |
9946 | tmp_output = at::_ops::sub_Scalar::call(self_, other, alpha); |
9947 | } |
9948 | at::functionalization::impl::replace_(out, tmp_output); |
9949 | at::functionalization::impl::commit_update(out); |
9950 | at::functionalization::impl::sync(out); |
9951 | return out; |
9952 | } |
9953 | } |
9954 | |
9955 | at::Tensor & sub__Scalar(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & other, const at::Scalar & alpha) { |
9956 | if (true) { |
9957 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
9958 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
    // (We can only do this for inplace ops today though, because they technically all support meta tensors).
9960 | auto self_meta = to_meta(self); |
9961 | at::AutoDispatchSkipFunctionalize func_guard; |
9962 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
9963 | at::_ops::sub__Scalar::call(self_meta, other, alpha); |
9964 | } |
9965 | |
9966 | at::Tensor self_; |
9967 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
9968 | at::functionalization::impl::sync(self); |
9969 | self_ = at::functionalization::impl::from_functional_tensor(self); |
9970 | } else { |
9971 | self_ = self; |
9972 | } |
9973 | if (!(true && at::functionalization::impl::isFunctionalTensor(self))) { |
9974 | if ((false)) { |
      // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
9976 | TORCH_INTERNAL_ASSERT(false, |
9977 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
9978 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
9979 | } else { |
9980 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
9981 | at::AutoDispatchSkipFunctionalize guard; |
9982 | at::Tensor tmp_output = at::_ops::sub__Scalar::call(self_, other, alpha); |
      return self;
9984 | } |
9985 | } else { |
9986 | at::Tensor tmp_output; |
9987 | { |
9988 | at::AutoDispatchSkipFunctionalize guard; |
9989 | tmp_output = at::_ops::sub_Scalar::call(self_, other, alpha); |
9990 | } |
9991 | at::functionalization::impl::replace_(self, tmp_output); |
9992 | at::functionalization::impl::commit_update(self); |
9993 | at::functionalization::impl::sync(self); |
9994 | return self; |
9995 | } |
9996 | } |
9997 | |
9998 | at::Tensor & copy_sparse_to_sparse_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & src, bool non_blocking, at::Tensor & out) { |
9999 | if (false) { |
10000 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
10001 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
    // (We can only do this for inplace ops today though, because they technically all support meta tensors).
10003 | auto self_meta = to_meta(self); |
10004 | auto src_meta = to_meta(src); |
10005 | auto out_meta = to_meta(out); |
10006 | at::AutoDispatchSkipFunctionalize func_guard; |
10007 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
10008 | at::_ops::copy_sparse_to_sparse_out::call(self_meta, src_meta, non_blocking, out_meta); |
10009 | } |
10010 | |
10011 | at::Tensor self_; |
10012 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
10013 | at::functionalization::impl::sync(self); |
10014 | self_ = at::functionalization::impl::from_functional_tensor(self); |
10015 | } else { |
10016 | self_ = self; |
10017 | } |
10018 | |
10019 | at::Tensor src_; |
10020 | if (at::functionalization::impl::isFunctionalTensor(src)) { |
10021 | at::functionalization::impl::sync(src); |
10022 | src_ = at::functionalization::impl::from_functional_tensor(src); |
10023 | } else { |
10024 | src_ = src; |
10025 | } |
10026 | |
10027 | at::Tensor out_; |
10028 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
10029 | at::functionalization::impl::sync(out); |
10030 | out_ = at::functionalization::impl::from_functional_tensor(out); |
10031 | } else { |
10032 | out_ = out; |
10033 | } |
10034 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
10035 | if ((false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(src))) { |
      // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
10037 | TORCH_INTERNAL_ASSERT(false, |
10038 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
10039 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
10040 | } else { |
10041 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
10042 | at::AutoDispatchSkipFunctionalize guard; |
10043 | at::Tensor tmp_output = at::_ops::copy_sparse_to_sparse_out::call(self_, src_, non_blocking, out_); |
      return out;
10045 | } |
10046 | } else { |
10047 | at::Tensor tmp_output; |
10048 | { |
10049 | at::AutoDispatchSkipFunctionalize guard; |
10050 | tmp_output = at::_ops::copy_sparse_to_sparse::call(self_, src_, non_blocking); |
10051 | } |
10052 | at::functionalization::impl::replace_(out, tmp_output); |
10053 | at::functionalization::impl::commit_update(out); |
10054 | at::functionalization::impl::sync(out); |
10055 | return out; |
10056 | } |
10057 | } |
10058 | |
10059 | at::Tensor & copy_sparse_to_sparse_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & src, bool non_blocking) { |
10060 | if (true) { |
10061 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
10062 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
10063 |       // (We can only do this for inplace ops today though, because they technically all support meta tensors).
10064 | auto self_meta = to_meta(self); |
10065 | auto src_meta = to_meta(src); |
10066 | at::AutoDispatchSkipFunctionalize func_guard; |
10067 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
10068 | at::_ops::copy_sparse_to_sparse_::call(self_meta, src_meta, non_blocking); |
10069 | } |
10070 | |
10071 | at::Tensor self_; |
10072 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
10073 | at::functionalization::impl::sync(self); |
10074 | self_ = at::functionalization::impl::from_functional_tensor(self); |
10075 | } else { |
10076 | self_ = self; |
10077 | } |
10078 | |
10079 | at::Tensor src_; |
10080 | if (at::functionalization::impl::isFunctionalTensor(src)) { |
10081 | at::functionalization::impl::sync(src); |
10082 | src_ = at::functionalization::impl::from_functional_tensor(src); |
10083 | } else { |
10084 | src_ = src; |
10085 | } |
10086 | if (!(true && at::functionalization::impl::isFunctionalTensor(self))) { |
10087 | if ((false || at::functionalization::impl::isFunctionalTensor(src))) { |
10088 | // case 1: trying to mutate a non functional tensor with a functional tensor is an error |
10089 | TORCH_INTERNAL_ASSERT(false, |
10090 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
10091 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
10092 | } else { |
10093 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
10094 | at::AutoDispatchSkipFunctionalize guard; |
10095 | at::Tensor tmp_output = at::_ops::copy_sparse_to_sparse_::call(self_, src_, non_blocking); |
10096 |         return self;
10097 | } |
10098 | } else { |
10099 | at::Tensor tmp_output; |
10100 | { |
10101 | at::AutoDispatchSkipFunctionalize guard; |
10102 | tmp_output = at::_ops::copy_sparse_to_sparse::call(self_, src_, non_blocking); |
10103 | } |
10104 | at::functionalization::impl::replace_(self, tmp_output); |
10105 | at::functionalization::impl::commit_update(self); |
10106 | at::functionalization::impl::sync(self); |
10107 | return self; |
10108 | } |
10109 | } |
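
    // A rough summary of the boilerplate shared by the kernels in this section (a reading aid
    // inferred from the generated bodies above and below, not an authoritative spec):
    //   1. Optionally re-run the op on meta tensors first to surface shape errors early
    //      (only the in-place variants enable this pre-check).
    //   2. Unwrap every tensor argument: sync() any pending updates, then pull out the inner
    //      tensor with from_functional_tensor() when the argument is a FunctionalTensorWrapper.
    //   3. If the mutated argument is not a functional tensor: assert if any other input *is*
    //      functional, otherwise no-op and redispatch to the original mutable op.
    //   4. Otherwise, call the functional variant of the op under AutoDispatchSkipFunctionalize
    //      and write the result back with replace_(), commit_update(), and sync().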
10110 | |
10111 | at::Tensor & mkldnn_reorder_conv2d_weight_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, at::OptionalIntArrayRef input_size, at::Tensor & out) { |
10112 | if (false) { |
10113 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
10114 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
10115 |       // (We can only do this for inplace ops today though, because they technically all support meta tensors).
10116 | auto self_meta = to_meta(self); |
10117 | auto out_meta = to_meta(out); |
10118 | at::AutoDispatchSkipFunctionalize func_guard; |
10119 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
10120 | at::_ops::mkldnn_reorder_conv2d_weight_out::call(self_meta, padding, stride, dilation, groups, input_size, out_meta); |
10121 | } |
10122 | |
10123 | at::Tensor self_; |
10124 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
10125 | at::functionalization::impl::sync(self); |
10126 | self_ = at::functionalization::impl::from_functional_tensor(self); |
10127 | } else { |
10128 | self_ = self; |
10129 | } |
10130 | |
10131 | at::Tensor out_; |
10132 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
10133 | at::functionalization::impl::sync(out); |
10134 | out_ = at::functionalization::impl::from_functional_tensor(out); |
10135 | } else { |
10136 | out_ = out; |
10137 | } |
10138 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
10139 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
10140 | // case 1: trying to mutate a non functional tensor with a functional tensor is an error |
10141 | TORCH_INTERNAL_ASSERT(false, |
10142 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
10143 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
10144 | } else { |
10145 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
10146 | at::AutoDispatchSkipFunctionalize guard; |
10147 | at::Tensor tmp_output = at::_ops::mkldnn_reorder_conv2d_weight_out::call(self_, padding, stride, dilation, groups, input_size, out_); |
10148 |         return out;
10149 | } |
10150 | } else { |
10151 | at::Tensor tmp_output; |
10152 | { |
10153 | at::AutoDispatchSkipFunctionalize guard; |
10154 | tmp_output = at::_ops::mkldnn_reorder_conv2d_weight::call(self_, padding, stride, dilation, groups, input_size); |
10155 | } |
10156 | at::functionalization::impl::replace_(out, tmp_output); |
10157 | at::functionalization::impl::commit_update(out); |
10158 | at::functionalization::impl::sync(out); |
10159 | return out; |
10160 | } |
10161 | } |
10162 | |
10163 | ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &> _lstm_mps_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, at::TensorList hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional, bool batch_first, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, at::Tensor & out3, at::Tensor & out4) { |
10164 | if (false) { |
10165 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
10166 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
10167 |       // (We can only do this for inplace ops today though, because they technically all support meta tensors).
10168 | auto input_meta = to_meta(input); |
10169 | auto hx_meta = to_meta(hx); |
10170 | auto params_meta = to_meta(params); |
10171 | auto out0_meta = to_meta(out0); |
10172 | auto out1_meta = to_meta(out1); |
10173 | auto out2_meta = to_meta(out2); |
10174 | auto out3_meta = to_meta(out3); |
10175 | auto out4_meta = to_meta(out4); |
10176 | at::AutoDispatchSkipFunctionalize func_guard; |
10177 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
10178 | at::_ops::_lstm_mps_out::call(input_meta, hx_meta, params_meta, has_biases, num_layers, dropout, train, bidirectional, batch_first, out0_meta, out1_meta, out2_meta, out3_meta, out4_meta); |
10179 | } |
10180 | |
10181 | at::Tensor input_; |
10182 | if (at::functionalization::impl::isFunctionalTensor(input)) { |
10183 | at::functionalization::impl::sync(input); |
10184 | input_ = at::functionalization::impl::from_functional_tensor(input); |
10185 | } else { |
10186 | input_ = input; |
10187 | } |
10188 | |
10189 | ::std::vector<at::Tensor> hx_; |
10190 | if (at::functionalization::impl::isFunctionalTensor(hx)) { |
10191 | at::functionalization::impl::sync(hx); |
10192 | hx_ = at::functionalization::impl::from_functional_tensor(hx); |
10193 | } else { |
10194 | hx_ = hx.vec(); |
10195 | } |
10196 | |
10197 | ::std::vector<at::Tensor> params_; |
10198 | if (at::functionalization::impl::isFunctionalTensor(params)) { |
10199 | at::functionalization::impl::sync(params); |
10200 | params_ = at::functionalization::impl::from_functional_tensor(params); |
10201 | } else { |
10202 | params_ = params.vec(); |
10203 | } |
10204 | |
10205 | at::Tensor out0_; |
10206 | if (at::functionalization::impl::isFunctionalTensor(out0)) { |
10207 | at::functionalization::impl::sync(out0); |
10208 | out0_ = at::functionalization::impl::from_functional_tensor(out0); |
10209 | } else { |
10210 | out0_ = out0; |
10211 | } |
10212 | |
10213 | at::Tensor out1_; |
10214 | if (at::functionalization::impl::isFunctionalTensor(out1)) { |
10215 | at::functionalization::impl::sync(out1); |
10216 | out1_ = at::functionalization::impl::from_functional_tensor(out1); |
10217 | } else { |
10218 | out1_ = out1; |
10219 | } |
10220 | |
10221 | at::Tensor out2_; |
10222 | if (at::functionalization::impl::isFunctionalTensor(out2)) { |
10223 | at::functionalization::impl::sync(out2); |
10224 | out2_ = at::functionalization::impl::from_functional_tensor(out2); |
10225 | } else { |
10226 | out2_ = out2; |
10227 | } |
10228 | |
10229 | at::Tensor out3_; |
10230 | if (at::functionalization::impl::isFunctionalTensor(out3)) { |
10231 | at::functionalization::impl::sync(out3); |
10232 | out3_ = at::functionalization::impl::from_functional_tensor(out3); |
10233 | } else { |
10234 | out3_ = out3; |
10235 | } |
10236 | |
10237 | at::Tensor out4_; |
10238 | if (at::functionalization::impl::isFunctionalTensor(out4)) { |
10239 | at::functionalization::impl::sync(out4); |
10240 | out4_ = at::functionalization::impl::from_functional_tensor(out4); |
10241 | } else { |
10242 | out4_ = out4; |
10243 | } |
10244 | if (!(true && at::functionalization::impl::isFunctionalTensor(out0) && at::functionalization::impl::isFunctionalTensor(out1) && at::functionalization::impl::isFunctionalTensor(out2) && at::functionalization::impl::isFunctionalTensor(out3) && at::functionalization::impl::isFunctionalTensor(out4))) { |
10245 | if ((false || at::functionalization::impl::isFunctionalTensor(input) || at::functionalization::impl::isFunctionalTensor(hx) || at::functionalization::impl::isFunctionalTensor(params))) { |
10246 | // case 1: trying to mutate a non functional tensor with a functional tensor is an error |
10247 | TORCH_INTERNAL_ASSERT(false, |
10248 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
10249 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
10250 | } else { |
10251 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
10252 | at::AutoDispatchSkipFunctionalize guard; |
10253 | ::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor> tmp_output = at::_ops::_lstm_mps_out::call(input_, hx_, params_, has_biases, num_layers, dropout, train, bidirectional, batch_first, out0_, out1_, out2_, out3_, out4_); |
10254 |         return ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &>(out0, out1, out2, out3, out4);
10255 | } |
10256 | } else { |
10257 | ::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor> tmp_output; |
10258 | { |
10259 | at::AutoDispatchSkipFunctionalize guard; |
10260 | tmp_output = at::_ops::_lstm_mps::call(input_, hx_, params_, has_biases, num_layers, dropout, train, bidirectional, batch_first); |
10261 | } |
10262 | at::functionalization::impl::replace_(out0, std::get<0>(tmp_output)); |
10263 | at::functionalization::impl::commit_update(out0); |
10264 | at::functionalization::impl::sync(out0); |
10265 | at::functionalization::impl::replace_(out1, std::get<1>(tmp_output)); |
10266 | at::functionalization::impl::commit_update(out1); |
10267 | at::functionalization::impl::sync(out1); |
10268 | at::functionalization::impl::replace_(out2, std::get<2>(tmp_output)); |
10269 | at::functionalization::impl::commit_update(out2); |
10270 | at::functionalization::impl::sync(out2); |
10271 | at::functionalization::impl::replace_(out3, std::get<3>(tmp_output)); |
10272 | at::functionalization::impl::commit_update(out3); |
10273 | at::functionalization::impl::sync(out3); |
10274 | at::functionalization::impl::replace_(out4, std::get<4>(tmp_output)); |
10275 | at::functionalization::impl::commit_update(out4); |
10276 | at::functionalization::impl::sync(out4); |
10277 | return ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &>(out0, out1, out2, out3, out4); |
10278 | } |
10279 | } |
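
    // For multi-output out= overloads such as _lstm_mps_out above, the same write-back sequence
    // (replace_ / commit_update / sync) is applied to each out argument individually, using the
    // matching element of the functional op's result tuple.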
10280 | |
10281 | ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &> _thnn_fused_gru_cell_backward_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_hy, const at::Tensor & workspace, bool has_bias, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, at::Tensor & out3, at::Tensor & out4) { |
10282 | if (false) { |
10283 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
10284 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
10285 |       // (We can only do this for inplace ops today though, because they technically all support meta tensors).
10286 | auto grad_hy_meta = to_meta(grad_hy); |
10287 | auto workspace_meta = to_meta(workspace); |
10288 | auto out0_meta = to_meta(out0); |
10289 | auto out1_meta = to_meta(out1); |
10290 | auto out2_meta = to_meta(out2); |
10291 | auto out3_meta = to_meta(out3); |
10292 | auto out4_meta = to_meta(out4); |
10293 | at::AutoDispatchSkipFunctionalize func_guard; |
10294 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
10295 | at::_ops::_thnn_fused_gru_cell_backward_out::call(grad_hy_meta, workspace_meta, has_bias, out0_meta, out1_meta, out2_meta, out3_meta, out4_meta); |
10296 | } |
10297 | |
10298 | at::Tensor grad_hy_; |
10299 | if (at::functionalization::impl::isFunctionalTensor(grad_hy)) { |
10300 | at::functionalization::impl::sync(grad_hy); |
10301 | grad_hy_ = at::functionalization::impl::from_functional_tensor(grad_hy); |
10302 | } else { |
10303 | grad_hy_ = grad_hy; |
10304 | } |
10305 | |
10306 | at::Tensor workspace_; |
10307 | if (at::functionalization::impl::isFunctionalTensor(workspace)) { |
10308 | at::functionalization::impl::sync(workspace); |
10309 | workspace_ = at::functionalization::impl::from_functional_tensor(workspace); |
10310 | } else { |
10311 | workspace_ = workspace; |
10312 | } |
10313 | |
10314 | at::Tensor out0_; |
10315 | if (at::functionalization::impl::isFunctionalTensor(out0)) { |
10316 | at::functionalization::impl::sync(out0); |
10317 | out0_ = at::functionalization::impl::from_functional_tensor(out0); |
10318 | } else { |
10319 | out0_ = out0; |
10320 | } |
10321 | |
10322 | at::Tensor out1_; |
10323 | if (at::functionalization::impl::isFunctionalTensor(out1)) { |
10324 | at::functionalization::impl::sync(out1); |
10325 | out1_ = at::functionalization::impl::from_functional_tensor(out1); |
10326 | } else { |
10327 | out1_ = out1; |
10328 | } |
10329 | |
10330 | at::Tensor out2_; |
10331 | if (at::functionalization::impl::isFunctionalTensor(out2)) { |
10332 | at::functionalization::impl::sync(out2); |
10333 | out2_ = at::functionalization::impl::from_functional_tensor(out2); |
10334 | } else { |
10335 | out2_ = out2; |
10336 | } |
10337 | |
10338 | at::Tensor out3_; |
10339 | if (at::functionalization::impl::isFunctionalTensor(out3)) { |
10340 | at::functionalization::impl::sync(out3); |
10341 | out3_ = at::functionalization::impl::from_functional_tensor(out3); |
10342 | } else { |
10343 | out3_ = out3; |
10344 | } |
10345 | |
10346 | at::Tensor out4_; |
10347 | if (at::functionalization::impl::isFunctionalTensor(out4)) { |
10348 | at::functionalization::impl::sync(out4); |
10349 | out4_ = at::functionalization::impl::from_functional_tensor(out4); |
10350 | } else { |
10351 | out4_ = out4; |
10352 | } |
10353 | if (!(true && at::functionalization::impl::isFunctionalTensor(out0) && at::functionalization::impl::isFunctionalTensor(out1) && at::functionalization::impl::isFunctionalTensor(out2) && at::functionalization::impl::isFunctionalTensor(out3) && at::functionalization::impl::isFunctionalTensor(out4))) { |
10354 | if ((false || at::functionalization::impl::isFunctionalTensor(grad_hy) || at::functionalization::impl::isFunctionalTensor(workspace))) { |
10355 | // case 1: trying to mutate a non functional tensor with a functional tensor is an error |
10356 | TORCH_INTERNAL_ASSERT(false, |
10357 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
10358 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
10359 | } else { |
10360 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
10361 | at::AutoDispatchSkipFunctionalize guard; |
10362 | ::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor> tmp_output = at::_ops::_thnn_fused_gru_cell_backward_out::call(grad_hy_, workspace_, has_bias, out0_, out1_, out2_, out3_, out4_); |
10363 |         return ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &>(out0, out1, out2, out3, out4);
10364 | } |
10365 | } else { |
10366 | ::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor> tmp_output; |
10367 | { |
10368 | at::AutoDispatchSkipFunctionalize guard; |
10369 | tmp_output = at::_ops::_thnn_fused_gru_cell_backward::call(grad_hy_, workspace_, has_bias); |
10370 | } |
10371 | at::functionalization::impl::replace_(out0, std::get<0>(tmp_output)); |
10372 | at::functionalization::impl::commit_update(out0); |
10373 | at::functionalization::impl::sync(out0); |
10374 | at::functionalization::impl::replace_(out1, std::get<1>(tmp_output)); |
10375 | at::functionalization::impl::commit_update(out1); |
10376 | at::functionalization::impl::sync(out1); |
10377 | at::functionalization::impl::replace_(out2, std::get<2>(tmp_output)); |
10378 | at::functionalization::impl::commit_update(out2); |
10379 | at::functionalization::impl::sync(out2); |
10380 | at::functionalization::impl::replace_(out3, std::get<3>(tmp_output)); |
10381 | at::functionalization::impl::commit_update(out3); |
10382 | at::functionalization::impl::sync(out3); |
10383 | at::functionalization::impl::replace_(out4, std::get<4>(tmp_output)); |
10384 | at::functionalization::impl::commit_update(out4); |
10385 | at::functionalization::impl::sync(out4); |
10386 | return ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &>(out0, out1, out2, out3, out4); |
10387 | } |
10388 | } |
10389 | |
10390 | at::Tensor & set_out_source_Storage_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Storage source, at::Tensor & out) { |
10391 | if (false) { |
10392 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
10393 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
10394 |       // (We can only do this for inplace ops today though, because they technically all support meta tensors).
10395 | auto self_meta = to_meta(self); |
10396 | auto out_meta = to_meta(out); |
10397 | at::AutoDispatchSkipFunctionalize func_guard; |
10398 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
10399 | at::_ops::set_source_Storage_out::call(self_meta, source, out_meta); |
10400 | } |
10401 | |
10402 | at::Tensor self_; |
10403 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
10404 | at::functionalization::impl::sync(self); |
10405 | self_ = at::functionalization::impl::from_functional_tensor(self); |
10406 | } else { |
10407 | self_ = self; |
10408 | } |
10409 | |
10410 | at::Tensor out_; |
10411 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
10412 | at::functionalization::impl::sync(out); |
10413 | out_ = at::functionalization::impl::from_functional_tensor(out); |
10414 | } else { |
10415 | out_ = out; |
10416 | } |
10417 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
10418 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
10419 | // case 1: trying to mutate a non functional tensor with a functional tensor is an error |
10420 | TORCH_INTERNAL_ASSERT(false, |
10421 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
10422 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
10423 | } else { |
10424 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
10425 | at::AutoDispatchSkipFunctionalize guard; |
10426 | at::Tensor tmp_output = at::_ops::set_source_Storage_out::call(self_, source, out_); |
10427 |         return out;
10428 | } |
10429 | } else { |
10430 | at::Tensor tmp_output; |
10431 | { |
10432 | at::AutoDispatchSkipFunctionalize guard; |
10433 | tmp_output = at::_ops::set_source_Storage::call(self_, source); |
10434 | } |
10435 | at::functionalization::impl::replace_(out, tmp_output); |
10436 | at::functionalization::impl::commit_update(out); |
10437 | at::functionalization::impl::sync(out); |
10438 | return out; |
10439 | } |
10440 | } |
10441 | |
10442 | at::Tensor & set__source_Storage(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, at::Storage source) { |
10443 | if (false) { |
10444 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
10445 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
10446 |       // (We can only do this for inplace ops today though, because they technically all support meta tensors).
10447 | auto self_meta = to_meta(self); |
10448 | at::AutoDispatchSkipFunctionalize func_guard; |
10449 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
10450 | at::_ops::set__source_Storage::call(self_meta, source); |
10451 | } |
10452 | |
10453 | at::Tensor self_; |
10454 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
10455 | at::functionalization::impl::sync(self); |
10456 | self_ = at::functionalization::impl::from_functional_tensor(self); |
10457 | } else { |
10458 | self_ = self; |
10459 | } |
10460 | if (!(true && at::functionalization::impl::isFunctionalTensor(self))) { |
10461 | if ((false)) { |
10462 | // case 1: trying to mutate a non functional tensor with a functional tensor is an error |
10463 | TORCH_INTERNAL_ASSERT(false, |
10464 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
10465 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
10466 | } else { |
10467 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
10468 | at::AutoDispatchSkipFunctionalize guard; |
10469 | at::Tensor tmp_output = at::_ops::set__source_Storage::call(self_, source); |
10470 |         return self;
10471 | } |
10472 | } else { |
10473 | at::Tensor tmp_output; |
10474 | { |
10475 | at::AutoDispatchSkipFunctionalize guard; |
10476 | tmp_output = at::_ops::set_source_Storage::call(self_, source); |
10477 | } |
10478 | at::functionalization::impl::replace_(self, tmp_output); |
10479 | at::functionalization::impl::commit_update(self); |
10480 | at::functionalization::impl::sync(self); |
10481 | return self; |
10482 | } |
10483 | } |
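
    // Note on the set_ family: the overloads that take an at::Storage argument compile out the
    // meta-tensor pre-check (the `if (false)` guard above), while the Tensor-source and
    // argument-free overloads keep it enabled; presumably a Storage argument has no meta
    // counterpart to dispatch with (an inference from the generated guards, not a documented rule).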
10484 | |
10485 | at::Tensor & set_out_source_Storage_storage_offset_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Storage source, c10::SymInt storage_offset, c10::SymIntArrayRef size, c10::SymIntArrayRef stride, at::Tensor & out) { |
10486 | if (false) { |
10487 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
10488 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
10489 |       // (We can only do this for inplace ops today though, because they technically all support meta tensors).
10490 | auto self_meta = to_meta(self); |
10491 | auto out_meta = to_meta(out); |
10492 | at::AutoDispatchSkipFunctionalize func_guard; |
10493 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
10494 | at::_ops::set_source_Storage_storage_offset_out::call(self_meta, source, storage_offset, size, stride, out_meta); |
10495 | } |
10496 | |
10497 | at::Tensor self_; |
10498 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
10499 | at::functionalization::impl::sync(self); |
10500 | self_ = at::functionalization::impl::from_functional_tensor(self); |
10501 | } else { |
10502 | self_ = self; |
10503 | } |
10504 | |
10505 | at::Tensor out_; |
10506 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
10507 | at::functionalization::impl::sync(out); |
10508 | out_ = at::functionalization::impl::from_functional_tensor(out); |
10509 | } else { |
10510 | out_ = out; |
10511 | } |
10512 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
10513 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
10514 | // case 1: trying to mutate a non functional tensor with a functional tensor is an error |
10515 | TORCH_INTERNAL_ASSERT(false, |
10516 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
10517 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
10518 | } else { |
10519 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
10520 | at::AutoDispatchSkipFunctionalize guard; |
10521 | at::Tensor tmp_output = at::_ops::set_source_Storage_storage_offset_out::call(self_, source, storage_offset, size, stride, out_); |
10522 |         return out;
10523 | } |
10524 | } else { |
10525 | at::Tensor tmp_output; |
10526 | { |
10527 | at::AutoDispatchSkipFunctionalize guard; |
10528 | tmp_output = at::_ops::set_source_Storage_storage_offset::call(self_, source, storage_offset, size, stride); |
10529 | } |
10530 | at::functionalization::impl::replace_(out, tmp_output); |
10531 | at::functionalization::impl::commit_update(out); |
10532 | at::functionalization::impl::sync(out); |
10533 | return out; |
10534 | } |
10535 | } |
10536 | |
10537 | at::Tensor & set__source_Storage_storage_offset(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, at::Storage source, c10::SymInt storage_offset, c10::SymIntArrayRef size, c10::SymIntArrayRef stride) { |
10538 | if (false) { |
10539 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
10540 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
10541 |       // (We can only do this for inplace ops today though, because they technically all support meta tensors).
10542 | auto self_meta = to_meta(self); |
10543 | at::AutoDispatchSkipFunctionalize func_guard; |
10544 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
10545 | at::_ops::set__source_Storage_storage_offset::call(self_meta, source, storage_offset, size, stride); |
10546 | } |
10547 | |
10548 | at::Tensor self_; |
10549 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
10550 | at::functionalization::impl::sync(self); |
10551 | self_ = at::functionalization::impl::from_functional_tensor(self); |
10552 | } else { |
10553 | self_ = self; |
10554 | } |
10555 | if (!(true && at::functionalization::impl::isFunctionalTensor(self))) { |
10556 | if ((false)) { |
10557 | // case 1: trying to mutate a non functional tensor with a functional tensor is an error |
10558 | TORCH_INTERNAL_ASSERT(false, |
10559 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
10560 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
10561 | } else { |
10562 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
10563 | at::AutoDispatchSkipFunctionalize guard; |
10564 | at::Tensor tmp_output = at::_ops::set__source_Storage_storage_offset::call(self_, source, storage_offset, size, stride); |
10565 |         return self;
10566 | } |
10567 | } else { |
10568 | at::Tensor tmp_output; |
10569 | { |
10570 | at::AutoDispatchSkipFunctionalize guard; |
10571 | tmp_output = at::_ops::set_source_Storage_storage_offset::call(self_, source, storage_offset, size, stride); |
10572 | } |
10573 | at::functionalization::impl::replace_(self, tmp_output); |
10574 | at::functionalization::impl::commit_update(self); |
10575 | at::functionalization::impl::sync(self); |
10576 | return self; |
10577 | } |
10578 | } |
10579 | |
10580 | at::Tensor & set_out_source_Tensor_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & source, at::Tensor & out) { |
10581 | if (false) { |
10582 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
10583 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
10584 |       // (We can only do this for inplace ops today though, because they technically all support meta tensors).
10585 | auto self_meta = to_meta(self); |
10586 | auto source_meta = to_meta(source); |
10587 | auto out_meta = to_meta(out); |
10588 | at::AutoDispatchSkipFunctionalize func_guard; |
10589 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
10590 | at::_ops::set_source_Tensor_out::call(self_meta, source_meta, out_meta); |
10591 | } |
10592 | |
10593 | at::Tensor self_; |
10594 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
10595 | at::functionalization::impl::sync(self); |
10596 | self_ = at::functionalization::impl::from_functional_tensor(self); |
10597 | } else { |
10598 | self_ = self; |
10599 | } |
10600 | |
10601 | at::Tensor source_; |
10602 | if (at::functionalization::impl::isFunctionalTensor(source)) { |
10603 | at::functionalization::impl::sync(source); |
10604 | source_ = at::functionalization::impl::from_functional_tensor(source); |
10605 | } else { |
10606 | source_ = source; |
10607 | } |
10608 | |
10609 | at::Tensor out_; |
10610 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
10611 | at::functionalization::impl::sync(out); |
10612 | out_ = at::functionalization::impl::from_functional_tensor(out); |
10613 | } else { |
10614 | out_ = out; |
10615 | } |
10616 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
10617 | if ((false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(source))) { |
10618 | // case 1: trying to mutate a non functional tensor with a functional tensor is an error |
10619 | TORCH_INTERNAL_ASSERT(false, |
10620 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
10621 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
10622 | } else { |
10623 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
10624 | at::AutoDispatchSkipFunctionalize guard; |
10625 | at::Tensor tmp_output = at::_ops::set_source_Tensor_out::call(self_, source_, out_); |
10626 |         return out;
10627 | } |
10628 | } else { |
10629 | at::Tensor tmp_output; |
10630 | { |
10631 | at::AutoDispatchSkipFunctionalize guard; |
10632 | tmp_output = at::_ops::set_source_Tensor::call(self_, source_); |
10633 | } |
10634 | at::functionalization::impl::replace_(out, tmp_output); |
10635 | at::functionalization::impl::commit_update(out); |
10636 | at::functionalization::impl::sync(out); |
10637 | return out; |
10638 | } |
10639 | } |
10640 | |
10641 | at::Tensor & set__source_Tensor(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & source) { |
10642 | if (true) { |
10643 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
10644 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
10645 |       // (We can only do this for inplace ops today though, because they technically all support meta tensors).
10646 | auto self_meta = to_meta(self); |
10647 | auto source_meta = to_meta(source); |
10648 | at::AutoDispatchSkipFunctionalize func_guard; |
10649 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
10650 | at::_ops::set__source_Tensor::call(self_meta, source_meta); |
10651 | } |
10652 | |
10653 | at::Tensor self_; |
10654 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
10655 | at::functionalization::impl::sync(self); |
10656 | self_ = at::functionalization::impl::from_functional_tensor(self); |
10657 | } else { |
10658 | self_ = self; |
10659 | } |
10660 | |
10661 | at::Tensor source_; |
10662 | if (at::functionalization::impl::isFunctionalTensor(source)) { |
10663 | at::functionalization::impl::sync(source); |
10664 | source_ = at::functionalization::impl::from_functional_tensor(source); |
10665 | } else { |
10666 | source_ = source; |
10667 | } |
10668 | if (!(true && at::functionalization::impl::isFunctionalTensor(self))) { |
10669 | if ((false || at::functionalization::impl::isFunctionalTensor(source))) { |
10670 | // case 1: trying to mutate a non functional tensor with a functional tensor is an error |
10671 | TORCH_INTERNAL_ASSERT(false, |
10672 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
10673 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
10674 | } else { |
10675 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
10676 | at::AutoDispatchSkipFunctionalize guard; |
10677 | at::Tensor tmp_output = at::_ops::set__source_Tensor::call(self_, source_); |
10678 |         return self;
10679 | } |
10680 | } else { |
10681 | at::Tensor tmp_output; |
10682 | { |
10683 | at::AutoDispatchSkipFunctionalize guard; |
10684 | tmp_output = at::_ops::set_source_Tensor::call(self_, source_); |
10685 | } |
10686 | at::functionalization::impl::replace_(self, tmp_output); |
10687 | at::functionalization::impl::commit_update(self); |
10688 | at::functionalization::impl::sync(self); |
10689 | return self; |
10690 | } |
10691 | } |
10692 | |
10693 | at::Tensor & set_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) { |
10694 | if (false) { |
10695 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
10696 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
10697 |       // (We can only do this for inplace ops today though, because they technically all support meta tensors).
10698 | auto self_meta = to_meta(self); |
10699 | auto out_meta = to_meta(out); |
10700 | at::AutoDispatchSkipFunctionalize func_guard; |
10701 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
10702 | at::_ops::set_out::call(self_meta, out_meta); |
10703 | } |
10704 | |
10705 | at::Tensor self_; |
10706 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
10707 | at::functionalization::impl::sync(self); |
10708 | self_ = at::functionalization::impl::from_functional_tensor(self); |
10709 | } else { |
10710 | self_ = self; |
10711 | } |
10712 | |
10713 | at::Tensor out_; |
10714 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
10715 | at::functionalization::impl::sync(out); |
10716 | out_ = at::functionalization::impl::from_functional_tensor(out); |
10717 | } else { |
10718 | out_ = out; |
10719 | } |
10720 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
10721 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
10722 | // case 1: trying to mutate a non functional tensor with a functional tensor is an error |
10723 | TORCH_INTERNAL_ASSERT(false, |
10724 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
10725 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
10726 | } else { |
10727 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
10728 | at::AutoDispatchSkipFunctionalize guard; |
10729 | at::Tensor tmp_output = at::_ops::set_out::call(self_, out_); |
10730 |         return out;
10731 | } |
10732 | } else { |
10733 | at::Tensor tmp_output; |
10734 | { |
10735 | at::AutoDispatchSkipFunctionalize guard; |
10736 | tmp_output = at::_ops::set::call(self_); |
10737 | } |
10738 | at::functionalization::impl::replace_(out, tmp_output); |
10739 | at::functionalization::impl::commit_update(out); |
10740 | at::functionalization::impl::sync(out); |
10741 | return out; |
10742 | } |
10743 | } |
10744 | |
10745 | at::Tensor & set_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) { |
10746 | if (true) { |
10747 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
10748 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
10749 |       // (We can only do this for inplace ops today though, because they technically all support meta tensors).
10750 | auto self_meta = to_meta(self); |
10751 | at::AutoDispatchSkipFunctionalize func_guard; |
10752 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
10753 | at::_ops::set_::call(self_meta); |
10754 | } |
10755 | |
10756 | at::Tensor self_; |
10757 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
10758 | at::functionalization::impl::sync(self); |
10759 | self_ = at::functionalization::impl::from_functional_tensor(self); |
10760 | } else { |
10761 | self_ = self; |
10762 | } |
10763 | if (!(true && at::functionalization::impl::isFunctionalTensor(self))) { |
10764 | if ((false)) { |
10765 | // case 1: trying to mutate a non functional tensor with a functional tensor is an error |
10766 | TORCH_INTERNAL_ASSERT(false, |
10767 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
10768 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
10769 | } else { |
10770 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
10771 | at::AutoDispatchSkipFunctionalize guard; |
10772 | at::Tensor tmp_output = at::_ops::set_::call(self_); |
10773 |         return self;
10774 | } |
10775 | } else { |
10776 | at::Tensor tmp_output; |
10777 | { |
10778 | at::AutoDispatchSkipFunctionalize guard; |
10779 | tmp_output = at::_ops::set::call(self_); |
10780 | } |
10781 | at::functionalization::impl::replace_(self, tmp_output); |
10782 | at::functionalization::impl::commit_update(self); |
10783 | at::functionalization::impl::sync(self); |
10784 | return self; |
10785 | } |
10786 | } |
10787 | |
10788 | at::Tensor & lift_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) { |
10789 | if (false) { |
10790 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
10791 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
10792 |       // (We can only do this for inplace ops today though, because they technically all support meta tensors).
10793 | auto self_meta = to_meta(self); |
10794 | auto out_meta = to_meta(out); |
10795 | at::AutoDispatchSkipFunctionalize func_guard; |
10796 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
10797 | at::_ops::lift_out::call(self_meta, out_meta); |
10798 | } |
10799 | |
10800 | at::Tensor self_; |
10801 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
10802 | at::functionalization::impl::sync(self); |
10803 | self_ = at::functionalization::impl::from_functional_tensor(self); |
10804 | } else { |
10805 | self_ = self; |
10806 | } |
10807 | |
10808 | at::Tensor out_; |
10809 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
10810 | at::functionalization::impl::sync(out); |
10811 | out_ = at::functionalization::impl::from_functional_tensor(out); |
10812 | } else { |
10813 | out_ = out; |
10814 | } |
10815 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
10816 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
10817 | // case 1: trying to mutate a non functional tensor with a functional tensor is an error |
10818 | TORCH_INTERNAL_ASSERT(false, |
10819 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
10820 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
10821 | } else { |
10822 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
10823 | at::AutoDispatchSkipFunctionalize guard; |
10824 | at::Tensor tmp_output = at::_ops::lift_out::call(self_, out_); |
10825 |         return out;
10826 | } |
10827 | } else { |
10828 | at::Tensor tmp_output; |
10829 | { |
10830 | at::AutoDispatchSkipFunctionalize guard; |
10831 | tmp_output = at::_ops::lift::call(self_); |
10832 | } |
10833 | at::functionalization::impl::replace_(out, tmp_output); |
10834 | at::functionalization::impl::commit_update(out); |
10835 | at::functionalization::impl::sync(out); |
10836 | return out; |
10837 | } |
10838 | } |
10839 | |
10840 | at::Tensor & masked_scatter_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & mask, const at::Tensor & source, at::Tensor & out) { |
10841 | if (false) { |
10842 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
10843 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
10844 |       // (We can only do this for inplace ops today though, because they technically all support meta tensors).
10845 | auto self_meta = to_meta(self); |
10846 | auto mask_meta = to_meta(mask); |
10847 | auto source_meta = to_meta(source); |
10848 | auto out_meta = to_meta(out); |
10849 | at::AutoDispatchSkipFunctionalize func_guard; |
10850 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
10851 | at::_ops::masked_scatter_out::call(self_meta, mask_meta, source_meta, out_meta); |
10852 | } |
10853 | |
10854 | at::Tensor self_; |
10855 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
10856 | at::functionalization::impl::sync(self); |
10857 | self_ = at::functionalization::impl::from_functional_tensor(self); |
10858 | } else { |
10859 | self_ = self; |
10860 | } |
10861 | |
10862 | at::Tensor mask_; |
10863 | if (at::functionalization::impl::isFunctionalTensor(mask)) { |
10864 | at::functionalization::impl::sync(mask); |
10865 | mask_ = at::functionalization::impl::from_functional_tensor(mask); |
10866 | } else { |
10867 | mask_ = mask; |
10868 | } |
10869 | |
10870 | at::Tensor source_; |
10871 | if (at::functionalization::impl::isFunctionalTensor(source)) { |
10872 | at::functionalization::impl::sync(source); |
10873 | source_ = at::functionalization::impl::from_functional_tensor(source); |
10874 | } else { |
10875 | source_ = source; |
10876 | } |
10877 | |
10878 | at::Tensor out_; |
10879 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
10880 | at::functionalization::impl::sync(out); |
10881 | out_ = at::functionalization::impl::from_functional_tensor(out); |
10882 | } else { |
10883 | out_ = out; |
10884 | } |
10885 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
10886 | if ((false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(mask) || at::functionalization::impl::isFunctionalTensor(source))) { |
10887 | // case 1: trying to mutate a non functional tensor with a functional tensor is an error |
10888 | TORCH_INTERNAL_ASSERT(false, |
10889 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
10890 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
10891 | } else { |
10892 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
10893 | at::AutoDispatchSkipFunctionalize guard; |
10894 | at::Tensor tmp_output = at::_ops::masked_scatter_out::call(self_, mask_, source_, out_); |
10895 |         return out;
10896 | } |
10897 | } else { |
10898 | at::Tensor tmp_output; |
10899 | { |
10900 | at::AutoDispatchSkipFunctionalize guard; |
10901 | tmp_output = at::_ops::masked_scatter::call(self_, mask_, source_); |
10902 | } |
10903 | at::functionalization::impl::replace_(out, tmp_output); |
10904 | at::functionalization::impl::commit_update(out); |
10905 | at::functionalization::impl::sync(out); |
10906 | return out; |
10907 | } |
10908 | } |
10909 | |
10910 | at::Tensor & masked_scatter_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & mask, const at::Tensor & source) { |
10911 | if (true) { |
10912 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
10913 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
10914 |       // (We can only do this for inplace ops today though, because they technically all support meta tensors).
10915 | auto self_meta = to_meta(self); |
10916 | auto mask_meta = to_meta(mask); |
10917 | auto source_meta = to_meta(source); |
10918 | at::AutoDispatchSkipFunctionalize func_guard; |
10919 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
10920 | at::_ops::masked_scatter_::call(self_meta, mask_meta, source_meta); |
10921 | } |
10922 | |
10923 | at::Tensor self_; |
10924 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
10925 | at::functionalization::impl::sync(self); |
10926 | self_ = at::functionalization::impl::from_functional_tensor(self); |
10927 | } else { |
10928 | self_ = self; |
10929 | } |
10930 | |
10931 | at::Tensor mask_; |
10932 | if (at::functionalization::impl::isFunctionalTensor(mask)) { |
10933 | at::functionalization::impl::sync(mask); |
10934 | mask_ = at::functionalization::impl::from_functional_tensor(mask); |
10935 | } else { |
10936 | mask_ = mask; |
10937 | } |
10938 | |
10939 | at::Tensor source_; |
10940 | if (at::functionalization::impl::isFunctionalTensor(source)) { |
10941 | at::functionalization::impl::sync(source); |
10942 | source_ = at::functionalization::impl::from_functional_tensor(source); |
10943 | } else { |
10944 | source_ = source; |
10945 | } |
10946 | if (!(true && at::functionalization::impl::isFunctionalTensor(self))) { |
10947 | if ((false || at::functionalization::impl::isFunctionalTensor(mask) || at::functionalization::impl::isFunctionalTensor(source))) { |
10948 | // case 1: trying to mutate a non functional tensor with a functional tensor is an error |
10949 | TORCH_INTERNAL_ASSERT(false, |
10950 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
10951 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
10952 | } else { |
10953 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
10954 | at::AutoDispatchSkipFunctionalize guard; |
10955 | at::Tensor tmp_output = at::_ops::masked_scatter_::call(self_, mask_, source_); |
10956 |         return self;
10957 | } |
10958 | } else { |
10959 | at::Tensor tmp_output; |
10960 | { |
10961 | at::AutoDispatchSkipFunctionalize guard; |
10962 | tmp_output = at::_ops::masked_scatter::call(self_, mask_, source_); |
10963 | } |
10964 | at::functionalization::impl::replace_(self, tmp_output); |
10965 | at::functionalization::impl::commit_update(self); |
10966 | at::functionalization::impl::sync(self); |
10967 | return self; |
10968 | } |
10969 | } |
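
    // Usage note (an illustrative sketch, not part of the generated kernels): these entries are
    // reached when a program runs under the Functionalize dispatch key, e.g. via
    // torch.func.functionalize in Python. Under that pass, an in-place call such as
    // self.masked_scatter_(mask, source) on a wrapped tensor is routed to the kernel above, which
    // computes the functional masked_scatter and commits the result back into the wrapper instead
    // of mutating the underlying storage in place.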
10970 | |
10971 | at::Tensor & index_reduce_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source, c10::string_view reduce, bool include_self, at::Tensor & out) { |
10972 | if (false) { |
10973 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
10974 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
10975 |       // (We can only do this for inplace ops today though, because they technically all support meta tensors).
10976 | auto self_meta = to_meta(self); |
10977 | auto index_meta = to_meta(index); |
10978 | auto source_meta = to_meta(source); |
10979 | auto out_meta = to_meta(out); |
10980 | at::AutoDispatchSkipFunctionalize func_guard; |
10981 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
10982 | at::_ops::index_reduce_out::call(self_meta, dim, index_meta, source_meta, reduce, include_self, out_meta); |
10983 | } |
10984 | |
10985 | at::Tensor self_; |
10986 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
10987 | at::functionalization::impl::sync(self); |
10988 | self_ = at::functionalization::impl::from_functional_tensor(self); |
10989 | } else { |
10990 | self_ = self; |
10991 | } |
10992 | |
10993 | at::Tensor index_; |
10994 | if (at::functionalization::impl::isFunctionalTensor(index)) { |
10995 | at::functionalization::impl::sync(index); |
10996 | index_ = at::functionalization::impl::from_functional_tensor(index); |
10997 | } else { |
10998 | index_ = index; |
10999 | } |
11000 | |
11001 | at::Tensor source_; |
11002 | if (at::functionalization::impl::isFunctionalTensor(source)) { |
11003 | at::functionalization::impl::sync(source); |
11004 | source_ = at::functionalization::impl::from_functional_tensor(source); |
11005 | } else { |
11006 | source_ = source; |
11007 | } |
11008 | |
11009 | at::Tensor out_; |
11010 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
11011 | at::functionalization::impl::sync(out); |
11012 | out_ = at::functionalization::impl::from_functional_tensor(out); |
11013 | } else { |
11014 | out_ = out; |
11015 | } |
11016 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
11017 | if ((false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(index) || at::functionalization::impl::isFunctionalTensor(source))) { |
11018 | // case 1: trying to mutate a non functional tensor with a functional tensor is an error |
11019 | TORCH_INTERNAL_ASSERT(false, |
11020 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
11021 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
11022 | } else { |
11023 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
11024 | at::AutoDispatchSkipFunctionalize guard; |
11025 | at::Tensor tmp_output = at::_ops::index_reduce_out::call(self_, dim, index_, source_, reduce, include_self, out_); |
11026 |         return out;
11027 | } |
11028 | } else { |
11029 | at::Tensor tmp_output; |
11030 | { |
11031 | at::AutoDispatchSkipFunctionalize guard; |
11032 | tmp_output = at::_ops::index_reduce::call(self_, dim, index_, source_, reduce, include_self); |
11033 | } |
11034 | at::functionalization::impl::replace_(out, tmp_output); |
11035 | at::functionalization::impl::commit_update(out); |
11036 | at::functionalization::impl::sync(out); |
11037 | return out; |
11038 | } |
11039 | } |
11040 | |
11041 | at::Tensor & index_reduce_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source, c10::string_view reduce, bool include_self) { |
11042 | if (true) { |
11043 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
11044 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
11045 |       // (We can only do this for inplace ops today though, because they technically all support meta tensors).
11046 | auto self_meta = to_meta(self); |
11047 | auto index_meta = to_meta(index); |
11048 | auto source_meta = to_meta(source); |
11049 | at::AutoDispatchSkipFunctionalize func_guard; |
11050 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
11051 | at::_ops::index_reduce_::call(self_meta, dim, index_meta, source_meta, reduce, include_self); |
11052 | } |
11053 | |
11054 | at::Tensor self_; |
11055 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
11056 | at::functionalization::impl::sync(self); |
11057 | self_ = at::functionalization::impl::from_functional_tensor(self); |
11058 | } else { |
11059 | self_ = self; |
11060 | } |
11061 | |
11062 | at::Tensor index_; |
11063 | if (at::functionalization::impl::isFunctionalTensor(index)) { |
11064 | at::functionalization::impl::sync(index); |
11065 | index_ = at::functionalization::impl::from_functional_tensor(index); |
11066 | } else { |
11067 | index_ = index; |
11068 | } |
11069 | |
11070 | at::Tensor source_; |
11071 | if (at::functionalization::impl::isFunctionalTensor(source)) { |
11072 | at::functionalization::impl::sync(source); |
11073 | source_ = at::functionalization::impl::from_functional_tensor(source); |
11074 | } else { |
11075 | source_ = source; |
11076 | } |
11077 | if (!(true && at::functionalization::impl::isFunctionalTensor(self))) { |
11078 | if ((false || at::functionalization::impl::isFunctionalTensor(index) || at::functionalization::impl::isFunctionalTensor(source))) { |
11079 | // case 1: trying to mutate a non functional tensor with a functional tensor is an error |
11080 | TORCH_INTERNAL_ASSERT(false, |
11081 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
11082 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
11083 | } else { |
11084 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
11085 | at::AutoDispatchSkipFunctionalize guard; |
11086 | at::Tensor tmp_output = at::_ops::index_reduce_::call(self_, dim, index_, source_, reduce, include_self); |
11087 |         return self;
11088 | } |
11089 | } else { |
11090 | at::Tensor tmp_output; |
11091 | { |
11092 | at::AutoDispatchSkipFunctionalize guard; |
11093 | tmp_output = at::_ops::index_reduce::call(self_, dim, index_, source_, reduce, include_self); |
11094 | } |
11095 | at::functionalization::impl::replace_(self, tmp_output); |
11096 | at::functionalization::impl::commit_update(self); |
11097 | at::functionalization::impl::sync(self); |
11098 | return self; |
11099 | } |
11100 | } |
11101 | |
11102 | at::Tensor & eq_out_Scalar_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other, at::Tensor & out) { |
11103 | if (false) { |
11104 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
11105 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
11106 |       // (We can only do this for inplace ops today though, because they technically all support meta tensors).
11107 | auto self_meta = to_meta(self); |
11108 | auto out_meta = to_meta(out); |
11109 | at::AutoDispatchSkipFunctionalize func_guard; |
11110 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
11111 | at::_ops::eq_Scalar_out::call(self_meta, other, out_meta); |
11112 | } |
11113 | |
11114 | at::Tensor self_; |
11115 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
11116 | at::functionalization::impl::sync(self); |
11117 | self_ = at::functionalization::impl::from_functional_tensor(self); |
11118 | } else { |
11119 | self_ = self; |
11120 | } |
11121 | |
11122 | at::Tensor out_; |
11123 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
11124 | at::functionalization::impl::sync(out); |
11125 | out_ = at::functionalization::impl::from_functional_tensor(out); |
11126 | } else { |
11127 | out_ = out; |
11128 | } |
11129 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
11130 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
11131 | // case 1: trying to mutate a non functional tensor with a functional tensor is an error |
11132 | TORCH_INTERNAL_ASSERT(false, |
11133 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
11134 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
11135 | } else { |
11136 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
11137 | at::AutoDispatchSkipFunctionalize guard; |
11138 | at::Tensor tmp_output = at::_ops::eq_Scalar_out::call(self_, other, out_); |
11139 |         return out;
11140 | } |
11141 | } else { |
11142 | at::Tensor tmp_output; |
11143 | { |
11144 | at::AutoDispatchSkipFunctionalize guard; |
11145 | tmp_output = at::_ops::eq_Scalar::call(self_, other); |
11146 | } |
11147 | at::functionalization::impl::replace_(out, tmp_output); |
11148 | at::functionalization::impl::commit_update(out); |
11149 | at::functionalization::impl::sync(out); |
11150 | return out; |
11151 | } |
11152 | } |
11153 | |
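// Editorial note (not generated): for `out=` wrappers such as `eq_out_Scalar_out`
// above, the same recipe applies, except that the mutation is recorded on `out`
// rather than `self`, the value is recomputed with the purely functional variant
// (e.g. `at::_ops::eq_Scalar`), and the meta-tensor pre-check is skipped (the
// `if (false)` block), since that check is only enabled for in-place ops.
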
11154 | at::Tensor & eq__Scalar(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & other) { |
11155 | if (true) { |
11156 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
11157 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
11159 | auto self_meta = to_meta(self); |
11160 | at::AutoDispatchSkipFunctionalize func_guard; |
11161 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
11162 | at::_ops::eq__Scalar::call(self_meta, other); |
11163 | } |
11164 | |
11165 | at::Tensor self_; |
11166 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
11167 | at::functionalization::impl::sync(self); |
11168 | self_ = at::functionalization::impl::from_functional_tensor(self); |
11169 | } else { |
11170 | self_ = self; |
11171 | } |
11172 | if (!(true && at::functionalization::impl::isFunctionalTensor(self))) { |
11173 | if ((false)) { |
          // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
11175 | TORCH_INTERNAL_ASSERT(false, |
11176 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
11177 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
11178 | } else { |
11179 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
11180 | at::AutoDispatchSkipFunctionalize guard; |
11181 | at::Tensor tmp_output = at::_ops::eq__Scalar::call(self_, other); |
          return self;
11183 | } |
11184 | } else { |
11185 | at::Tensor tmp_output; |
11186 | { |
11187 | at::AutoDispatchSkipFunctionalize guard; |
11188 | tmp_output = at::_ops::eq_Scalar::call(self_, other); |
11189 | } |
11190 | at::functionalization::impl::replace_(self, tmp_output); |
11191 | at::functionalization::impl::commit_update(self); |
11192 | at::functionalization::impl::sync(self); |
11193 | return self; |
11194 | } |
11195 | } |
11196 | |
11197 | at::Tensor & eq_out_Tensor_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) { |
11198 | if (false) { |
11199 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
11200 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
11202 | auto self_meta = to_meta(self); |
11203 | auto other_meta = to_meta(other); |
11204 | auto out_meta = to_meta(out); |
11205 | at::AutoDispatchSkipFunctionalize func_guard; |
11206 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
11207 | at::_ops::eq_Tensor_out::call(self_meta, other_meta, out_meta); |
11208 | } |
11209 | |
11210 | at::Tensor self_; |
11211 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
11212 | at::functionalization::impl::sync(self); |
11213 | self_ = at::functionalization::impl::from_functional_tensor(self); |
11214 | } else { |
11215 | self_ = self; |
11216 | } |
11217 | |
11218 | at::Tensor other_; |
11219 | if (at::functionalization::impl::isFunctionalTensor(other)) { |
11220 | at::functionalization::impl::sync(other); |
11221 | other_ = at::functionalization::impl::from_functional_tensor(other); |
11222 | } else { |
11223 | other_ = other; |
11224 | } |
11225 | |
11226 | at::Tensor out_; |
11227 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
11228 | at::functionalization::impl::sync(out); |
11229 | out_ = at::functionalization::impl::from_functional_tensor(out); |
11230 | } else { |
11231 | out_ = out; |
11232 | } |
11233 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
11234 | if ((false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(other))) { |
          // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
11236 | TORCH_INTERNAL_ASSERT(false, |
11237 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
11238 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
11239 | } else { |
11240 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
11241 | at::AutoDispatchSkipFunctionalize guard; |
11242 | at::Tensor tmp_output = at::_ops::eq_Tensor_out::call(self_, other_, out_); |
          return out;
11244 | } |
11245 | } else { |
11246 | at::Tensor tmp_output; |
11247 | { |
11248 | at::AutoDispatchSkipFunctionalize guard; |
11249 | tmp_output = at::_ops::eq_Tensor::call(self_, other_); |
11250 | } |
11251 | at::functionalization::impl::replace_(out, tmp_output); |
11252 | at::functionalization::impl::commit_update(out); |
11253 | at::functionalization::impl::sync(out); |
11254 | return out; |
11255 | } |
11256 | } |
11257 | |
11258 | at::Tensor & eq__Tensor(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other) { |
11259 | if (true) { |
11260 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
11261 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
11263 | auto self_meta = to_meta(self); |
11264 | auto other_meta = to_meta(other); |
11265 | at::AutoDispatchSkipFunctionalize func_guard; |
11266 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
11267 | at::_ops::eq__Tensor::call(self_meta, other_meta); |
11268 | } |
11269 | |
11270 | at::Tensor self_; |
11271 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
11272 | at::functionalization::impl::sync(self); |
11273 | self_ = at::functionalization::impl::from_functional_tensor(self); |
11274 | } else { |
11275 | self_ = self; |
11276 | } |
11277 | |
11278 | at::Tensor other_; |
11279 | if (at::functionalization::impl::isFunctionalTensor(other)) { |
11280 | at::functionalization::impl::sync(other); |
11281 | other_ = at::functionalization::impl::from_functional_tensor(other); |
11282 | } else { |
11283 | other_ = other; |
11284 | } |
11285 | if (!(true && at::functionalization::impl::isFunctionalTensor(self))) { |
11286 | if ((false || at::functionalization::impl::isFunctionalTensor(other))) { |
          // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
11288 | TORCH_INTERNAL_ASSERT(false, |
11289 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
11290 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
11291 | } else { |
11292 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
11293 | at::AutoDispatchSkipFunctionalize guard; |
11294 | at::Tensor tmp_output = at::_ops::eq__Tensor::call(self_, other_); |
          return self;
11296 | } |
11297 | } else { |
11298 | at::Tensor tmp_output; |
11299 | { |
11300 | at::AutoDispatchSkipFunctionalize guard; |
11301 | tmp_output = at::_ops::eq_Tensor::call(self_, other_); |
11302 | } |
11303 | at::functionalization::impl::replace_(self, tmp_output); |
11304 | at::functionalization::impl::commit_update(self); |
11305 | at::functionalization::impl::sync(self); |
11306 | return self; |
11307 | } |
11308 | } |
11309 | |
11310 | at::Tensor & bitwise_and_out_Tensor_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) { |
11311 | if (false) { |
11312 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
11313 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
11315 | auto self_meta = to_meta(self); |
11316 | auto other_meta = to_meta(other); |
11317 | auto out_meta = to_meta(out); |
11318 | at::AutoDispatchSkipFunctionalize func_guard; |
11319 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
11320 | at::_ops::bitwise_and_Tensor_out::call(self_meta, other_meta, out_meta); |
11321 | } |
11322 | |
11323 | at::Tensor self_; |
11324 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
11325 | at::functionalization::impl::sync(self); |
11326 | self_ = at::functionalization::impl::from_functional_tensor(self); |
11327 | } else { |
11328 | self_ = self; |
11329 | } |
11330 | |
11331 | at::Tensor other_; |
11332 | if (at::functionalization::impl::isFunctionalTensor(other)) { |
11333 | at::functionalization::impl::sync(other); |
11334 | other_ = at::functionalization::impl::from_functional_tensor(other); |
11335 | } else { |
11336 | other_ = other; |
11337 | } |
11338 | |
11339 | at::Tensor out_; |
11340 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
11341 | at::functionalization::impl::sync(out); |
11342 | out_ = at::functionalization::impl::from_functional_tensor(out); |
11343 | } else { |
11344 | out_ = out; |
11345 | } |
11346 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
11347 | if ((false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(other))) { |
          // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
11349 | TORCH_INTERNAL_ASSERT(false, |
11350 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
11351 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
11352 | } else { |
11353 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
11354 | at::AutoDispatchSkipFunctionalize guard; |
11355 | at::Tensor tmp_output = at::_ops::bitwise_and_Tensor_out::call(self_, other_, out_); |
          return out;
11357 | } |
11358 | } else { |
11359 | at::Tensor tmp_output; |
11360 | { |
11361 | at::AutoDispatchSkipFunctionalize guard; |
11362 | tmp_output = at::_ops::bitwise_and_Tensor::call(self_, other_); |
11363 | } |
11364 | at::functionalization::impl::replace_(out, tmp_output); |
11365 | at::functionalization::impl::commit_update(out); |
11366 | at::functionalization::impl::sync(out); |
11367 | return out; |
11368 | } |
11369 | } |
11370 | |
11371 | at::Tensor & bitwise_and__Tensor(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other) { |
11372 | if (true) { |
11373 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
11374 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
11376 | auto self_meta = to_meta(self); |
11377 | auto other_meta = to_meta(other); |
11378 | at::AutoDispatchSkipFunctionalize func_guard; |
11379 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
11380 | at::_ops::bitwise_and__Tensor::call(self_meta, other_meta); |
11381 | } |
11382 | |
11383 | at::Tensor self_; |
11384 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
11385 | at::functionalization::impl::sync(self); |
11386 | self_ = at::functionalization::impl::from_functional_tensor(self); |
11387 | } else { |
11388 | self_ = self; |
11389 | } |
11390 | |
11391 | at::Tensor other_; |
11392 | if (at::functionalization::impl::isFunctionalTensor(other)) { |
11393 | at::functionalization::impl::sync(other); |
11394 | other_ = at::functionalization::impl::from_functional_tensor(other); |
11395 | } else { |
11396 | other_ = other; |
11397 | } |
11398 | if (!(true && at::functionalization::impl::isFunctionalTensor(self))) { |
11399 | if ((false || at::functionalization::impl::isFunctionalTensor(other))) { |
          // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
11401 | TORCH_INTERNAL_ASSERT(false, |
11402 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
11403 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
11404 | } else { |
11405 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
11406 | at::AutoDispatchSkipFunctionalize guard; |
11407 | at::Tensor tmp_output = at::_ops::bitwise_and__Tensor::call(self_, other_); |
          return self;
11409 | } |
11410 | } else { |
11411 | at::Tensor tmp_output; |
11412 | { |
11413 | at::AutoDispatchSkipFunctionalize guard; |
11414 | tmp_output = at::_ops::bitwise_and_Tensor::call(self_, other_); |
11415 | } |
11416 | at::functionalization::impl::replace_(self, tmp_output); |
11417 | at::functionalization::impl::commit_update(self); |
11418 | at::functionalization::impl::sync(self); |
11419 | return self; |
11420 | } |
11421 | } |
11422 | |
11423 | at::Tensor & bitwise_and_out_Scalar_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other, at::Tensor & out) { |
11424 | if (false) { |
11425 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
11426 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
11428 | auto self_meta = to_meta(self); |
11429 | auto out_meta = to_meta(out); |
11430 | at::AutoDispatchSkipFunctionalize func_guard; |
11431 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
11432 | at::_ops::bitwise_and_Scalar_out::call(self_meta, other, out_meta); |
11433 | } |
11434 | |
11435 | at::Tensor self_; |
11436 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
11437 | at::functionalization::impl::sync(self); |
11438 | self_ = at::functionalization::impl::from_functional_tensor(self); |
11439 | } else { |
11440 | self_ = self; |
11441 | } |
11442 | |
11443 | at::Tensor out_; |
11444 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
11445 | at::functionalization::impl::sync(out); |
11446 | out_ = at::functionalization::impl::from_functional_tensor(out); |
11447 | } else { |
11448 | out_ = out; |
11449 | } |
11450 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
11451 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
          // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
11453 | TORCH_INTERNAL_ASSERT(false, |
11454 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
11455 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
11456 | } else { |
11457 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
11458 | at::AutoDispatchSkipFunctionalize guard; |
11459 | at::Tensor tmp_output = at::_ops::bitwise_and_Scalar_out::call(self_, other, out_); |
          return out;
11461 | } |
11462 | } else { |
11463 | at::Tensor tmp_output; |
11464 | { |
11465 | at::AutoDispatchSkipFunctionalize guard; |
11466 | tmp_output = at::_ops::bitwise_and_Scalar::call(self_, other); |
11467 | } |
11468 | at::functionalization::impl::replace_(out, tmp_output); |
11469 | at::functionalization::impl::commit_update(out); |
11470 | at::functionalization::impl::sync(out); |
11471 | return out; |
11472 | } |
11473 | } |
11474 | |
11475 | at::Tensor & bitwise_and__Scalar(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & other) { |
11476 | if (true) { |
11477 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
11478 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
11480 | auto self_meta = to_meta(self); |
11481 | at::AutoDispatchSkipFunctionalize func_guard; |
11482 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
11483 | at::_ops::bitwise_and__Scalar::call(self_meta, other); |
11484 | } |
11485 | |
11486 | at::Tensor self_; |
11487 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
11488 | at::functionalization::impl::sync(self); |
11489 | self_ = at::functionalization::impl::from_functional_tensor(self); |
11490 | } else { |
11491 | self_ = self; |
11492 | } |
11493 | if (!(true && at::functionalization::impl::isFunctionalTensor(self))) { |
11494 | if ((false)) { |
          // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
11496 | TORCH_INTERNAL_ASSERT(false, |
11497 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
11498 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
11499 | } else { |
11500 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
11501 | at::AutoDispatchSkipFunctionalize guard; |
11502 | at::Tensor tmp_output = at::_ops::bitwise_and__Scalar::call(self_, other); |
          return self;
11504 | } |
11505 | } else { |
11506 | at::Tensor tmp_output; |
11507 | { |
11508 | at::AutoDispatchSkipFunctionalize guard; |
11509 | tmp_output = at::_ops::bitwise_and_Scalar::call(self_, other); |
11510 | } |
11511 | at::functionalization::impl::replace_(self, tmp_output); |
11512 | at::functionalization::impl::commit_update(self); |
11513 | at::functionalization::impl::sync(self); |
11514 | return self; |
11515 | } |
11516 | } |
11517 | |
11518 | at::Tensor & bitwise_and_out_Scalar_Tensor_out(c10::DispatchKeySet dispatchKeySet, const at::Scalar & self, const at::Tensor & other, at::Tensor & out) { |
11519 | if (false) { |
11520 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
11521 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
11523 | auto other_meta = to_meta(other); |
11524 | auto out_meta = to_meta(out); |
11525 | at::AutoDispatchSkipFunctionalize func_guard; |
11526 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
11527 | at::_ops::bitwise_and_Scalar_Tensor_out::call(self, other_meta, out_meta); |
11528 | } |
11529 | |
11530 | at::Tensor other_; |
11531 | if (at::functionalization::impl::isFunctionalTensor(other)) { |
11532 | at::functionalization::impl::sync(other); |
11533 | other_ = at::functionalization::impl::from_functional_tensor(other); |
11534 | } else { |
11535 | other_ = other; |
11536 | } |
11537 | |
11538 | at::Tensor out_; |
11539 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
11540 | at::functionalization::impl::sync(out); |
11541 | out_ = at::functionalization::impl::from_functional_tensor(out); |
11542 | } else { |
11543 | out_ = out; |
11544 | } |
11545 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
11546 | if ((false || at::functionalization::impl::isFunctionalTensor(other))) { |
          // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
11548 | TORCH_INTERNAL_ASSERT(false, |
11549 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
11550 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
11551 | } else { |
11552 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
11553 | at::AutoDispatchSkipFunctionalize guard; |
11554 | at::Tensor tmp_output = at::_ops::bitwise_and_Scalar_Tensor_out::call(self, other_, out_); |
          return out;
11556 | } |
11557 | } else { |
11558 | at::Tensor tmp_output; |
11559 | { |
11560 | at::AutoDispatchSkipFunctionalize guard; |
11561 | tmp_output = at::_ops::bitwise_and_Scalar_Tensor::call(self, other_); |
11562 | } |
11563 | at::functionalization::impl::replace_(out, tmp_output); |
11564 | at::functionalization::impl::commit_update(out); |
11565 | at::functionalization::impl::sync(out); |
11566 | return out; |
11567 | } |
11568 | } |
11569 | |
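// Editorial note (not generated): when the first argument is a Scalar, as in
// `bitwise_and_out_Scalar_Tensor_out` above, only the tensor arguments
// (`other` and `out`) are unwrapped; the Scalar is forwarded as-is.
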
11570 | at::Tensor & bitwise_or_out_Tensor_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) { |
11571 | if (false) { |
11572 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
11573 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
11575 | auto self_meta = to_meta(self); |
11576 | auto other_meta = to_meta(other); |
11577 | auto out_meta = to_meta(out); |
11578 | at::AutoDispatchSkipFunctionalize func_guard; |
11579 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
11580 | at::_ops::bitwise_or_Tensor_out::call(self_meta, other_meta, out_meta); |
11581 | } |
11582 | |
11583 | at::Tensor self_; |
11584 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
11585 | at::functionalization::impl::sync(self); |
11586 | self_ = at::functionalization::impl::from_functional_tensor(self); |
11587 | } else { |
11588 | self_ = self; |
11589 | } |
11590 | |
11591 | at::Tensor other_; |
11592 | if (at::functionalization::impl::isFunctionalTensor(other)) { |
11593 | at::functionalization::impl::sync(other); |
11594 | other_ = at::functionalization::impl::from_functional_tensor(other); |
11595 | } else { |
11596 | other_ = other; |
11597 | } |
11598 | |
11599 | at::Tensor out_; |
11600 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
11601 | at::functionalization::impl::sync(out); |
11602 | out_ = at::functionalization::impl::from_functional_tensor(out); |
11603 | } else { |
11604 | out_ = out; |
11605 | } |
11606 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
11607 | if ((false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(other))) { |
          // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
11609 | TORCH_INTERNAL_ASSERT(false, |
11610 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
11611 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
11612 | } else { |
11613 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
11614 | at::AutoDispatchSkipFunctionalize guard; |
11615 | at::Tensor tmp_output = at::_ops::bitwise_or_Tensor_out::call(self_, other_, out_); |
          return out;
11617 | } |
11618 | } else { |
11619 | at::Tensor tmp_output; |
11620 | { |
11621 | at::AutoDispatchSkipFunctionalize guard; |
11622 | tmp_output = at::_ops::bitwise_or_Tensor::call(self_, other_); |
11623 | } |
11624 | at::functionalization::impl::replace_(out, tmp_output); |
11625 | at::functionalization::impl::commit_update(out); |
11626 | at::functionalization::impl::sync(out); |
11627 | return out; |
11628 | } |
11629 | } |
11630 | |
11631 | at::Tensor & bitwise_or__Tensor(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other) { |
11632 | if (true) { |
11633 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
11634 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
11636 | auto self_meta = to_meta(self); |
11637 | auto other_meta = to_meta(other); |
11638 | at::AutoDispatchSkipFunctionalize func_guard; |
11639 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
11640 | at::_ops::bitwise_or__Tensor::call(self_meta, other_meta); |
11641 | } |
11642 | |
11643 | at::Tensor self_; |
11644 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
11645 | at::functionalization::impl::sync(self); |
11646 | self_ = at::functionalization::impl::from_functional_tensor(self); |
11647 | } else { |
11648 | self_ = self; |
11649 | } |
11650 | |
11651 | at::Tensor other_; |
11652 | if (at::functionalization::impl::isFunctionalTensor(other)) { |
11653 | at::functionalization::impl::sync(other); |
11654 | other_ = at::functionalization::impl::from_functional_tensor(other); |
11655 | } else { |
11656 | other_ = other; |
11657 | } |
11658 | if (!(true && at::functionalization::impl::isFunctionalTensor(self))) { |
11659 | if ((false || at::functionalization::impl::isFunctionalTensor(other))) { |
          // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
11661 | TORCH_INTERNAL_ASSERT(false, |
11662 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
11663 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
11664 | } else { |
11665 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
11666 | at::AutoDispatchSkipFunctionalize guard; |
11667 | at::Tensor tmp_output = at::_ops::bitwise_or__Tensor::call(self_, other_); |
          return self;
11669 | } |
11670 | } else { |
11671 | at::Tensor tmp_output; |
11672 | { |
11673 | at::AutoDispatchSkipFunctionalize guard; |
11674 | tmp_output = at::_ops::bitwise_or_Tensor::call(self_, other_); |
11675 | } |
11676 | at::functionalization::impl::replace_(self, tmp_output); |
11677 | at::functionalization::impl::commit_update(self); |
11678 | at::functionalization::impl::sync(self); |
11679 | return self; |
11680 | } |
11681 | } |
11682 | |
11683 | at::Tensor & bitwise_or_out_Scalar_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other, at::Tensor & out) { |
11684 | if (false) { |
11685 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
11686 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
11688 | auto self_meta = to_meta(self); |
11689 | auto out_meta = to_meta(out); |
11690 | at::AutoDispatchSkipFunctionalize func_guard; |
11691 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
11692 | at::_ops::bitwise_or_Scalar_out::call(self_meta, other, out_meta); |
11693 | } |
11694 | |
11695 | at::Tensor self_; |
11696 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
11697 | at::functionalization::impl::sync(self); |
11698 | self_ = at::functionalization::impl::from_functional_tensor(self); |
11699 | } else { |
11700 | self_ = self; |
11701 | } |
11702 | |
11703 | at::Tensor out_; |
11704 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
11705 | at::functionalization::impl::sync(out); |
11706 | out_ = at::functionalization::impl::from_functional_tensor(out); |
11707 | } else { |
11708 | out_ = out; |
11709 | } |
11710 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
11711 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
          // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
11713 | TORCH_INTERNAL_ASSERT(false, |
11714 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
11715 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
11716 | } else { |
11717 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
11718 | at::AutoDispatchSkipFunctionalize guard; |
11719 | at::Tensor tmp_output = at::_ops::bitwise_or_Scalar_out::call(self_, other, out_); |
          return out;
11721 | } |
11722 | } else { |
11723 | at::Tensor tmp_output; |
11724 | { |
11725 | at::AutoDispatchSkipFunctionalize guard; |
11726 | tmp_output = at::_ops::bitwise_or_Scalar::call(self_, other); |
11727 | } |
11728 | at::functionalization::impl::replace_(out, tmp_output); |
11729 | at::functionalization::impl::commit_update(out); |
11730 | at::functionalization::impl::sync(out); |
11731 | return out; |
11732 | } |
11733 | } |
11734 | |
11735 | at::Tensor & bitwise_or__Scalar(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & other) { |
11736 | if (true) { |
11737 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
11738 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
11740 | auto self_meta = to_meta(self); |
11741 | at::AutoDispatchSkipFunctionalize func_guard; |
11742 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
11743 | at::_ops::bitwise_or__Scalar::call(self_meta, other); |
11744 | } |
11745 | |
11746 | at::Tensor self_; |
11747 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
11748 | at::functionalization::impl::sync(self); |
11749 | self_ = at::functionalization::impl::from_functional_tensor(self); |
11750 | } else { |
11751 | self_ = self; |
11752 | } |
11753 | if (!(true && at::functionalization::impl::isFunctionalTensor(self))) { |
11754 | if ((false)) { |
          // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
11756 | TORCH_INTERNAL_ASSERT(false, |
11757 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
11758 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
11759 | } else { |
11760 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
11761 | at::AutoDispatchSkipFunctionalize guard; |
11762 | at::Tensor tmp_output = at::_ops::bitwise_or__Scalar::call(self_, other); |
          return self;
11764 | } |
11765 | } else { |
11766 | at::Tensor tmp_output; |
11767 | { |
11768 | at::AutoDispatchSkipFunctionalize guard; |
11769 | tmp_output = at::_ops::bitwise_or_Scalar::call(self_, other); |
11770 | } |
11771 | at::functionalization::impl::replace_(self, tmp_output); |
11772 | at::functionalization::impl::commit_update(self); |
11773 | at::functionalization::impl::sync(self); |
11774 | return self; |
11775 | } |
11776 | } |
11777 | |
11778 | at::Tensor & bitwise_or_out_Scalar_Tensor_out(c10::DispatchKeySet dispatchKeySet, const at::Scalar & self, const at::Tensor & other, at::Tensor & out) { |
11779 | if (false) { |
11780 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
11781 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
11783 | auto other_meta = to_meta(other); |
11784 | auto out_meta = to_meta(out); |
11785 | at::AutoDispatchSkipFunctionalize func_guard; |
11786 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
11787 | at::_ops::bitwise_or_Scalar_Tensor_out::call(self, other_meta, out_meta); |
11788 | } |
11789 | |
11790 | at::Tensor other_; |
11791 | if (at::functionalization::impl::isFunctionalTensor(other)) { |
11792 | at::functionalization::impl::sync(other); |
11793 | other_ = at::functionalization::impl::from_functional_tensor(other); |
11794 | } else { |
11795 | other_ = other; |
11796 | } |
11797 | |
11798 | at::Tensor out_; |
11799 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
11800 | at::functionalization::impl::sync(out); |
11801 | out_ = at::functionalization::impl::from_functional_tensor(out); |
11802 | } else { |
11803 | out_ = out; |
11804 | } |
11805 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
11806 | if ((false || at::functionalization::impl::isFunctionalTensor(other))) { |
          // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
11808 | TORCH_INTERNAL_ASSERT(false, |
11809 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
11810 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
11811 | } else { |
11812 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
11813 | at::AutoDispatchSkipFunctionalize guard; |
11814 | at::Tensor tmp_output = at::_ops::bitwise_or_Scalar_Tensor_out::call(self, other_, out_); |
          return out;
11816 | } |
11817 | } else { |
11818 | at::Tensor tmp_output; |
11819 | { |
11820 | at::AutoDispatchSkipFunctionalize guard; |
11821 | tmp_output = at::_ops::bitwise_or_Scalar_Tensor::call(self, other_); |
11822 | } |
11823 | at::functionalization::impl::replace_(out, tmp_output); |
11824 | at::functionalization::impl::commit_update(out); |
11825 | at::functionalization::impl::sync(out); |
11826 | return out; |
11827 | } |
11828 | } |
11829 | |
11830 | at::Tensor & bitwise_left_shift_out_Tensor_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) { |
11831 | if (false) { |
11832 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
11833 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
11835 | auto self_meta = to_meta(self); |
11836 | auto other_meta = to_meta(other); |
11837 | auto out_meta = to_meta(out); |
11838 | at::AutoDispatchSkipFunctionalize func_guard; |
11839 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
11840 | at::_ops::bitwise_left_shift_Tensor_out::call(self_meta, other_meta, out_meta); |
11841 | } |
11842 | |
11843 | at::Tensor self_; |
11844 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
11845 | at::functionalization::impl::sync(self); |
11846 | self_ = at::functionalization::impl::from_functional_tensor(self); |
11847 | } else { |
11848 | self_ = self; |
11849 | } |
11850 | |
11851 | at::Tensor other_; |
11852 | if (at::functionalization::impl::isFunctionalTensor(other)) { |
11853 | at::functionalization::impl::sync(other); |
11854 | other_ = at::functionalization::impl::from_functional_tensor(other); |
11855 | } else { |
11856 | other_ = other; |
11857 | } |
11858 | |
11859 | at::Tensor out_; |
11860 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
11861 | at::functionalization::impl::sync(out); |
11862 | out_ = at::functionalization::impl::from_functional_tensor(out); |
11863 | } else { |
11864 | out_ = out; |
11865 | } |
11866 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
11867 | if ((false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(other))) { |
          // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
11869 | TORCH_INTERNAL_ASSERT(false, |
11870 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
11871 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
11872 | } else { |
11873 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
11874 | at::AutoDispatchSkipFunctionalize guard; |
11875 | at::Tensor tmp_output = at::_ops::bitwise_left_shift_Tensor_out::call(self_, other_, out_); |
          return out;
11877 | } |
11878 | } else { |
11879 | at::Tensor tmp_output; |
11880 | { |
11881 | at::AutoDispatchSkipFunctionalize guard; |
11882 | tmp_output = at::_ops::bitwise_left_shift_Tensor::call(self_, other_); |
11883 | } |
11884 | at::functionalization::impl::replace_(out, tmp_output); |
11885 | at::functionalization::impl::commit_update(out); |
11886 | at::functionalization::impl::sync(out); |
11887 | return out; |
11888 | } |
11889 | } |
11890 | |
11891 | at::Tensor & bitwise_left_shift__Tensor(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other) { |
11892 | if (true) { |
11893 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
11894 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
11896 | auto self_meta = to_meta(self); |
11897 | auto other_meta = to_meta(other); |
11898 | at::AutoDispatchSkipFunctionalize func_guard; |
11899 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
11900 | at::_ops::bitwise_left_shift__Tensor::call(self_meta, other_meta); |
11901 | } |
11902 | |
11903 | at::Tensor self_; |
11904 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
11905 | at::functionalization::impl::sync(self); |
11906 | self_ = at::functionalization::impl::from_functional_tensor(self); |
11907 | } else { |
11908 | self_ = self; |
11909 | } |
11910 | |
11911 | at::Tensor other_; |
11912 | if (at::functionalization::impl::isFunctionalTensor(other)) { |
11913 | at::functionalization::impl::sync(other); |
11914 | other_ = at::functionalization::impl::from_functional_tensor(other); |
11915 | } else { |
11916 | other_ = other; |
11917 | } |
11918 | if (!(true && at::functionalization::impl::isFunctionalTensor(self))) { |
11919 | if ((false || at::functionalization::impl::isFunctionalTensor(other))) { |
          // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
11921 | TORCH_INTERNAL_ASSERT(false, |
11922 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
11923 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
11924 | } else { |
11925 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
11926 | at::AutoDispatchSkipFunctionalize guard; |
11927 | at::Tensor tmp_output = at::_ops::bitwise_left_shift__Tensor::call(self_, other_); |
          return self;
11929 | } |
11930 | } else { |
11931 | at::Tensor tmp_output; |
11932 | { |
11933 | at::AutoDispatchSkipFunctionalize guard; |
11934 | tmp_output = at::_ops::bitwise_left_shift_Tensor::call(self_, other_); |
11935 | } |
11936 | at::functionalization::impl::replace_(self, tmp_output); |
11937 | at::functionalization::impl::commit_update(self); |
11938 | at::functionalization::impl::sync(self); |
11939 | return self; |
11940 | } |
11941 | } |
11942 | |
11943 | at::Tensor & bitwise_left_shift_out_Tensor_Scalar_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other, at::Tensor & out) { |
11944 | if (false) { |
11945 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
11946 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
11948 | auto self_meta = to_meta(self); |
11949 | auto out_meta = to_meta(out); |
11950 | at::AutoDispatchSkipFunctionalize func_guard; |
11951 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
11952 | at::_ops::bitwise_left_shift_Tensor_Scalar_out::call(self_meta, other, out_meta); |
11953 | } |
11954 | |
11955 | at::Tensor self_; |
11956 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
11957 | at::functionalization::impl::sync(self); |
11958 | self_ = at::functionalization::impl::from_functional_tensor(self); |
11959 | } else { |
11960 | self_ = self; |
11961 | } |
11962 | |
11963 | at::Tensor out_; |
11964 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
11965 | at::functionalization::impl::sync(out); |
11966 | out_ = at::functionalization::impl::from_functional_tensor(out); |
11967 | } else { |
11968 | out_ = out; |
11969 | } |
11970 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
11971 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
          // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
11973 | TORCH_INTERNAL_ASSERT(false, |
11974 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
11975 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
11976 | } else { |
11977 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
11978 | at::AutoDispatchSkipFunctionalize guard; |
11979 | at::Tensor tmp_output = at::_ops::bitwise_left_shift_Tensor_Scalar_out::call(self_, other, out_); |
          return out;
11981 | } |
11982 | } else { |
11983 | at::Tensor tmp_output; |
11984 | { |
11985 | at::AutoDispatchSkipFunctionalize guard; |
11986 | tmp_output = at::_ops::bitwise_left_shift_Tensor_Scalar::call(self_, other); |
11987 | } |
11988 | at::functionalization::impl::replace_(out, tmp_output); |
11989 | at::functionalization::impl::commit_update(out); |
11990 | at::functionalization::impl::sync(out); |
11991 | return out; |
11992 | } |
11993 | } |
11994 | |
11995 | at::Tensor & bitwise_left_shift__Tensor_Scalar(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & other) { |
11996 | if (true) { |
11997 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
11998 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
12000 | auto self_meta = to_meta(self); |
12001 | at::AutoDispatchSkipFunctionalize func_guard; |
12002 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
12003 | at::_ops::bitwise_left_shift__Tensor_Scalar::call(self_meta, other); |
12004 | } |
12005 | |
12006 | at::Tensor self_; |
12007 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
12008 | at::functionalization::impl::sync(self); |
12009 | self_ = at::functionalization::impl::from_functional_tensor(self); |
12010 | } else { |
12011 | self_ = self; |
12012 | } |
12013 | if (!(true && at::functionalization::impl::isFunctionalTensor(self))) { |
12014 | if ((false)) { |
          // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
12016 | TORCH_INTERNAL_ASSERT(false, |
12017 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
12018 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
12019 | } else { |
12020 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
12021 | at::AutoDispatchSkipFunctionalize guard; |
12022 | at::Tensor tmp_output = at::_ops::bitwise_left_shift__Tensor_Scalar::call(self_, other); |
          return self;
12024 | } |
12025 | } else { |
12026 | at::Tensor tmp_output; |
12027 | { |
12028 | at::AutoDispatchSkipFunctionalize guard; |
12029 | tmp_output = at::_ops::bitwise_left_shift_Tensor_Scalar::call(self_, other); |
12030 | } |
12031 | at::functionalization::impl::replace_(self, tmp_output); |
12032 | at::functionalization::impl::commit_update(self); |
12033 | at::functionalization::impl::sync(self); |
12034 | return self; |
12035 | } |
12036 | } |
12037 | |
12038 | at::Tensor & bitwise_left_shift_out_Scalar_Tensor_out(c10::DispatchKeySet dispatchKeySet, const at::Scalar & self, const at::Tensor & other, at::Tensor & out) { |
12039 | if (false) { |
12040 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
12041 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
12043 | auto other_meta = to_meta(other); |
12044 | auto out_meta = to_meta(out); |
12045 | at::AutoDispatchSkipFunctionalize func_guard; |
12046 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
12047 | at::_ops::bitwise_left_shift_Scalar_Tensor_out::call(self, other_meta, out_meta); |
12048 | } |
12049 | |
12050 | at::Tensor other_; |
12051 | if (at::functionalization::impl::isFunctionalTensor(other)) { |
12052 | at::functionalization::impl::sync(other); |
12053 | other_ = at::functionalization::impl::from_functional_tensor(other); |
12054 | } else { |
12055 | other_ = other; |
12056 | } |
12057 | |
12058 | at::Tensor out_; |
12059 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
12060 | at::functionalization::impl::sync(out); |
12061 | out_ = at::functionalization::impl::from_functional_tensor(out); |
12062 | } else { |
12063 | out_ = out; |
12064 | } |
12065 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
12066 | if ((false || at::functionalization::impl::isFunctionalTensor(other))) { |
          // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
12068 | TORCH_INTERNAL_ASSERT(false, |
12069 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
12070 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
12071 | } else { |
12072 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
12073 | at::AutoDispatchSkipFunctionalize guard; |
12074 | at::Tensor tmp_output = at::_ops::bitwise_left_shift_Scalar_Tensor_out::call(self, other_, out_); |
          return out;
12076 | } |
12077 | } else { |
12078 | at::Tensor tmp_output; |
12079 | { |
12080 | at::AutoDispatchSkipFunctionalize guard; |
12081 | tmp_output = at::_ops::bitwise_left_shift_Scalar_Tensor::call(self, other_); |
12082 | } |
12083 | at::functionalization::impl::replace_(out, tmp_output); |
12084 | at::functionalization::impl::commit_update(out); |
12085 | at::functionalization::impl::sync(out); |
12086 | return out; |
12087 | } |
12088 | } |
12089 | |
12090 | at::Tensor & __rshift___out_Scalar_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other, at::Tensor & out) { |
12091 | if (false) { |
12092 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
12093 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
12095 | auto self_meta = to_meta(self); |
12096 | auto out_meta = to_meta(out); |
12097 | at::AutoDispatchSkipFunctionalize func_guard; |
12098 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
12099 | at::_ops::__rshift___Scalar_out::call(self_meta, other, out_meta); |
12100 | } |
12101 | |
12102 | at::Tensor self_; |
12103 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
12104 | at::functionalization::impl::sync(self); |
12105 | self_ = at::functionalization::impl::from_functional_tensor(self); |
12106 | } else { |
12107 | self_ = self; |
12108 | } |
12109 | |
12110 | at::Tensor out_; |
12111 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
12112 | at::functionalization::impl::sync(out); |
12113 | out_ = at::functionalization::impl::from_functional_tensor(out); |
12114 | } else { |
12115 | out_ = out; |
12116 | } |
12117 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
12118 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
          // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
12120 | TORCH_INTERNAL_ASSERT(false, |
12121 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
12122 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
12123 | } else { |
12124 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
12125 | at::AutoDispatchSkipFunctionalize guard; |
12126 | at::Tensor tmp_output = at::_ops::__rshift___Scalar_out::call(self_, other, out_); |
          return out;
12128 | } |
12129 | } else { |
12130 | at::Tensor tmp_output; |
12131 | { |
12132 | at::AutoDispatchSkipFunctionalize guard; |
12133 | tmp_output = at::_ops::__rshift___Scalar::call(self_, other); |
12134 | } |
12135 | at::functionalization::impl::replace_(out, tmp_output); |
12136 | at::functionalization::impl::commit_update(out); |
12137 | at::functionalization::impl::sync(out); |
12138 | return out; |
12139 | } |
12140 | } |
12141 | |
12142 | at::Tensor & __irshift___Scalar(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & other) { |
12143 | if (true) { |
12144 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
12145 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
12147 | auto self_meta = to_meta(self); |
12148 | at::AutoDispatchSkipFunctionalize func_guard; |
12149 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
12150 | at::_ops::__irshift___Scalar::call(self_meta, other); |
12151 | } |
12152 | |
12153 | at::Tensor self_; |
12154 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
12155 | at::functionalization::impl::sync(self); |
12156 | self_ = at::functionalization::impl::from_functional_tensor(self); |
12157 | } else { |
12158 | self_ = self; |
12159 | } |
12160 | if (!(true && at::functionalization::impl::isFunctionalTensor(self))) { |
12161 | if ((false)) { |
          // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
12163 | TORCH_INTERNAL_ASSERT(false, |
12164 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
12165 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
12166 | } else { |
12167 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
12168 | at::AutoDispatchSkipFunctionalize guard; |
12169 | at::Tensor tmp_output = at::_ops::__irshift___Scalar::call(self_, other); |
          return self;
12171 | } |
12172 | } else { |
12173 | at::Tensor tmp_output; |
12174 | { |
12175 | at::AutoDispatchSkipFunctionalize guard; |
12176 | tmp_output = at::_ops::__rshift___Scalar::call(self_, other); |
12177 | } |
12178 | at::functionalization::impl::replace_(self, tmp_output); |
12179 | at::functionalization::impl::commit_update(self); |
12180 | at::functionalization::impl::sync(self); |
12181 | return self; |
12182 | } |
12183 | } |
12184 | |
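// Editorial note (not generated): the in-place shift operators have no dedicated
// functional counterpart, so `__irshift___Scalar` above reuses the functional
// `at::_ops::__rshift___Scalar` variant when recording the update, and
// `__irshift___Tensor` further below does the same with `__rshift___Tensor`.
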
12185 | at::Tensor & __rshift___out_Tensor_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) { |
12186 | if (false) { |
12187 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
12188 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
12190 | auto self_meta = to_meta(self); |
12191 | auto other_meta = to_meta(other); |
12192 | auto out_meta = to_meta(out); |
12193 | at::AutoDispatchSkipFunctionalize func_guard; |
12194 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
12195 | at::_ops::__rshift___Tensor_out::call(self_meta, other_meta, out_meta); |
12196 | } |
12197 | |
12198 | at::Tensor self_; |
12199 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
12200 | at::functionalization::impl::sync(self); |
12201 | self_ = at::functionalization::impl::from_functional_tensor(self); |
12202 | } else { |
12203 | self_ = self; |
12204 | } |
12205 | |
12206 | at::Tensor other_; |
12207 | if (at::functionalization::impl::isFunctionalTensor(other)) { |
12208 | at::functionalization::impl::sync(other); |
12209 | other_ = at::functionalization::impl::from_functional_tensor(other); |
12210 | } else { |
12211 | other_ = other; |
12212 | } |
12213 | |
12214 | at::Tensor out_; |
12215 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
12216 | at::functionalization::impl::sync(out); |
12217 | out_ = at::functionalization::impl::from_functional_tensor(out); |
12218 | } else { |
12219 | out_ = out; |
12220 | } |
12221 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
12222 | if ((false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(other))) { |
          // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
12224 | TORCH_INTERNAL_ASSERT(false, |
12225 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
12226 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
12227 | } else { |
12228 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
12229 | at::AutoDispatchSkipFunctionalize guard; |
12230 | at::Tensor tmp_output = at::_ops::__rshift___Tensor_out::call(self_, other_, out_); |
          return out;
12232 | } |
12233 | } else { |
12234 | at::Tensor tmp_output; |
12235 | { |
12236 | at::AutoDispatchSkipFunctionalize guard; |
12237 | tmp_output = at::_ops::__rshift___Tensor::call(self_, other_); |
12238 | } |
12239 | at::functionalization::impl::replace_(out, tmp_output); |
12240 | at::functionalization::impl::commit_update(out); |
12241 | at::functionalization::impl::sync(out); |
12242 | return out; |
12243 | } |
12244 | } |
12245 | |
12246 | at::Tensor & __irshift___Tensor(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other) { |
12247 | if (true) { |
12248 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
12249 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
        // (We can only do this for inplace ops today though, because they technically all support meta tensors).
12251 | auto self_meta = to_meta(self); |
12252 | auto other_meta = to_meta(other); |
12253 | at::AutoDispatchSkipFunctionalize func_guard; |
12254 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
12255 | at::_ops::__irshift___Tensor::call(self_meta, other_meta); |
12256 | } |
12257 | |
12258 | at::Tensor self_; |
12259 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
12260 | at::functionalization::impl::sync(self); |
12261 | self_ = at::functionalization::impl::from_functional_tensor(self); |
12262 | } else { |
12263 | self_ = self; |
12264 | } |
12265 | |
12266 | at::Tensor other_; |
12267 | if (at::functionalization::impl::isFunctionalTensor(other)) { |
12268 | at::functionalization::impl::sync(other); |
12269 | other_ = at::functionalization::impl::from_functional_tensor(other); |
12270 | } else { |
12271 | other_ = other; |
12272 | } |
12273 | if (!(true && at::functionalization::impl::isFunctionalTensor(self))) { |
12274 | if ((false || at::functionalization::impl::isFunctionalTensor(other))) { |
          // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
12276 | TORCH_INTERNAL_ASSERT(false, |
12277 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
12278 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
12279 | } else { |
12280 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
12281 | at::AutoDispatchSkipFunctionalize guard; |
12282 | at::Tensor tmp_output = at::_ops::__irshift___Tensor::call(self_, other_); |
      return self;
12284 | } |
12285 | } else { |
12286 | at::Tensor tmp_output; |
12287 | { |
12288 | at::AutoDispatchSkipFunctionalize guard; |
12289 | tmp_output = at::_ops::__rshift___Tensor::call(self_, other_); |
12290 | } |
12291 | at::functionalization::impl::replace_(self, tmp_output); |
12292 | at::functionalization::impl::commit_update(self); |
12293 | at::functionalization::impl::sync(self); |
12294 | return self; |
12295 | } |
12296 | } |
12297 | |
12298 | at::Tensor & tril_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t diagonal, at::Tensor & out) { |
12299 | if (false) { |
12300 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
    // This will help us catch shape errors that apply to inplace ops but wouldn't apply to their functional variants.
    // (We can only do this for inplace ops today though, because they technically all support meta tensors).
12303 | auto self_meta = to_meta(self); |
12304 | auto out_meta = to_meta(out); |
12305 | at::AutoDispatchSkipFunctionalize func_guard; |
12306 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
12307 | at::_ops::tril_out::call(self_meta, diagonal, out_meta); |
12308 | } |
12309 | |
12310 | at::Tensor self_; |
12311 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
12312 | at::functionalization::impl::sync(self); |
12313 | self_ = at::functionalization::impl::from_functional_tensor(self); |
12314 | } else { |
12315 | self_ = self; |
12316 | } |
12317 | |
12318 | at::Tensor out_; |
12319 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
12320 | at::functionalization::impl::sync(out); |
12321 | out_ = at::functionalization::impl::from_functional_tensor(out); |
12322 | } else { |
12323 | out_ = out; |
12324 | } |
12325 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
12326 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
      // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
12328 | TORCH_INTERNAL_ASSERT(false, |
12329 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
12330 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
12331 | } else { |
12332 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
12333 | at::AutoDispatchSkipFunctionalize guard; |
12334 | at::Tensor tmp_output = at::_ops::tril_out::call(self_, diagonal, out_); |
      return out;
12336 | } |
12337 | } else { |
12338 | at::Tensor tmp_output; |
12339 | { |
12340 | at::AutoDispatchSkipFunctionalize guard; |
12341 | tmp_output = at::_ops::tril::call(self_, diagonal); |
12342 | } |
12343 | at::functionalization::impl::replace_(out, tmp_output); |
12344 | at::functionalization::impl::commit_update(out); |
12345 | at::functionalization::impl::sync(out); |
12346 | return out; |
12347 | } |
12348 | } |
12349 | |
12350 | at::Tensor & tril_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, int64_t diagonal) { |
12351 | if (true) { |
12352 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
    // This will help us catch shape errors that apply to inplace ops but wouldn't apply to their functional variants.
    // (We can only do this for inplace ops today though, because they technically all support meta tensors).
12355 | auto self_meta = to_meta(self); |
12356 | at::AutoDispatchSkipFunctionalize func_guard; |
12357 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
12358 | at::_ops::tril_::call(self_meta, diagonal); |
12359 | } |
12360 | |
12361 | at::Tensor self_; |
12362 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
12363 | at::functionalization::impl::sync(self); |
12364 | self_ = at::functionalization::impl::from_functional_tensor(self); |
12365 | } else { |
12366 | self_ = self; |
12367 | } |
12368 | if (!(true && at::functionalization::impl::isFunctionalTensor(self))) { |
12369 | if ((false)) { |
      // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
12371 | TORCH_INTERNAL_ASSERT(false, |
12372 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
12373 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
12374 | } else { |
12375 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
12376 | at::AutoDispatchSkipFunctionalize guard; |
12377 | at::Tensor tmp_output = at::_ops::tril_::call(self_, diagonal); |
      return self;
12379 | } |
12380 | } else { |
12381 | at::Tensor tmp_output; |
12382 | { |
12383 | at::AutoDispatchSkipFunctionalize guard; |
12384 | tmp_output = at::_ops::tril::call(self_, diagonal); |
12385 | } |
12386 | at::functionalization::impl::replace_(self, tmp_output); |
12387 | at::functionalization::impl::commit_update(self); |
12388 | at::functionalization::impl::sync(self); |
12389 | return self; |
12390 | } |
12391 | } |
12392 | |
12393 | at::Tensor & triu_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t diagonal, at::Tensor & out) { |
12394 | if (false) { |
12395 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
    // This will help us catch shape errors that apply to inplace ops but wouldn't apply to their functional variants.
    // (We can only do this for inplace ops today though, because they technically all support meta tensors).
12398 | auto self_meta = to_meta(self); |
12399 | auto out_meta = to_meta(out); |
12400 | at::AutoDispatchSkipFunctionalize func_guard; |
12401 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
12402 | at::_ops::triu_out::call(self_meta, diagonal, out_meta); |
12403 | } |
12404 | |
12405 | at::Tensor self_; |
12406 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
12407 | at::functionalization::impl::sync(self); |
12408 | self_ = at::functionalization::impl::from_functional_tensor(self); |
12409 | } else { |
12410 | self_ = self; |
12411 | } |
12412 | |
12413 | at::Tensor out_; |
12414 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
12415 | at::functionalization::impl::sync(out); |
12416 | out_ = at::functionalization::impl::from_functional_tensor(out); |
12417 | } else { |
12418 | out_ = out; |
12419 | } |
12420 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
12421 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
      // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
12423 | TORCH_INTERNAL_ASSERT(false, |
12424 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
12425 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
12426 | } else { |
12427 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
12428 | at::AutoDispatchSkipFunctionalize guard; |
12429 | at::Tensor tmp_output = at::_ops::triu_out::call(self_, diagonal, out_); |
      return out;
12431 | } |
12432 | } else { |
12433 | at::Tensor tmp_output; |
12434 | { |
12435 | at::AutoDispatchSkipFunctionalize guard; |
12436 | tmp_output = at::_ops::triu::call(self_, diagonal); |
12437 | } |
12438 | at::functionalization::impl::replace_(out, tmp_output); |
12439 | at::functionalization::impl::commit_update(out); |
12440 | at::functionalization::impl::sync(out); |
12441 | return out; |
12442 | } |
12443 | } |
12444 | |
12445 | at::Tensor & triu_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, int64_t diagonal) { |
12446 | if (true) { |
12447 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
    // This will help us catch shape errors that apply to inplace ops but wouldn't apply to their functional variants.
    // (We can only do this for inplace ops today though, because they technically all support meta tensors).
12450 | auto self_meta = to_meta(self); |
12451 | at::AutoDispatchSkipFunctionalize func_guard; |
12452 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
12453 | at::_ops::triu_::call(self_meta, diagonal); |
12454 | } |
12455 | |
12456 | at::Tensor self_; |
12457 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
12458 | at::functionalization::impl::sync(self); |
12459 | self_ = at::functionalization::impl::from_functional_tensor(self); |
12460 | } else { |
12461 | self_ = self; |
12462 | } |
12463 | if (!(true && at::functionalization::impl::isFunctionalTensor(self))) { |
12464 | if ((false)) { |
      // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
12466 | TORCH_INTERNAL_ASSERT(false, |
12467 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
12468 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
12469 | } else { |
12470 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
12471 | at::AutoDispatchSkipFunctionalize guard; |
12472 | at::Tensor tmp_output = at::_ops::triu_::call(self_, diagonal); |
      return self;
12474 | } |
12475 | } else { |
12476 | at::Tensor tmp_output; |
12477 | { |
12478 | at::AutoDispatchSkipFunctionalize guard; |
12479 | tmp_output = at::_ops::triu::call(self_, diagonal); |
12480 | } |
12481 | at::functionalization::impl::replace_(self, tmp_output); |
12482 | at::functionalization::impl::commit_update(self); |
12483 | at::functionalization::impl::sync(self); |
12484 | return self; |
12485 | } |
12486 | } |
12487 | |
12488 | at::Tensor & digamma_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) { |
12489 | if (false) { |
12490 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
    // This will help us catch shape errors that apply to inplace ops but wouldn't apply to their functional variants.
    // (We can only do this for inplace ops today though, because they technically all support meta tensors).
12493 | auto self_meta = to_meta(self); |
12494 | auto out_meta = to_meta(out); |
12495 | at::AutoDispatchSkipFunctionalize func_guard; |
12496 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
12497 | at::_ops::digamma_out::call(self_meta, out_meta); |
12498 | } |
12499 | |
12500 | at::Tensor self_; |
12501 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
12502 | at::functionalization::impl::sync(self); |
12503 | self_ = at::functionalization::impl::from_functional_tensor(self); |
12504 | } else { |
12505 | self_ = self; |
12506 | } |
12507 | |
12508 | at::Tensor out_; |
12509 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
12510 | at::functionalization::impl::sync(out); |
12511 | out_ = at::functionalization::impl::from_functional_tensor(out); |
12512 | } else { |
12513 | out_ = out; |
12514 | } |
12515 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
12516 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
      // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
12518 | TORCH_INTERNAL_ASSERT(false, |
12519 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
12520 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
12521 | } else { |
12522 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
12523 | at::AutoDispatchSkipFunctionalize guard; |
12524 | at::Tensor tmp_output = at::_ops::digamma_out::call(self_, out_); |
      return out;
12526 | } |
12527 | } else { |
12528 | at::Tensor tmp_output; |
12529 | { |
12530 | at::AutoDispatchSkipFunctionalize guard; |
12531 | tmp_output = at::_ops::digamma::call(self_); |
12532 | } |
12533 | at::functionalization::impl::replace_(out, tmp_output); |
12534 | at::functionalization::impl::commit_update(out); |
12535 | at::functionalization::impl::sync(out); |
12536 | return out; |
12537 | } |
12538 | } |
12539 | |
12540 | at::Tensor & digamma_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) { |
12541 | if (true) { |
12542 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
    // This will help us catch shape errors that apply to inplace ops but wouldn't apply to their functional variants.
    // (We can only do this for inplace ops today though, because they technically all support meta tensors).
12545 | auto self_meta = to_meta(self); |
12546 | at::AutoDispatchSkipFunctionalize func_guard; |
12547 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
12548 | at::_ops::digamma_::call(self_meta); |
12549 | } |
12550 | |
12551 | at::Tensor self_; |
12552 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
12553 | at::functionalization::impl::sync(self); |
12554 | self_ = at::functionalization::impl::from_functional_tensor(self); |
12555 | } else { |
12556 | self_ = self; |
12557 | } |
12558 | if (!(true && at::functionalization::impl::isFunctionalTensor(self))) { |
12559 | if ((false)) { |
      // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
12561 | TORCH_INTERNAL_ASSERT(false, |
12562 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
12563 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
12564 | } else { |
12565 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
12566 | at::AutoDispatchSkipFunctionalize guard; |
12567 | at::Tensor tmp_output = at::_ops::digamma_::call(self_); |
      return self;
12569 | } |
12570 | } else { |
12571 | at::Tensor tmp_output; |
12572 | { |
12573 | at::AutoDispatchSkipFunctionalize guard; |
12574 | tmp_output = at::_ops::digamma::call(self_); |
12575 | } |
12576 | at::functionalization::impl::replace_(self, tmp_output); |
12577 | at::functionalization::impl::commit_update(self); |
12578 | at::functionalization::impl::sync(self); |
12579 | return self; |
12580 | } |
12581 | } |
12582 | |
12583 | at::Tensor & lerp_out_Scalar_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & end, const at::Scalar & weight, at::Tensor & out) { |
12584 | if (false) { |
12585 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
    // This will help us catch shape errors that apply to inplace ops but wouldn't apply to their functional variants.
    // (We can only do this for inplace ops today though, because they technically all support meta tensors).
12588 | auto self_meta = to_meta(self); |
12589 | auto end_meta = to_meta(end); |
12590 | auto out_meta = to_meta(out); |
12591 | at::AutoDispatchSkipFunctionalize func_guard; |
12592 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
12593 | at::_ops::lerp_Scalar_out::call(self_meta, end_meta, weight, out_meta); |
12594 | } |
12595 | |
12596 | at::Tensor self_; |
12597 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
12598 | at::functionalization::impl::sync(self); |
12599 | self_ = at::functionalization::impl::from_functional_tensor(self); |
12600 | } else { |
12601 | self_ = self; |
12602 | } |
12603 | |
12604 | at::Tensor end_; |
12605 | if (at::functionalization::impl::isFunctionalTensor(end)) { |
12606 | at::functionalization::impl::sync(end); |
12607 | end_ = at::functionalization::impl::from_functional_tensor(end); |
12608 | } else { |
12609 | end_ = end; |
12610 | } |
12611 | |
12612 | at::Tensor out_; |
12613 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
12614 | at::functionalization::impl::sync(out); |
12615 | out_ = at::functionalization::impl::from_functional_tensor(out); |
12616 | } else { |
12617 | out_ = out; |
12618 | } |
12619 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
12620 | if ((false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(end))) { |
      // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
12622 | TORCH_INTERNAL_ASSERT(false, |
12623 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
12624 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
12625 | } else { |
12626 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
12627 | at::AutoDispatchSkipFunctionalize guard; |
12628 | at::Tensor tmp_output = at::_ops::lerp_Scalar_out::call(self_, end_, weight, out_); |
      return out;
12630 | } |
12631 | } else { |
12632 | at::Tensor tmp_output; |
12633 | { |
12634 | at::AutoDispatchSkipFunctionalize guard; |
12635 | tmp_output = at::_ops::lerp_Scalar::call(self_, end_, weight); |
12636 | } |
12637 | at::functionalization::impl::replace_(out, tmp_output); |
12638 | at::functionalization::impl::commit_update(out); |
12639 | at::functionalization::impl::sync(out); |
12640 | return out; |
12641 | } |
12642 | } |
12643 | |
12644 | at::Tensor & lerp__Scalar(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & end, const at::Scalar & weight) { |
12645 | if (true) { |
12646 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
    // This will help us catch shape errors that apply to inplace ops but wouldn't apply to their functional variants.
    // (We can only do this for inplace ops today though, because they technically all support meta tensors).
12649 | auto self_meta = to_meta(self); |
12650 | auto end_meta = to_meta(end); |
12651 | at::AutoDispatchSkipFunctionalize func_guard; |
12652 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
12653 | at::_ops::lerp__Scalar::call(self_meta, end_meta, weight); |
12654 | } |
12655 | |
12656 | at::Tensor self_; |
12657 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
12658 | at::functionalization::impl::sync(self); |
12659 | self_ = at::functionalization::impl::from_functional_tensor(self); |
12660 | } else { |
12661 | self_ = self; |
12662 | } |
12663 | |
12664 | at::Tensor end_; |
12665 | if (at::functionalization::impl::isFunctionalTensor(end)) { |
12666 | at::functionalization::impl::sync(end); |
12667 | end_ = at::functionalization::impl::from_functional_tensor(end); |
12668 | } else { |
12669 | end_ = end; |
12670 | } |
12671 | if (!(true && at::functionalization::impl::isFunctionalTensor(self))) { |
12672 | if ((false || at::functionalization::impl::isFunctionalTensor(end))) { |
      // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
12674 | TORCH_INTERNAL_ASSERT(false, |
12675 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
12676 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
12677 | } else { |
12678 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
12679 | at::AutoDispatchSkipFunctionalize guard; |
12680 | at::Tensor tmp_output = at::_ops::lerp__Scalar::call(self_, end_, weight); |
      return self;
12682 | } |
12683 | } else { |
12684 | at::Tensor tmp_output; |
12685 | { |
12686 | at::AutoDispatchSkipFunctionalize guard; |
12687 | tmp_output = at::_ops::lerp_Scalar::call(self_, end_, weight); |
12688 | } |
12689 | at::functionalization::impl::replace_(self, tmp_output); |
12690 | at::functionalization::impl::commit_update(self); |
12691 | at::functionalization::impl::sync(self); |
12692 | return self; |
12693 | } |
12694 | } |
12695 | |
12696 | at::Tensor & lerp_out_Tensor_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & end, const at::Tensor & weight, at::Tensor & out) { |
12697 | if (false) { |
12698 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
    // This will help us catch shape errors that apply to inplace ops but wouldn't apply to their functional variants.
    // (We can only do this for inplace ops today though, because they technically all support meta tensors).
12701 | auto self_meta = to_meta(self); |
12702 | auto end_meta = to_meta(end); |
12703 | auto weight_meta = to_meta(weight); |
12704 | auto out_meta = to_meta(out); |
12705 | at::AutoDispatchSkipFunctionalize func_guard; |
12706 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
12707 | at::_ops::lerp_Tensor_out::call(self_meta, end_meta, weight_meta, out_meta); |
12708 | } |
12709 | |
12710 | at::Tensor self_; |
12711 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
12712 | at::functionalization::impl::sync(self); |
12713 | self_ = at::functionalization::impl::from_functional_tensor(self); |
12714 | } else { |
12715 | self_ = self; |
12716 | } |
12717 | |
12718 | at::Tensor end_; |
12719 | if (at::functionalization::impl::isFunctionalTensor(end)) { |
12720 | at::functionalization::impl::sync(end); |
12721 | end_ = at::functionalization::impl::from_functional_tensor(end); |
12722 | } else { |
12723 | end_ = end; |
12724 | } |
12725 | |
12726 | at::Tensor weight_; |
12727 | if (at::functionalization::impl::isFunctionalTensor(weight)) { |
12728 | at::functionalization::impl::sync(weight); |
12729 | weight_ = at::functionalization::impl::from_functional_tensor(weight); |
12730 | } else { |
12731 | weight_ = weight; |
12732 | } |
12733 | |
12734 | at::Tensor out_; |
12735 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
12736 | at::functionalization::impl::sync(out); |
12737 | out_ = at::functionalization::impl::from_functional_tensor(out); |
12738 | } else { |
12739 | out_ = out; |
12740 | } |
12741 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
12742 | if ((false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(end) || at::functionalization::impl::isFunctionalTensor(weight))) { |
      // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
12744 | TORCH_INTERNAL_ASSERT(false, |
12745 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
12746 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
12747 | } else { |
12748 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
12749 | at::AutoDispatchSkipFunctionalize guard; |
12750 | at::Tensor tmp_output = at::_ops::lerp_Tensor_out::call(self_, end_, weight_, out_); |
      return out;
12752 | } |
12753 | } else { |
12754 | at::Tensor tmp_output; |
12755 | { |
12756 | at::AutoDispatchSkipFunctionalize guard; |
12757 | tmp_output = at::_ops::lerp_Tensor::call(self_, end_, weight_); |
12758 | } |
12759 | at::functionalization::impl::replace_(out, tmp_output); |
12760 | at::functionalization::impl::commit_update(out); |
12761 | at::functionalization::impl::sync(out); |
12762 | return out; |
12763 | } |
12764 | } |
12765 | |
12766 | at::Tensor & lerp__Tensor(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & end, const at::Tensor & weight) { |
12767 | if (true) { |
12768 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
    // This will help us catch shape errors that apply to inplace ops but wouldn't apply to their functional variants.
    // (We can only do this for inplace ops today though, because they technically all support meta tensors).
12771 | auto self_meta = to_meta(self); |
12772 | auto end_meta = to_meta(end); |
12773 | auto weight_meta = to_meta(weight); |
12774 | at::AutoDispatchSkipFunctionalize func_guard; |
12775 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
12776 | at::_ops::lerp__Tensor::call(self_meta, end_meta, weight_meta); |
12777 | } |
12778 | |
12779 | at::Tensor self_; |
12780 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
12781 | at::functionalization::impl::sync(self); |
12782 | self_ = at::functionalization::impl::from_functional_tensor(self); |
12783 | } else { |
12784 | self_ = self; |
12785 | } |
12786 | |
12787 | at::Tensor end_; |
12788 | if (at::functionalization::impl::isFunctionalTensor(end)) { |
12789 | at::functionalization::impl::sync(end); |
12790 | end_ = at::functionalization::impl::from_functional_tensor(end); |
12791 | } else { |
12792 | end_ = end; |
12793 | } |
12794 | |
12795 | at::Tensor weight_; |
12796 | if (at::functionalization::impl::isFunctionalTensor(weight)) { |
12797 | at::functionalization::impl::sync(weight); |
12798 | weight_ = at::functionalization::impl::from_functional_tensor(weight); |
12799 | } else { |
12800 | weight_ = weight; |
12801 | } |
12802 | if (!(true && at::functionalization::impl::isFunctionalTensor(self))) { |
12803 | if ((false || at::functionalization::impl::isFunctionalTensor(end) || at::functionalization::impl::isFunctionalTensor(weight))) { |
      // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
12805 | TORCH_INTERNAL_ASSERT(false, |
12806 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
12807 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
12808 | } else { |
12809 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
12810 | at::AutoDispatchSkipFunctionalize guard; |
12811 | at::Tensor tmp_output = at::_ops::lerp__Tensor::call(self_, end_, weight_); |
      return self;
12813 | } |
12814 | } else { |
12815 | at::Tensor tmp_output; |
12816 | { |
12817 | at::AutoDispatchSkipFunctionalize guard; |
12818 | tmp_output = at::_ops::lerp_Tensor::call(self_, end_, weight_); |
12819 | } |
12820 | at::functionalization::impl::replace_(self, tmp_output); |
12821 | at::functionalization::impl::commit_update(self); |
12822 | at::functionalization::impl::sync(self); |
12823 | return self; |
12824 | } |
12825 | } |
12826 | |
12827 | at::Tensor & uniform_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, double from, double to, c10::optional<at::Generator> generator, at::Tensor & out) { |
12828 | if (false) { |
12829 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
    // This will help us catch shape errors that apply to inplace ops but wouldn't apply to their functional variants.
    // (We can only do this for inplace ops today though, because they technically all support meta tensors).
12832 | auto self_meta = to_meta(self); |
12833 | auto out_meta = to_meta(out); |
12834 | at::AutoDispatchSkipFunctionalize func_guard; |
12835 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
12836 | at::_ops::uniform_out::call(self_meta, from, to, generator, out_meta); |
12837 | } |
12838 | |
12839 | at::Tensor self_; |
12840 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
12841 | at::functionalization::impl::sync(self); |
12842 | self_ = at::functionalization::impl::from_functional_tensor(self); |
12843 | } else { |
12844 | self_ = self; |
12845 | } |
12846 | |
12847 | at::Tensor out_; |
12848 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
12849 | at::functionalization::impl::sync(out); |
12850 | out_ = at::functionalization::impl::from_functional_tensor(out); |
12851 | } else { |
12852 | out_ = out; |
12853 | } |
12854 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
12855 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
      // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
12857 | TORCH_INTERNAL_ASSERT(false, |
12858 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
12859 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
12860 | } else { |
12861 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
12862 | at::AutoDispatchSkipFunctionalize guard; |
12863 | at::Tensor tmp_output = at::_ops::uniform_out::call(self_, from, to, generator, out_); |
      return out;
12865 | } |
12866 | } else { |
12867 | at::Tensor tmp_output; |
12868 | { |
12869 | at::AutoDispatchSkipFunctionalize guard; |
12870 | tmp_output = at::_ops::uniform::call(self_, from, to, generator); |
12871 | } |
12872 | at::functionalization::impl::replace_(out, tmp_output); |
12873 | at::functionalization::impl::commit_update(out); |
12874 | at::functionalization::impl::sync(out); |
12875 | return out; |
12876 | } |
12877 | } |
12878 | |
12879 | at::Tensor & uniform_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, double from, double to, c10::optional<at::Generator> generator) { |
12880 | if (true) { |
12881 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
    // This will help us catch shape errors that apply to inplace ops but wouldn't apply to their functional variants.
    // (We can only do this for inplace ops today though, because they technically all support meta tensors).
12884 | auto self_meta = to_meta(self); |
12885 | at::AutoDispatchSkipFunctionalize func_guard; |
12886 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
12887 | at::_ops::uniform_::call(self_meta, from, to, generator); |
12888 | } |
12889 | |
12890 | at::Tensor self_; |
12891 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
12892 | at::functionalization::impl::sync(self); |
12893 | self_ = at::functionalization::impl::from_functional_tensor(self); |
12894 | } else { |
12895 | self_ = self; |
12896 | } |
12897 | if (!(true && at::functionalization::impl::isFunctionalTensor(self))) { |
12898 | if ((false)) { |
      // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
12900 | TORCH_INTERNAL_ASSERT(false, |
12901 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
12902 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
12903 | } else { |
12904 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
12905 | at::AutoDispatchSkipFunctionalize guard; |
12906 | at::Tensor tmp_output = at::_ops::uniform_::call(self_, from, to, generator); |
      return self;
12908 | } |
12909 | } else { |
12910 | at::Tensor tmp_output; |
12911 | { |
12912 | at::AutoDispatchSkipFunctionalize guard; |
12913 | tmp_output = at::_ops::uniform::call(self_, from, to, generator); |
12914 | } |
12915 | at::functionalization::impl::replace_(self, tmp_output); |
12916 | at::functionalization::impl::commit_update(self); |
12917 | at::functionalization::impl::sync(self); |
12918 | return self; |
12919 | } |
12920 | } |
12921 | |
12922 | at::Tensor & tril_indices_out_out(c10::DispatchKeySet dispatchKeySet, int64_t row, int64_t col, int64_t offset, at::Tensor & out) { |
12923 | if (false) { |
12924 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
    // This will help us catch shape errors that apply to inplace ops but wouldn't apply to their functional variants.
    // (We can only do this for inplace ops today though, because they technically all support meta tensors).
12927 | auto out_meta = to_meta(out); |
12928 | at::AutoDispatchSkipFunctionalize func_guard; |
12929 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
12930 | at::_ops::tril_indices_out::call(row, col, offset, out_meta); |
12931 | } |
12932 | |
12933 | at::Tensor out_; |
12934 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
12935 | at::functionalization::impl::sync(out); |
12936 | out_ = at::functionalization::impl::from_functional_tensor(out); |
12937 | } else { |
12938 | out_ = out; |
12939 | } |
12940 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
12941 | if ((false)) { |
      // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
12943 | TORCH_INTERNAL_ASSERT(false, |
12944 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
12945 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
12946 | } else { |
12947 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
12948 | at::AutoDispatchSkipFunctionalize guard; |
12949 | at::Tensor tmp_output = at::_ops::tril_indices_out::call(row, col, offset, out_); |
      return out;
12951 | } |
12952 | } else { |
12953 | at::Tensor tmp_output; |
12954 | { |
12955 | at::AutoDispatchSkipFunctionalize guard; |
12956 | tmp_output = at::_ops::tril_indices::call(row, col, offset, out_.scalar_type(), out_.layout(), out_.device(), c10::nullopt); |
12957 | } |
12958 | at::functionalization::impl::replace_(out, tmp_output); |
12959 | at::functionalization::impl::commit_update(out); |
12960 | at::functionalization::impl::sync(out); |
12961 | return out; |
12962 | } |
12963 | } |
12964 | |
12965 | at::Tensor & triu_indices_out_out(c10::DispatchKeySet dispatchKeySet, int64_t row, int64_t col, int64_t offset, at::Tensor & out) { |
12966 | if (false) { |
12967 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
    // This will help us catch shape errors that apply to inplace ops but wouldn't apply to their functional variants.
    // (We can only do this for inplace ops today though, because they technically all support meta tensors).
12970 | auto out_meta = to_meta(out); |
12971 | at::AutoDispatchSkipFunctionalize func_guard; |
12972 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
12973 | at::_ops::triu_indices_out::call(row, col, offset, out_meta); |
12974 | } |
12975 | |
12976 | at::Tensor out_; |
12977 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
12978 | at::functionalization::impl::sync(out); |
12979 | out_ = at::functionalization::impl::from_functional_tensor(out); |
12980 | } else { |
12981 | out_ = out; |
12982 | } |
12983 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
12984 | if ((false)) { |
      // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
12986 | TORCH_INTERNAL_ASSERT(false, |
12987 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
12988 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
12989 | } else { |
12990 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
12991 | at::AutoDispatchSkipFunctionalize guard; |
12992 | at::Tensor tmp_output = at::_ops::triu_indices_out::call(row, col, offset, out_); |
      return out;
12994 | } |
12995 | } else { |
12996 | at::Tensor tmp_output; |
12997 | { |
12998 | at::AutoDispatchSkipFunctionalize guard; |
12999 | tmp_output = at::_ops::triu_indices::call(row, col, offset, out_.scalar_type(), out_.layout(), out_.device(), c10::nullopt); |
13000 | } |
13001 | at::functionalization::impl::replace_(out, tmp_output); |
13002 | at::functionalization::impl::commit_update(out); |
13003 | at::functionalization::impl::sync(out); |
13004 | return out; |
13005 | } |
13006 | } |
13007 | |
13008 | at::Tensor & ge_out_Scalar_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other, at::Tensor & out) { |
13009 | if (false) { |
13010 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
    // This will help us catch shape errors that apply to inplace ops but wouldn't apply to their functional variants.
    // (We can only do this for inplace ops today though, because they technically all support meta tensors).
13013 | auto self_meta = to_meta(self); |
13014 | auto out_meta = to_meta(out); |
13015 | at::AutoDispatchSkipFunctionalize func_guard; |
13016 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
13017 | at::_ops::ge_Scalar_out::call(self_meta, other, out_meta); |
13018 | } |
13019 | |
13020 | at::Tensor self_; |
13021 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
13022 | at::functionalization::impl::sync(self); |
13023 | self_ = at::functionalization::impl::from_functional_tensor(self); |
13024 | } else { |
13025 | self_ = self; |
13026 | } |
13027 | |
13028 | at::Tensor out_; |
13029 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
13030 | at::functionalization::impl::sync(out); |
13031 | out_ = at::functionalization::impl::from_functional_tensor(out); |
13032 | } else { |
13033 | out_ = out; |
13034 | } |
13035 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
13036 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
      // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
13038 | TORCH_INTERNAL_ASSERT(false, |
13039 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
13040 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
13041 | } else { |
13042 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
13043 | at::AutoDispatchSkipFunctionalize guard; |
13044 | at::Tensor tmp_output = at::_ops::ge_Scalar_out::call(self_, other, out_); |
      return out;
13046 | } |
13047 | } else { |
13048 | at::Tensor tmp_output; |
13049 | { |
13050 | at::AutoDispatchSkipFunctionalize guard; |
13051 | tmp_output = at::_ops::ge_Scalar::call(self_, other); |
13052 | } |
13053 | at::functionalization::impl::replace_(out, tmp_output); |
13054 | at::functionalization::impl::commit_update(out); |
13055 | at::functionalization::impl::sync(out); |
13056 | return out; |
13057 | } |
13058 | } |
13059 | |
13060 | at::Tensor & ge__Scalar(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & other) { |
13061 | if (true) { |
13062 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
    // This will help us catch shape errors that apply to inplace ops but wouldn't apply to their functional variants.
    // (We can only do this for inplace ops today though, because they technically all support meta tensors).
13065 | auto self_meta = to_meta(self); |
13066 | at::AutoDispatchSkipFunctionalize func_guard; |
13067 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
13068 | at::_ops::ge__Scalar::call(self_meta, other); |
13069 | } |
13070 | |
13071 | at::Tensor self_; |
13072 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
13073 | at::functionalization::impl::sync(self); |
13074 | self_ = at::functionalization::impl::from_functional_tensor(self); |
13075 | } else { |
13076 | self_ = self; |
13077 | } |
13078 | if (!(true && at::functionalization::impl::isFunctionalTensor(self))) { |
13079 | if ((false)) { |
      // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
13081 | TORCH_INTERNAL_ASSERT(false, |
13082 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
13083 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
13084 | } else { |
13085 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
13086 | at::AutoDispatchSkipFunctionalize guard; |
13087 | at::Tensor tmp_output = at::_ops::ge__Scalar::call(self_, other); |
      return self;
13089 | } |
13090 | } else { |
13091 | at::Tensor tmp_output; |
13092 | { |
13093 | at::AutoDispatchSkipFunctionalize guard; |
13094 | tmp_output = at::_ops::ge_Scalar::call(self_, other); |
13095 | } |
13096 | at::functionalization::impl::replace_(self, tmp_output); |
13097 | at::functionalization::impl::commit_update(self); |
13098 | at::functionalization::impl::sync(self); |
13099 | return self; |
13100 | } |
13101 | } |
13102 | |
13103 | at::Tensor & ge_out_Tensor_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) { |
13104 | if (false) { |
13105 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
    // This will help us catch shape errors that apply to inplace ops but wouldn't apply to their functional variants.
    // (We can only do this for inplace ops today though, because they technically all support meta tensors).
13108 | auto self_meta = to_meta(self); |
13109 | auto other_meta = to_meta(other); |
13110 | auto out_meta = to_meta(out); |
13111 | at::AutoDispatchSkipFunctionalize func_guard; |
13112 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
13113 | at::_ops::ge_Tensor_out::call(self_meta, other_meta, out_meta); |
13114 | } |
13115 | |
13116 | at::Tensor self_; |
13117 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
13118 | at::functionalization::impl::sync(self); |
13119 | self_ = at::functionalization::impl::from_functional_tensor(self); |
13120 | } else { |
13121 | self_ = self; |
13122 | } |
13123 | |
13124 | at::Tensor other_; |
13125 | if (at::functionalization::impl::isFunctionalTensor(other)) { |
13126 | at::functionalization::impl::sync(other); |
13127 | other_ = at::functionalization::impl::from_functional_tensor(other); |
13128 | } else { |
13129 | other_ = other; |
13130 | } |
13131 | |
13132 | at::Tensor out_; |
13133 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
13134 | at::functionalization::impl::sync(out); |
13135 | out_ = at::functionalization::impl::from_functional_tensor(out); |
13136 | } else { |
13137 | out_ = out; |
13138 | } |
13139 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
13140 | if ((false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(other))) { |
      // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
13142 | TORCH_INTERNAL_ASSERT(false, |
13143 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
13144 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
13145 | } else { |
13146 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
13147 | at::AutoDispatchSkipFunctionalize guard; |
13148 | at::Tensor tmp_output = at::_ops::ge_Tensor_out::call(self_, other_, out_); |
      return out;
13150 | } |
13151 | } else { |
13152 | at::Tensor tmp_output; |
13153 | { |
13154 | at::AutoDispatchSkipFunctionalize guard; |
13155 | tmp_output = at::_ops::ge_Tensor::call(self_, other_); |
13156 | } |
13157 | at::functionalization::impl::replace_(out, tmp_output); |
13158 | at::functionalization::impl::commit_update(out); |
13159 | at::functionalization::impl::sync(out); |
13160 | return out; |
13161 | } |
13162 | } |
13163 | |
13164 | at::Tensor & ge__Tensor(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other) { |
13165 | if (true) { |
13166 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
    // This will help us catch shape errors that apply to inplace ops but wouldn't apply to their functional variants.
    // (We can only do this for inplace ops today though, because they technically all support meta tensors).
13169 | auto self_meta = to_meta(self); |
13170 | auto other_meta = to_meta(other); |
13171 | at::AutoDispatchSkipFunctionalize func_guard; |
13172 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
13173 | at::_ops::ge__Tensor::call(self_meta, other_meta); |
13174 | } |
13175 | |
13176 | at::Tensor self_; |
13177 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
13178 | at::functionalization::impl::sync(self); |
13179 | self_ = at::functionalization::impl::from_functional_tensor(self); |
13180 | } else { |
13181 | self_ = self; |
13182 | } |
13183 | |
13184 | at::Tensor other_; |
13185 | if (at::functionalization::impl::isFunctionalTensor(other)) { |
13186 | at::functionalization::impl::sync(other); |
13187 | other_ = at::functionalization::impl::from_functional_tensor(other); |
13188 | } else { |
13189 | other_ = other; |
13190 | } |
13191 | if (!(true && at::functionalization::impl::isFunctionalTensor(self))) { |
13192 | if ((false || at::functionalization::impl::isFunctionalTensor(other))) { |
      // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
13194 | TORCH_INTERNAL_ASSERT(false, |
13195 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
13196 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
13197 | } else { |
13198 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
13199 | at::AutoDispatchSkipFunctionalize guard; |
13200 | at::Tensor tmp_output = at::_ops::ge__Tensor::call(self_, other_); |
      return self;
13202 | } |
13203 | } else { |
13204 | at::Tensor tmp_output; |
13205 | { |
13206 | at::AutoDispatchSkipFunctionalize guard; |
13207 | tmp_output = at::_ops::ge_Tensor::call(self_, other_); |
13208 | } |
13209 | at::functionalization::impl::replace_(self, tmp_output); |
13210 | at::functionalization::impl::commit_update(self); |
13211 | at::functionalization::impl::sync(self); |
13212 | return self; |
13213 | } |
13214 | } |
13215 | |
13216 | at::Tensor & le_out_Scalar_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other, at::Tensor & out) { |
13217 | if (false) { |
13218 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
    // This will help us catch shape errors that apply to inplace ops but wouldn't apply to their functional variants.
    // (We can only do this for inplace ops today though, because they technically all support meta tensors).
13221 | auto self_meta = to_meta(self); |
13222 | auto out_meta = to_meta(out); |
13223 | at::AutoDispatchSkipFunctionalize func_guard; |
13224 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
13225 | at::_ops::le_Scalar_out::call(self_meta, other, out_meta); |
13226 | } |
13227 | |
13228 | at::Tensor self_; |
13229 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
13230 | at::functionalization::impl::sync(self); |
13231 | self_ = at::functionalization::impl::from_functional_tensor(self); |
13232 | } else { |
13233 | self_ = self; |
13234 | } |
13235 | |
13236 | at::Tensor out_; |
13237 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
13238 | at::functionalization::impl::sync(out); |
13239 | out_ = at::functionalization::impl::from_functional_tensor(out); |
13240 | } else { |
13241 | out_ = out; |
13242 | } |
13243 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
13244 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
      // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
13246 | TORCH_INTERNAL_ASSERT(false, |
13247 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
13248 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
13249 | } else { |
13250 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
13251 | at::AutoDispatchSkipFunctionalize guard; |
13252 | at::Tensor tmp_output = at::_ops::le_Scalar_out::call(self_, other, out_); |
      return out;
13254 | } |
13255 | } else { |
13256 | at::Tensor tmp_output; |
13257 | { |
13258 | at::AutoDispatchSkipFunctionalize guard; |
13259 | tmp_output = at::_ops::le_Scalar::call(self_, other); |
13260 | } |
13261 | at::functionalization::impl::replace_(out, tmp_output); |
13262 | at::functionalization::impl::commit_update(out); |
13263 | at::functionalization::impl::sync(out); |
13264 | return out; |
13265 | } |
13266 | } |
13267 | |
13268 | at::Tensor & le__Scalar(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & other) { |
13269 | if (true) { |
13270 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
    // This will help us catch shape errors that apply to inplace ops but wouldn't apply to their functional variants.
    // (We can only do this for inplace ops today though, because they technically all support meta tensors).
13273 | auto self_meta = to_meta(self); |
13274 | at::AutoDispatchSkipFunctionalize func_guard; |
13275 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
13276 | at::_ops::le__Scalar::call(self_meta, other); |
13277 | } |
13278 | |
13279 | at::Tensor self_; |
13280 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
13281 | at::functionalization::impl::sync(self); |
13282 | self_ = at::functionalization::impl::from_functional_tensor(self); |
13283 | } else { |
13284 | self_ = self; |
13285 | } |
13286 | if (!(true && at::functionalization::impl::isFunctionalTensor(self))) { |
13287 | if ((false)) { |
      // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
13289 | TORCH_INTERNAL_ASSERT(false, |
13290 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
13291 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
13292 | } else { |
13293 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
13294 | at::AutoDispatchSkipFunctionalize guard; |
13295 | at::Tensor tmp_output = at::_ops::le__Scalar::call(self_, other); |
      return self;
13297 | } |
13298 | } else { |
13299 | at::Tensor tmp_output; |
13300 | { |
13301 | at::AutoDispatchSkipFunctionalize guard; |
13302 | tmp_output = at::_ops::le_Scalar::call(self_, other); |
13303 | } |
13304 | at::functionalization::impl::replace_(self, tmp_output); |
13305 | at::functionalization::impl::commit_update(self); |
13306 | at::functionalization::impl::sync(self); |
13307 | return self; |
13308 | } |
13309 | } |
13310 | |
13311 | at::Tensor & le_out_Tensor_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) { |
13312 | if (false) { |
13313 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
    // This will help us catch shape errors that apply to inplace ops but wouldn't apply to their functional variants.
    // (We can only do this for inplace ops today though, because they technically all support meta tensors).
13316 | auto self_meta = to_meta(self); |
13317 | auto other_meta = to_meta(other); |
13318 | auto out_meta = to_meta(out); |
13319 | at::AutoDispatchSkipFunctionalize func_guard; |
13320 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
13321 | at::_ops::le_Tensor_out::call(self_meta, other_meta, out_meta); |
13322 | } |
13323 | |
13324 | at::Tensor self_; |
13325 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
13326 | at::functionalization::impl::sync(self); |
13327 | self_ = at::functionalization::impl::from_functional_tensor(self); |
13328 | } else { |
13329 | self_ = self; |
13330 | } |
13331 | |
13332 | at::Tensor other_; |
13333 | if (at::functionalization::impl::isFunctionalTensor(other)) { |
13334 | at::functionalization::impl::sync(other); |
13335 | other_ = at::functionalization::impl::from_functional_tensor(other); |
13336 | } else { |
13337 | other_ = other; |
13338 | } |
13339 | |
13340 | at::Tensor out_; |
13341 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
13342 | at::functionalization::impl::sync(out); |
13343 | out_ = at::functionalization::impl::from_functional_tensor(out); |
13344 | } else { |
13345 | out_ = out; |
13346 | } |
13347 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
13348 | if ((false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(other))) { |
      // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
13350 | TORCH_INTERNAL_ASSERT(false, |
13351 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
13352 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
13353 | } else { |
13354 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
13355 | at::AutoDispatchSkipFunctionalize guard; |
13356 | at::Tensor tmp_output = at::_ops::le_Tensor_out::call(self_, other_, out_); |
      return out;
13358 | } |
13359 | } else { |
13360 | at::Tensor tmp_output; |
13361 | { |
13362 | at::AutoDispatchSkipFunctionalize guard; |
13363 | tmp_output = at::_ops::le_Tensor::call(self_, other_); |
13364 | } |
13365 | at::functionalization::impl::replace_(out, tmp_output); |
13366 | at::functionalization::impl::commit_update(out); |
13367 | at::functionalization::impl::sync(out); |
13368 | return out; |
13369 | } |
13370 | } |
13371 | |
13372 | at::Tensor & le__Tensor(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other) { |
13373 | if (true) { |
13374 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
    // This will help us catch shape errors that apply to inplace ops but wouldn't apply to their functional variants.
    // (We can only do this for inplace ops today though, because they technically all support meta tensors).
13377 | auto self_meta = to_meta(self); |
13378 | auto other_meta = to_meta(other); |
13379 | at::AutoDispatchSkipFunctionalize func_guard; |
13380 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
13381 | at::_ops::le__Tensor::call(self_meta, other_meta); |
13382 | } |
13383 | |
13384 | at::Tensor self_; |
13385 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
13386 | at::functionalization::impl::sync(self); |
13387 | self_ = at::functionalization::impl::from_functional_tensor(self); |
13388 | } else { |
13389 | self_ = self; |
13390 | } |
13391 | |
13392 | at::Tensor other_; |
13393 | if (at::functionalization::impl::isFunctionalTensor(other)) { |
13394 | at::functionalization::impl::sync(other); |
13395 | other_ = at::functionalization::impl::from_functional_tensor(other); |
13396 | } else { |
13397 | other_ = other; |
13398 | } |
13399 | if (!(true && at::functionalization::impl::isFunctionalTensor(self))) { |
13400 | if ((false || at::functionalization::impl::isFunctionalTensor(other))) { |
      // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
13402 | TORCH_INTERNAL_ASSERT(false, |
13403 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
13404 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
13405 | } else { |
13406 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
13407 | at::AutoDispatchSkipFunctionalize guard; |
13408 | at::Tensor tmp_output = at::_ops::le__Tensor::call(self_, other_); |
      return self;
13410 | } |
13411 | } else { |
13412 | at::Tensor tmp_output; |
13413 | { |
13414 | at::AutoDispatchSkipFunctionalize guard; |
13415 | tmp_output = at::_ops::le_Tensor::call(self_, other_); |
13416 | } |
13417 | at::functionalization::impl::replace_(self, tmp_output); |
13418 | at::functionalization::impl::commit_update(self); |
13419 | at::functionalization::impl::sync(self); |
13420 | return self; |
13421 | } |
13422 | } |
13423 | |
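// The out= kernels below follow the same structure, but the meta pre-check is compiled out
// (`if (false)`) and the functional result is written back into `out` rather than `self`.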
13424 | at::Tensor & gt_out_Scalar_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other, at::Tensor & out) { |
13425 | if (false) { |
13426 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
13427 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
13428 | // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
13429 | auto self_meta = to_meta(self); |
13430 | auto out_meta = to_meta(out); |
13431 | at::AutoDispatchSkipFunctionalize func_guard; |
13432 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
13433 | at::_ops::gt_Scalar_out::call(self_meta, other, out_meta); |
13434 | } |
13435 | |
13436 | at::Tensor self_; |
13437 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
13438 | at::functionalization::impl::sync(self); |
13439 | self_ = at::functionalization::impl::from_functional_tensor(self); |
13440 | } else { |
13441 | self_ = self; |
13442 | } |
13443 | |
13444 | at::Tensor out_; |
13445 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
13446 | at::functionalization::impl::sync(out); |
13447 | out_ = at::functionalization::impl::from_functional_tensor(out); |
13448 | } else { |
13449 | out_ = out; |
13450 | } |
13451 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
13452 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
13453 | // case 1: trying to mutate a non-functional tensor with a functional tensor is an error |
13454 | TORCH_INTERNAL_ASSERT(false, |
13455 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
13456 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
13457 | } else { |
13458 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
13459 | at::AutoDispatchSkipFunctionalize guard; |
13460 | at::Tensor tmp_output = at::_ops::gt_Scalar_out::call(self_, other, out_); |
13461 | return out; |
13462 | } |
13463 | } else { |
13464 | at::Tensor tmp_output; |
13465 | { |
13466 | at::AutoDispatchSkipFunctionalize guard; |
13467 | tmp_output = at::_ops::gt_Scalar::call(self_, other); |
13468 | } |
13469 | at::functionalization::impl::replace_(out, tmp_output); |
13470 | at::functionalization::impl::commit_update(out); |
13471 | at::functionalization::impl::sync(out); |
13472 | return out; |
13473 | } |
13474 | } |
13475 | |
13476 | at::Tensor & gt__Scalar(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & other) { |
13477 | if (true) { |
13478 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
13479 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
13480 | // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
13481 | auto self_meta = to_meta(self); |
13482 | at::AutoDispatchSkipFunctionalize func_guard; |
13483 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
13484 | at::_ops::gt__Scalar::call(self_meta, other); |
13485 | } |
13486 | |
13487 | at::Tensor self_; |
13488 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
13489 | at::functionalization::impl::sync(self); |
13490 | self_ = at::functionalization::impl::from_functional_tensor(self); |
13491 | } else { |
13492 | self_ = self; |
13493 | } |
13494 | if (!(true && at::functionalization::impl::isFunctionalTensor(self))) { |
13495 | if ((false)) { |
13496 | // case 1: trying to mutate a non-functional tensor with a functional tensor is an error |
13497 | TORCH_INTERNAL_ASSERT(false, |
13498 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
13499 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
13500 | } else { |
13501 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
13502 | at::AutoDispatchSkipFunctionalize guard; |
13503 | at::Tensor tmp_output = at::_ops::gt__Scalar::call(self_, other); |
13504 | return self; |
13505 | } |
13506 | } else { |
13507 | at::Tensor tmp_output; |
13508 | { |
13509 | at::AutoDispatchSkipFunctionalize guard; |
13510 | tmp_output = at::_ops::gt_Scalar::call(self_, other); |
13511 | } |
13512 | at::functionalization::impl::replace_(self, tmp_output); |
13513 | at::functionalization::impl::commit_update(self); |
13514 | at::functionalization::impl::sync(self); |
13515 | return self; |
13516 | } |
13517 | } |
13518 | |
13519 | at::Tensor & gt_out_Tensor_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) { |
13520 | if (false) { |
13521 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
13522 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
13523 | // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
13524 | auto self_meta = to_meta(self); |
13525 | auto other_meta = to_meta(other); |
13526 | auto out_meta = to_meta(out); |
13527 | at::AutoDispatchSkipFunctionalize func_guard; |
13528 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
13529 | at::_ops::gt_Tensor_out::call(self_meta, other_meta, out_meta); |
13530 | } |
13531 | |
13532 | at::Tensor self_; |
13533 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
13534 | at::functionalization::impl::sync(self); |
13535 | self_ = at::functionalization::impl::from_functional_tensor(self); |
13536 | } else { |
13537 | self_ = self; |
13538 | } |
13539 | |
13540 | at::Tensor other_; |
13541 | if (at::functionalization::impl::isFunctionalTensor(other)) { |
13542 | at::functionalization::impl::sync(other); |
13543 | other_ = at::functionalization::impl::from_functional_tensor(other); |
13544 | } else { |
13545 | other_ = other; |
13546 | } |
13547 | |
13548 | at::Tensor out_; |
13549 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
13550 | at::functionalization::impl::sync(out); |
13551 | out_ = at::functionalization::impl::from_functional_tensor(out); |
13552 | } else { |
13553 | out_ = out; |
13554 | } |
13555 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
13556 | if ((false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(other))) { |
13557 | // case 1: trying to mutate a non-functional tensor with a functional tensor is an error |
13558 | TORCH_INTERNAL_ASSERT(false, |
13559 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
13560 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
13561 | } else { |
13562 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
13563 | at::AutoDispatchSkipFunctionalize guard; |
13564 | at::Tensor tmp_output = at::_ops::gt_Tensor_out::call(self_, other_, out_); |
13565 | return out; |
13566 | } |
13567 | } else { |
13568 | at::Tensor tmp_output; |
13569 | { |
13570 | at::AutoDispatchSkipFunctionalize guard; |
13571 | tmp_output = at::_ops::gt_Tensor::call(self_, other_); |
13572 | } |
13573 | at::functionalization::impl::replace_(out, tmp_output); |
13574 | at::functionalization::impl::commit_update(out); |
13575 | at::functionalization::impl::sync(out); |
13576 | return out; |
13577 | } |
13578 | } |
13579 | |
13580 | at::Tensor & gt__Tensor(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other) { |
13581 | if (true) { |
13582 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
13583 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
13584 | // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
13585 | auto self_meta = to_meta(self); |
13586 | auto other_meta = to_meta(other); |
13587 | at::AutoDispatchSkipFunctionalize func_guard; |
13588 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
13589 | at::_ops::gt__Tensor::call(self_meta, other_meta); |
13590 | } |
13591 | |
13592 | at::Tensor self_; |
13593 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
13594 | at::functionalization::impl::sync(self); |
13595 | self_ = at::functionalization::impl::from_functional_tensor(self); |
13596 | } else { |
13597 | self_ = self; |
13598 | } |
13599 | |
13600 | at::Tensor other_; |
13601 | if (at::functionalization::impl::isFunctionalTensor(other)) { |
13602 | at::functionalization::impl::sync(other); |
13603 | other_ = at::functionalization::impl::from_functional_tensor(other); |
13604 | } else { |
13605 | other_ = other; |
13606 | } |
13607 | if (!(true && at::functionalization::impl::isFunctionalTensor(self))) { |
13608 | if ((false || at::functionalization::impl::isFunctionalTensor(other))) { |
13609 | // case 1: trying to mutate a non-functional tensor with a functional tensor is an error |
13610 | TORCH_INTERNAL_ASSERT(false, |
13611 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
13612 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
13613 | } else { |
13614 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
13615 | at::AutoDispatchSkipFunctionalize guard; |
13616 | at::Tensor tmp_output = at::_ops::gt__Tensor::call(self_, other_); |
13617 | return self; |
13618 | } |
13619 | } else { |
13620 | at::Tensor tmp_output; |
13621 | { |
13622 | at::AutoDispatchSkipFunctionalize guard; |
13623 | tmp_output = at::_ops::gt_Tensor::call(self_, other_); |
13624 | } |
13625 | at::functionalization::impl::replace_(self, tmp_output); |
13626 | at::functionalization::impl::commit_update(self); |
13627 | at::functionalization::impl::sync(self); |
13628 | return self; |
13629 | } |
13630 | } |
13631 | |
13632 | at::Tensor & nonzero_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) { |
13633 | if (false) { |
13634 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
13635 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
13636 | // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
13637 | auto self_meta = to_meta(self); |
13638 | auto out_meta = to_meta(out); |
13639 | at::AutoDispatchSkipFunctionalize func_guard; |
13640 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
13641 | at::_ops::nonzero_out::call(self_meta, out_meta); |
13642 | } |
13643 | |
13644 | at::Tensor self_; |
13645 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
13646 | at::functionalization::impl::sync(self); |
13647 | self_ = at::functionalization::impl::from_functional_tensor(self); |
13648 | } else { |
13649 | self_ = self; |
13650 | } |
13651 | |
13652 | at::Tensor out_; |
13653 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
13654 | at::functionalization::impl::sync(out); |
13655 | out_ = at::functionalization::impl::from_functional_tensor(out); |
13656 | } else { |
13657 | out_ = out; |
13658 | } |
13659 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
13660 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
13661 | // case 1: trying to mutate a non-functional tensor with a functional tensor is an error |
13662 | TORCH_INTERNAL_ASSERT(false, |
13663 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
13664 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
13665 | } else { |
13666 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
13667 | at::AutoDispatchSkipFunctionalize guard; |
13668 | at::Tensor tmp_output = at::_ops::nonzero_out::call(self_, out_); |
13669 | return out; |
13670 | } |
13671 | } else { |
13672 | at::Tensor tmp_output; |
13673 | { |
13674 | at::AutoDispatchSkipFunctionalize guard; |
13675 | tmp_output = at::_ops::nonzero::call(self_); |
13676 | } |
13677 | at::functionalization::impl::replace_(out, tmp_output); |
13678 | at::functionalization::impl::commit_update(out); |
13679 | at::functionalization::impl::sync(out); |
13680 | return out; |
13681 | } |
13682 | } |
13683 | |
13684 | at::Tensor & addcdiv_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & tensor1, const at::Tensor & tensor2, const at::Scalar & value, at::Tensor & out) { |
13685 | if (false) { |
13686 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
13687 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
13688 | // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
13689 | auto self_meta = to_meta(self); |
13690 | auto tensor1_meta = to_meta(tensor1); |
13691 | auto tensor2_meta = to_meta(tensor2); |
13692 | auto out_meta = to_meta(out); |
13693 | at::AutoDispatchSkipFunctionalize func_guard; |
13694 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
13695 | at::_ops::addcdiv_out::call(self_meta, tensor1_meta, tensor2_meta, value, out_meta); |
13696 | } |
13697 | |
13698 | at::Tensor self_; |
13699 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
13700 | at::functionalization::impl::sync(self); |
13701 | self_ = at::functionalization::impl::from_functional_tensor(self); |
13702 | } else { |
13703 | self_ = self; |
13704 | } |
13705 | |
13706 | at::Tensor tensor1_; |
13707 | if (at::functionalization::impl::isFunctionalTensor(tensor1)) { |
13708 | at::functionalization::impl::sync(tensor1); |
13709 | tensor1_ = at::functionalization::impl::from_functional_tensor(tensor1); |
13710 | } else { |
13711 | tensor1_ = tensor1; |
13712 | } |
13713 | |
13714 | at::Tensor tensor2_; |
13715 | if (at::functionalization::impl::isFunctionalTensor(tensor2)) { |
13716 | at::functionalization::impl::sync(tensor2); |
13717 | tensor2_ = at::functionalization::impl::from_functional_tensor(tensor2); |
13718 | } else { |
13719 | tensor2_ = tensor2; |
13720 | } |
13721 | |
13722 | at::Tensor out_; |
13723 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
13724 | at::functionalization::impl::sync(out); |
13725 | out_ = at::functionalization::impl::from_functional_tensor(out); |
13726 | } else { |
13727 | out_ = out; |
13728 | } |
13729 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
13730 | if ((false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(tensor1) || at::functionalization::impl::isFunctionalTensor(tensor2))) { |
13731 | // case 1: trying to mutate a non-functional tensor with a functional tensor is an error |
13732 | TORCH_INTERNAL_ASSERT(false, |
13733 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
13734 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
13735 | } else { |
13736 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
13737 | at::AutoDispatchSkipFunctionalize guard; |
13738 | at::Tensor tmp_output = at::_ops::addcdiv_out::call(self_, tensor1_, tensor2_, value, out_); |
13739 | return out; |
13740 | } |
13741 | } else { |
13742 | at::Tensor tmp_output; |
13743 | { |
13744 | at::AutoDispatchSkipFunctionalize guard; |
13745 | tmp_output = at::_ops::addcdiv::call(self_, tensor1_, tensor2_, value); |
13746 | } |
13747 | at::functionalization::impl::replace_(out, tmp_output); |
13748 | at::functionalization::impl::commit_update(out); |
13749 | at::functionalization::impl::sync(out); |
13750 | return out; |
13751 | } |
13752 | } |
13753 | |
13754 | at::Tensor & addcdiv_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & tensor1, const at::Tensor & tensor2, const at::Scalar & value) { |
13755 | if (true) { |
13756 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
13757 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
13758 | // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
13759 | auto self_meta = to_meta(self); |
13760 | auto tensor1_meta = to_meta(tensor1); |
13761 | auto tensor2_meta = to_meta(tensor2); |
13762 | at::AutoDispatchSkipFunctionalize func_guard; |
13763 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
13764 | at::_ops::addcdiv_::call(self_meta, tensor1_meta, tensor2_meta, value); |
13765 | } |
13766 | |
13767 | at::Tensor self_; |
13768 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
13769 | at::functionalization::impl::sync(self); |
13770 | self_ = at::functionalization::impl::from_functional_tensor(self); |
13771 | } else { |
13772 | self_ = self; |
13773 | } |
13774 | |
13775 | at::Tensor tensor1_; |
13776 | if (at::functionalization::impl::isFunctionalTensor(tensor1)) { |
13777 | at::functionalization::impl::sync(tensor1); |
13778 | tensor1_ = at::functionalization::impl::from_functional_tensor(tensor1); |
13779 | } else { |
13780 | tensor1_ = tensor1; |
13781 | } |
13782 | |
13783 | at::Tensor tensor2_; |
13784 | if (at::functionalization::impl::isFunctionalTensor(tensor2)) { |
13785 | at::functionalization::impl::sync(tensor2); |
13786 | tensor2_ = at::functionalization::impl::from_functional_tensor(tensor2); |
13787 | } else { |
13788 | tensor2_ = tensor2; |
13789 | } |
13790 | if (!(true && at::functionalization::impl::isFunctionalTensor(self))) { |
13791 | if ((false || at::functionalization::impl::isFunctionalTensor(tensor1) || at::functionalization::impl::isFunctionalTensor(tensor2))) { |
13792 | // case 1: trying to mutate a non-functional tensor with a functional tensor is an error |
13793 | TORCH_INTERNAL_ASSERT(false, |
13794 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
13795 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
13796 | } else { |
13797 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
13798 | at::AutoDispatchSkipFunctionalize guard; |
13799 | at::Tensor tmp_output = at::_ops::addcdiv_::call(self_, tensor1_, tensor2_, value); |
13800 | return self; |
13801 | } |
13802 | } else { |
13803 | at::Tensor tmp_output; |
13804 | { |
13805 | at::AutoDispatchSkipFunctionalize guard; |
13806 | tmp_output = at::_ops::addcdiv::call(self_, tensor1_, tensor2_, value); |
13807 | } |
13808 | at::functionalization::impl::replace_(self, tmp_output); |
13809 | at::functionalization::impl::commit_update(self); |
13810 | at::functionalization::impl::sync(self); |
13811 | return self; |
13812 | } |
13813 | } |
13814 | |
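// Kernels for ops with multiple out= arguments return a tuple of references; each output
// (here X and M) gets its own replace_/commit_update/sync from the corresponding element
// of the functional result.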
13815 | ::std::tuple<at::Tensor &,at::Tensor &> triangular_solve_out_X(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & A, bool upper, bool transpose, bool unitriangular, at::Tensor & X, at::Tensor & M) { |
13816 | if (false) { |
13817 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
13818 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
13819 | // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
13820 | auto self_meta = to_meta(self); |
13821 | auto A_meta = to_meta(A); |
13822 | auto X_meta = to_meta(X); |
13823 | auto M_meta = to_meta(M); |
13824 | at::AutoDispatchSkipFunctionalize func_guard; |
13825 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
13826 | at::_ops::triangular_solve_X::call(self_meta, A_meta, upper, transpose, unitriangular, X_meta, M_meta); |
13827 | } |
13828 | |
13829 | at::Tensor self_; |
13830 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
13831 | at::functionalization::impl::sync(self); |
13832 | self_ = at::functionalization::impl::from_functional_tensor(self); |
13833 | } else { |
13834 | self_ = self; |
13835 | } |
13836 | |
13837 | at::Tensor A_; |
13838 | if (at::functionalization::impl::isFunctionalTensor(A)) { |
13839 | at::functionalization::impl::sync(A); |
13840 | A_ = at::functionalization::impl::from_functional_tensor(A); |
13841 | } else { |
13842 | A_ = A; |
13843 | } |
13844 | |
13845 | at::Tensor X_; |
13846 | if (at::functionalization::impl::isFunctionalTensor(X)) { |
13847 | at::functionalization::impl::sync(X); |
13848 | X_ = at::functionalization::impl::from_functional_tensor(X); |
13849 | } else { |
13850 | X_ = X; |
13851 | } |
13852 | |
13853 | at::Tensor M_; |
13854 | if (at::functionalization::impl::isFunctionalTensor(M)) { |
13855 | at::functionalization::impl::sync(M); |
13856 | M_ = at::functionalization::impl::from_functional_tensor(M); |
13857 | } else { |
13858 | M_ = M; |
13859 | } |
13860 | if (!(true && at::functionalization::impl::isFunctionalTensor(X) && at::functionalization::impl::isFunctionalTensor(M))) { |
13861 | if ((false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(A))) { |
13862 | // case 1: trying to mutate a non-functional tensor with a functional tensor is an error |
13863 | TORCH_INTERNAL_ASSERT(false, |
13864 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
13865 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
13866 | } else { |
13867 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
13868 | at::AutoDispatchSkipFunctionalize guard; |
13869 | ::std::tuple<at::Tensor,at::Tensor> tmp_output = at::_ops::triangular_solve_X::call(self_, A_, upper, transpose, unitriangular, X_, M_); |
13870 | return ::std::tuple<at::Tensor &,at::Tensor &>(X, M); |
13871 | } |
13872 | } else { |
13873 | ::std::tuple<at::Tensor,at::Tensor> tmp_output; |
13874 | { |
13875 | at::AutoDispatchSkipFunctionalize guard; |
13876 | tmp_output = at::_ops::triangular_solve::call(self_, A_, upper, transpose, unitriangular); |
13877 | } |
13878 | at::functionalization::impl::replace_(X, std::get<0>(tmp_output)); |
13879 | at::functionalization::impl::commit_update(X); |
13880 | at::functionalization::impl::sync(X); |
13881 | at::functionalization::impl::replace_(M, std::get<1>(tmp_output)); |
13882 | at::functionalization::impl::commit_update(M); |
13883 | at::functionalization::impl::sync(M); |
13884 | return ::std::tuple<at::Tensor &,at::Tensor &>(X, M); |
13885 | } |
13886 | } |
13887 | |
13888 | at::Tensor & cholesky_solve_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & input2, bool upper, at::Tensor & out) { |
13889 | if (false) { |
13890 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
13891 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
13892 | // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
13893 | auto self_meta = to_meta(self); |
13894 | auto input2_meta = to_meta(input2); |
13895 | auto out_meta = to_meta(out); |
13896 | at::AutoDispatchSkipFunctionalize func_guard; |
13897 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
13898 | at::_ops::cholesky_solve_out::call(self_meta, input2_meta, upper, out_meta); |
13899 | } |
13900 | |
13901 | at::Tensor self_; |
13902 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
13903 | at::functionalization::impl::sync(self); |
13904 | self_ = at::functionalization::impl::from_functional_tensor(self); |
13905 | } else { |
13906 | self_ = self; |
13907 | } |
13908 | |
13909 | at::Tensor input2_; |
13910 | if (at::functionalization::impl::isFunctionalTensor(input2)) { |
13911 | at::functionalization::impl::sync(input2); |
13912 | input2_ = at::functionalization::impl::from_functional_tensor(input2); |
13913 | } else { |
13914 | input2_ = input2; |
13915 | } |
13916 | |
13917 | at::Tensor out_; |
13918 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
13919 | at::functionalization::impl::sync(out); |
13920 | out_ = at::functionalization::impl::from_functional_tensor(out); |
13921 | } else { |
13922 | out_ = out; |
13923 | } |
13924 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
13925 | if ((false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(input2))) { |
13926 | // case 1: trying to mutate a non-functional tensor with a functional tensor is an error |
13927 | TORCH_INTERNAL_ASSERT(false, |
13928 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
13929 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
13930 | } else { |
13931 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
13932 | at::AutoDispatchSkipFunctionalize guard; |
13933 | at::Tensor tmp_output = at::_ops::cholesky_solve_out::call(self_, input2_, upper, out_); |
13934 | return out; |
13935 | } |
13936 | } else { |
13937 | at::Tensor tmp_output; |
13938 | { |
13939 | at::AutoDispatchSkipFunctionalize guard; |
13940 | tmp_output = at::_ops::cholesky_solve::call(self_, input2_, upper); |
13941 | } |
13942 | at::functionalization::impl::replace_(out, tmp_output); |
13943 | at::functionalization::impl::commit_update(out); |
13944 | at::functionalization::impl::sync(out); |
13945 | return out; |
13946 | } |
13947 | } |
13948 | |
13949 | at::Tensor & cholesky_inverse_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, bool upper, at::Tensor & out) { |
13950 | if (false) { |
13951 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
13952 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
13953 | // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
13954 | auto self_meta = to_meta(self); |
13955 | auto out_meta = to_meta(out); |
13956 | at::AutoDispatchSkipFunctionalize func_guard; |
13957 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
13958 | at::_ops::cholesky_inverse_out::call(self_meta, upper, out_meta); |
13959 | } |
13960 | |
13961 | at::Tensor self_; |
13962 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
13963 | at::functionalization::impl::sync(self); |
13964 | self_ = at::functionalization::impl::from_functional_tensor(self); |
13965 | } else { |
13966 | self_ = self; |
13967 | } |
13968 | |
13969 | at::Tensor out_; |
13970 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
13971 | at::functionalization::impl::sync(out); |
13972 | out_ = at::functionalization::impl::from_functional_tensor(out); |
13973 | } else { |
13974 | out_ = out; |
13975 | } |
13976 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
13977 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
13978 | // case 1: trying to mutate a non-functional tensor with a functional tensor is an error |
13979 | TORCH_INTERNAL_ASSERT(false, |
13980 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
13981 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
13982 | } else { |
13983 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
13984 | at::AutoDispatchSkipFunctionalize guard; |
13985 | at::Tensor tmp_output = at::_ops::cholesky_inverse_out::call(self_, upper, out_); |
13986 | return out; |
13987 | } |
13988 | } else { |
13989 | at::Tensor tmp_output; |
13990 | { |
13991 | at::AutoDispatchSkipFunctionalize guard; |
13992 | tmp_output = at::_ops::cholesky_inverse::call(self_, upper); |
13993 | } |
13994 | at::functionalization::impl::replace_(out, tmp_output); |
13995 | at::functionalization::impl::commit_update(out); |
13996 | at::functionalization::impl::sync(out); |
13997 | return out; |
13998 | } |
13999 | } |
14000 | |
14001 | ::std::tuple<at::Tensor &,at::Tensor &> qr_out_Q(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, bool some, at::Tensor & Q, at::Tensor & R) { |
14002 | if (false) { |
14003 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
14004 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
14005 | // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
14006 | auto self_meta = to_meta(self); |
14007 | auto Q_meta = to_meta(Q); |
14008 | auto R_meta = to_meta(R); |
14009 | at::AutoDispatchSkipFunctionalize func_guard; |
14010 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
14011 | at::_ops::qr_Q::call(self_meta, some, Q_meta, R_meta); |
14012 | } |
14013 | |
14014 | at::Tensor self_; |
14015 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
14016 | at::functionalization::impl::sync(self); |
14017 | self_ = at::functionalization::impl::from_functional_tensor(self); |
14018 | } else { |
14019 | self_ = self; |
14020 | } |
14021 | |
14022 | at::Tensor Q_; |
14023 | if (at::functionalization::impl::isFunctionalTensor(Q)) { |
14024 | at::functionalization::impl::sync(Q); |
14025 | Q_ = at::functionalization::impl::from_functional_tensor(Q); |
14026 | } else { |
14027 | Q_ = Q; |
14028 | } |
14029 | |
14030 | at::Tensor R_; |
14031 | if (at::functionalization::impl::isFunctionalTensor(R)) { |
14032 | at::functionalization::impl::sync(R); |
14033 | R_ = at::functionalization::impl::from_functional_tensor(R); |
14034 | } else { |
14035 | R_ = R; |
14036 | } |
14037 | if (!(true && at::functionalization::impl::isFunctionalTensor(Q) && at::functionalization::impl::isFunctionalTensor(R))) { |
14038 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
14039 | // case 1: trying to mutate a non-functional tensor with a functional tensor is an error |
14040 | TORCH_INTERNAL_ASSERT(false, |
14041 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
14042 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
14043 | } else { |
14044 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
14045 | at::AutoDispatchSkipFunctionalize guard; |
14046 | ::std::tuple<at::Tensor,at::Tensor> tmp_output = at::_ops::qr_Q::call(self_, some, Q_, R_); |
14047 | return ::std::tuple<at::Tensor &,at::Tensor &>(Q, R); |
14048 | } |
14049 | } else { |
14050 | ::std::tuple<at::Tensor,at::Tensor> tmp_output; |
14051 | { |
14052 | at::AutoDispatchSkipFunctionalize guard; |
14053 | tmp_output = at::_ops::qr::call(self_, some); |
14054 | } |
14055 | at::functionalization::impl::replace_(Q, std::get<0>(tmp_output)); |
14056 | at::functionalization::impl::commit_update(Q); |
14057 | at::functionalization::impl::sync(Q); |
14058 | at::functionalization::impl::replace_(R, std::get<1>(tmp_output)); |
14059 | at::functionalization::impl::commit_update(R); |
14060 | at::functionalization::impl::sync(R); |
14061 | return ::std::tuple<at::Tensor &,at::Tensor &>(Q, R); |
14062 | } |
14063 | } |
14064 | |
14065 | ::std::tuple<at::Tensor &,at::Tensor &> geqrf_out_a(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & a, at::Tensor & tau) { |
14066 | if (false) { |
14067 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
14068 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
14069 | // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
14070 | auto self_meta = to_meta(self); |
14071 | auto a_meta = to_meta(a); |
14072 | auto tau_meta = to_meta(tau); |
14073 | at::AutoDispatchSkipFunctionalize func_guard; |
14074 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
14075 | at::_ops::geqrf_a::call(self_meta, a_meta, tau_meta); |
14076 | } |
14077 | |
14078 | at::Tensor self_; |
14079 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
14080 | at::functionalization::impl::sync(self); |
14081 | self_ = at::functionalization::impl::from_functional_tensor(self); |
14082 | } else { |
14083 | self_ = self; |
14084 | } |
14085 | |
14086 | at::Tensor a_; |
14087 | if (at::functionalization::impl::isFunctionalTensor(a)) { |
14088 | at::functionalization::impl::sync(a); |
14089 | a_ = at::functionalization::impl::from_functional_tensor(a); |
14090 | } else { |
14091 | a_ = a; |
14092 | } |
14093 | |
14094 | at::Tensor tau_; |
14095 | if (at::functionalization::impl::isFunctionalTensor(tau)) { |
14096 | at::functionalization::impl::sync(tau); |
14097 | tau_ = at::functionalization::impl::from_functional_tensor(tau); |
14098 | } else { |
14099 | tau_ = tau; |
14100 | } |
14101 | if (!(true && at::functionalization::impl::isFunctionalTensor(a) && at::functionalization::impl::isFunctionalTensor(tau))) { |
14102 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
14103 | // case 1: trying to mutate a non-functional tensor with a functional tensor is an error |
14104 | TORCH_INTERNAL_ASSERT(false, |
14105 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
14106 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
14107 | } else { |
14108 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
14109 | at::AutoDispatchSkipFunctionalize guard; |
14110 | ::std::tuple<at::Tensor,at::Tensor> tmp_output = at::_ops::geqrf_a::call(self_, a_, tau_); |
14111 | return ::std::tuple<at::Tensor &,at::Tensor &>(a, tau); |
14112 | } |
14113 | } else { |
14114 | ::std::tuple<at::Tensor,at::Tensor> tmp_output; |
14115 | { |
14116 | at::AutoDispatchSkipFunctionalize guard; |
14117 | tmp_output = at::_ops::geqrf::call(self_); |
14118 | } |
14119 | at::functionalization::impl::replace_(a, std::get<0>(tmp_output)); |
14120 | at::functionalization::impl::commit_update(a); |
14121 | at::functionalization::impl::sync(a); |
14122 | at::functionalization::impl::replace_(tau, std::get<1>(tmp_output)); |
14123 | at::functionalization::impl::commit_update(tau); |
14124 | at::functionalization::impl::sync(tau); |
14125 | return ::std::tuple<at::Tensor &,at::Tensor &>(a, tau); |
14126 | } |
14127 | } |
14128 | |
14129 | at::Tensor & orgqr_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & input2, at::Tensor & out) { |
14130 | if (false) { |
14131 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
14132 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
14133 | // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
14134 | auto self_meta = to_meta(self); |
14135 | auto input2_meta = to_meta(input2); |
14136 | auto out_meta = to_meta(out); |
14137 | at::AutoDispatchSkipFunctionalize func_guard; |
14138 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
14139 | at::_ops::orgqr_out::call(self_meta, input2_meta, out_meta); |
14140 | } |
14141 | |
14142 | at::Tensor self_; |
14143 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
14144 | at::functionalization::impl::sync(self); |
14145 | self_ = at::functionalization::impl::from_functional_tensor(self); |
14146 | } else { |
14147 | self_ = self; |
14148 | } |
14149 | |
14150 | at::Tensor input2_; |
14151 | if (at::functionalization::impl::isFunctionalTensor(input2)) { |
14152 | at::functionalization::impl::sync(input2); |
14153 | input2_ = at::functionalization::impl::from_functional_tensor(input2); |
14154 | } else { |
14155 | input2_ = input2; |
14156 | } |
14157 | |
14158 | at::Tensor out_; |
14159 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
14160 | at::functionalization::impl::sync(out); |
14161 | out_ = at::functionalization::impl::from_functional_tensor(out); |
14162 | } else { |
14163 | out_ = out; |
14164 | } |
14165 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
14166 | if ((false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(input2))) { |
14167 | // case 1: trying to mutate a non-functional tensor with a functional tensor is an error |
14168 | TORCH_INTERNAL_ASSERT(false, |
14169 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
14170 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
14171 | } else { |
14172 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
14173 | at::AutoDispatchSkipFunctionalize guard; |
14174 | at::Tensor tmp_output = at::_ops::orgqr_out::call(self_, input2_, out_); |
14175 | return out; |
14176 | } |
14177 | } else { |
14178 | at::Tensor tmp_output; |
14179 | { |
14180 | at::AutoDispatchSkipFunctionalize guard; |
14181 | tmp_output = at::_ops::orgqr::call(self_, input2_); |
14182 | } |
14183 | at::functionalization::impl::replace_(out, tmp_output); |
14184 | at::functionalization::impl::commit_update(out); |
14185 | at::functionalization::impl::sync(out); |
14186 | return out; |
14187 | } |
14188 | } |
14189 | |
14190 | at::Tensor & lu_solve_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & LU_data, const at::Tensor & LU_pivots, at::Tensor & out) { |
14191 | if (false) { |
14192 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
14193 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
14194 | // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
14195 | auto self_meta = to_meta(self); |
14196 | auto LU_data_meta = to_meta(LU_data); |
14197 | auto LU_pivots_meta = to_meta(LU_pivots); |
14198 | auto out_meta = to_meta(out); |
14199 | at::AutoDispatchSkipFunctionalize func_guard; |
14200 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
14201 | at::_ops::lu_solve_out::call(self_meta, LU_data_meta, LU_pivots_meta, out_meta); |
14202 | } |
14203 | |
14204 | at::Tensor self_; |
14205 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
14206 | at::functionalization::impl::sync(self); |
14207 | self_ = at::functionalization::impl::from_functional_tensor(self); |
14208 | } else { |
14209 | self_ = self; |
14210 | } |
14211 | |
14212 | at::Tensor LU_data_; |
14213 | if (at::functionalization::impl::isFunctionalTensor(LU_data)) { |
14214 | at::functionalization::impl::sync(LU_data); |
14215 | LU_data_ = at::functionalization::impl::from_functional_tensor(LU_data); |
14216 | } else { |
14217 | LU_data_ = LU_data; |
14218 | } |
14219 | |
14220 | at::Tensor LU_pivots_; |
14221 | if (at::functionalization::impl::isFunctionalTensor(LU_pivots)) { |
14222 | at::functionalization::impl::sync(LU_pivots); |
14223 | LU_pivots_ = at::functionalization::impl::from_functional_tensor(LU_pivots); |
14224 | } else { |
14225 | LU_pivots_ = LU_pivots; |
14226 | } |
14227 | |
14228 | at::Tensor out_; |
14229 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
14230 | at::functionalization::impl::sync(out); |
14231 | out_ = at::functionalization::impl::from_functional_tensor(out); |
14232 | } else { |
14233 | out_ = out; |
14234 | } |
14235 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
14236 | if ((false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(LU_data) || at::functionalization::impl::isFunctionalTensor(LU_pivots))) { |
14237 | // case 1: trying to mutate a non-functional tensor with a functional tensor is an error |
14238 | TORCH_INTERNAL_ASSERT(false, |
14239 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
14240 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
14241 | } else { |
14242 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
14243 | at::AutoDispatchSkipFunctionalize guard; |
14244 | at::Tensor tmp_output = at::_ops::lu_solve_out::call(self_, LU_data_, LU_pivots_, out_); |
14245 | return out; |
14246 | } |
14247 | } else { |
14248 | at::Tensor tmp_output; |
14249 | { |
14250 | at::AutoDispatchSkipFunctionalize guard; |
14251 | tmp_output = at::_ops::lu_solve::call(self_, LU_data_, LU_pivots_); |
14252 | } |
14253 | at::functionalization::impl::replace_(out, tmp_output); |
14254 | at::functionalization::impl::commit_update(out); |
14255 | at::functionalization::impl::sync(out); |
14256 | return out; |
14257 | } |
14258 | } |
14259 | |
14260 | at::Tensor & lgamma_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) { |
14261 | if (false) { |
14262 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
14263 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
14264 | // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
14265 | auto self_meta = to_meta(self); |
14266 | auto out_meta = to_meta(out); |
14267 | at::AutoDispatchSkipFunctionalize func_guard; |
14268 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
14269 | at::_ops::lgamma_out::call(self_meta, out_meta); |
14270 | } |
14271 | |
14272 | at::Tensor self_; |
14273 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
14274 | at::functionalization::impl::sync(self); |
14275 | self_ = at::functionalization::impl::from_functional_tensor(self); |
14276 | } else { |
14277 | self_ = self; |
14278 | } |
14279 | |
14280 | at::Tensor out_; |
14281 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
14282 | at::functionalization::impl::sync(out); |
14283 | out_ = at::functionalization::impl::from_functional_tensor(out); |
14284 | } else { |
14285 | out_ = out; |
14286 | } |
14287 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
14288 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
14289 | // case 1: trying to mutate a non-functional tensor with a functional tensor is an error |
14290 | TORCH_INTERNAL_ASSERT(false, |
14291 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
14292 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
14293 | } else { |
14294 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
14295 | at::AutoDispatchSkipFunctionalize guard; |
14296 | at::Tensor tmp_output = at::_ops::lgamma_out::call(self_, out_); |
14297 | return out; |
14298 | } |
14299 | } else { |
14300 | at::Tensor tmp_output; |
14301 | { |
14302 | at::AutoDispatchSkipFunctionalize guard; |
14303 | tmp_output = at::_ops::lgamma::call(self_); |
14304 | } |
14305 | at::functionalization::impl::replace_(out, tmp_output); |
14306 | at::functionalization::impl::commit_update(out); |
14307 | at::functionalization::impl::sync(out); |
14308 | return out; |
14309 | } |
14310 | } |
14311 | |
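// For unary in-place ops such as lgamma_, `self` is the only tensor argument, so the
// "mutating a non-functional tensor with a functional tensor" branch can never trigger
// (its condition is just `(false)`).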
14312 | at::Tensor & lgamma_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) { |
14313 | if (true) { |
14314 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
14315 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
14316 | // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
14317 | auto self_meta = to_meta(self); |
14318 | at::AutoDispatchSkipFunctionalize func_guard; |
14319 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
14320 | at::_ops::lgamma_::call(self_meta); |
14321 | } |
14322 | |
14323 | at::Tensor self_; |
14324 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
14325 | at::functionalization::impl::sync(self); |
14326 | self_ = at::functionalization::impl::from_functional_tensor(self); |
14327 | } else { |
14328 | self_ = self; |
14329 | } |
14330 | if (!(true && at::functionalization::impl::isFunctionalTensor(self))) { |
14331 | if ((false)) { |
14332 | // case 1: trying to mutate a non-functional tensor with a functional tensor is an error |
14333 | TORCH_INTERNAL_ASSERT(false, |
14334 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
14335 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
14336 | } else { |
14337 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
14338 | at::AutoDispatchSkipFunctionalize guard; |
14339 | at::Tensor tmp_output = at::_ops::lgamma_::call(self_); |
14340 | return self; |
14341 | } |
14342 | } else { |
14343 | at::Tensor tmp_output; |
14344 | { |
14345 | at::AutoDispatchSkipFunctionalize guard; |
14346 | tmp_output = at::_ops::lgamma::call(self_); |
14347 | } |
14348 | at::functionalization::impl::replace_(self, tmp_output); |
14349 | at::functionalization::impl::commit_update(self); |
14350 | at::functionalization::impl::sync(self); |
14351 | return self; |
14352 | } |
14353 | } |
14354 | |
14355 | at::Tensor & erfinv_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) { |
14356 | if (false) { |
14357 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
14358 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
14359 | // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
14360 | auto self_meta = to_meta(self); |
14361 | auto out_meta = to_meta(out); |
14362 | at::AutoDispatchSkipFunctionalize func_guard; |
14363 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
14364 | at::_ops::erfinv_out::call(self_meta, out_meta); |
14365 | } |
14366 | |
14367 | at::Tensor self_; |
14368 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
14369 | at::functionalization::impl::sync(self); |
14370 | self_ = at::functionalization::impl::from_functional_tensor(self); |
14371 | } else { |
14372 | self_ = self; |
14373 | } |
14374 | |
14375 | at::Tensor out_; |
14376 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
14377 | at::functionalization::impl::sync(out); |
14378 | out_ = at::functionalization::impl::from_functional_tensor(out); |
14379 | } else { |
14380 | out_ = out; |
14381 | } |
14382 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
14383 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
14384 | // case 1: trying to mutate a non-functional tensor with a functional tensor is an error |
14385 | TORCH_INTERNAL_ASSERT(false, |
14386 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
14387 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
14388 | } else { |
14389 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
14390 | at::AutoDispatchSkipFunctionalize guard; |
14391 | at::Tensor tmp_output = at::_ops::erfinv_out::call(self_, out_); |
14392 | return out; |
14393 | } |
14394 | } else { |
14395 | at::Tensor tmp_output; |
14396 | { |
14397 | at::AutoDispatchSkipFunctionalize guard; |
14398 | tmp_output = at::_ops::erfinv::call(self_); |
14399 | } |
14400 | at::functionalization::impl::replace_(out, tmp_output); |
14401 | at::functionalization::impl::commit_update(out); |
14402 | at::functionalization::impl::sync(out); |
14403 | return out; |
14404 | } |
14405 | } |
14406 | |
14407 | at::Tensor & erfinv_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) { |
14408 | if (true) { |
14409 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
14410 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
14411 | // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
14412 | auto self_meta = to_meta(self); |
14413 | at::AutoDispatchSkipFunctionalize func_guard; |
14414 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
14415 | at::_ops::erfinv_::call(self_meta); |
14416 | } |
14417 | |
14418 | at::Tensor self_; |
14419 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
14420 | at::functionalization::impl::sync(self); |
14421 | self_ = at::functionalization::impl::from_functional_tensor(self); |
14422 | } else { |
14423 | self_ = self; |
14424 | } |
14425 | if (!(true && at::functionalization::impl::isFunctionalTensor(self))) { |
14426 | if ((false)) { |
14427 | // case 1: trying to mutate a non-functional tensor with a functional tensor is an error |
14428 | TORCH_INTERNAL_ASSERT(false, |
14429 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
14430 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
14431 | } else { |
14432 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
14433 | at::AutoDispatchSkipFunctionalize guard; |
14434 | at::Tensor tmp_output = at::_ops::erfinv_::call(self_); |
14435 | return self; |
14436 | } |
14437 | } else { |
14438 | at::Tensor tmp_output; |
14439 | { |
14440 | at::AutoDispatchSkipFunctionalize guard; |
14441 | tmp_output = at::_ops::erfinv::call(self_); |
14442 | } |
14443 | at::functionalization::impl::replace_(self, tmp_output); |
14444 | at::functionalization::impl::commit_update(self); |
14445 | at::functionalization::impl::sync(self); |
14446 | return self; |
14447 | } |
14448 | } |
14449 | |
14450 | at::Tensor & i0_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) { |
14451 | if (false) { |
14452 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
14453 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
14454 | // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
14455 | auto self_meta = to_meta(self); |
14456 | auto out_meta = to_meta(out); |
14457 | at::AutoDispatchSkipFunctionalize func_guard; |
14458 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
14459 | at::_ops::i0_out::call(self_meta, out_meta); |
14460 | } |
14461 | |
14462 | at::Tensor self_; |
14463 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
14464 | at::functionalization::impl::sync(self); |
14465 | self_ = at::functionalization::impl::from_functional_tensor(self); |
14466 | } else { |
14467 | self_ = self; |
14468 | } |
14469 | |
14470 | at::Tensor out_; |
14471 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
14472 | at::functionalization::impl::sync(out); |
14473 | out_ = at::functionalization::impl::from_functional_tensor(out); |
14474 | } else { |
14475 | out_ = out; |
14476 | } |
14477 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
14478 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
14479 | // case 1: trying to mutate a non-functional tensor with a functional tensor is an error |
14480 | TORCH_INTERNAL_ASSERT(false, |
14481 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
14482 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
14483 | } else { |
14484 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
14485 | at::AutoDispatchSkipFunctionalize guard; |
14486 | at::Tensor tmp_output = at::_ops::i0_out::call(self_, out_); |
14487 | return out; |
14488 | } |
14489 | } else { |
14490 | at::Tensor tmp_output; |
14491 | { |
14492 | at::AutoDispatchSkipFunctionalize guard; |
14493 | tmp_output = at::_ops::i0::call(self_); |
14494 | } |
14495 | at::functionalization::impl::replace_(out, tmp_output); |
14496 | at::functionalization::impl::commit_update(out); |
14497 | at::functionalization::impl::sync(out); |
14498 | return out; |
14499 | } |
14500 | } |
14501 | |
14502 | at::Tensor & i0_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) { |
14503 | if (true) { |
14504 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
14505 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
14506 | // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
14507 | auto self_meta = to_meta(self); |
14508 | at::AutoDispatchSkipFunctionalize func_guard; |
14509 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
14510 | at::_ops::i0_::call(self_meta); |
14511 | } |
14512 | |
14513 | at::Tensor self_; |
14514 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
14515 | at::functionalization::impl::sync(self); |
14516 | self_ = at::functionalization::impl::from_functional_tensor(self); |
14517 | } else { |
14518 | self_ = self; |
14519 | } |
14520 | if (!(true && at::functionalization::impl::isFunctionalTensor(self))) { |
14521 | if ((false)) { |
14522 | // case 1: trying to mutate a non-functional tensor with a functional tensor is an error |
14523 | TORCH_INTERNAL_ASSERT(false, |
14524 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
14525 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
14526 | } else { |
14527 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
14528 | at::AutoDispatchSkipFunctionalize guard; |
14529 | at::Tensor tmp_output = at::_ops::i0_::call(self_); |
14530 | return self; |
14531 | } |
14532 | } else { |
14533 | at::Tensor tmp_output; |
14534 | { |
14535 | at::AutoDispatchSkipFunctionalize guard; |
14536 | tmp_output = at::_ops::i0::call(self_); |
14537 | } |
14538 | at::functionalization::impl::replace_(self, tmp_output); |
14539 | at::functionalization::impl::commit_update(self); |
14540 | at::functionalization::impl::sync(self); |
14541 | return self; |
14542 | } |
14543 | } |
14544 | |
14545 | at::Tensor & sign_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) { |
14546 | if (false) { |
14547 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
14548 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
14549 | // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
14550 | auto self_meta = to_meta(self); |
14551 | auto out_meta = to_meta(out); |
14552 | at::AutoDispatchSkipFunctionalize func_guard; |
14553 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
14554 | at::_ops::sign_out::call(self_meta, out_meta); |
14555 | } |
14556 | |
14557 | at::Tensor self_; |
14558 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
14559 | at::functionalization::impl::sync(self); |
14560 | self_ = at::functionalization::impl::from_functional_tensor(self); |
14561 | } else { |
14562 | self_ = self; |
14563 | } |
14564 | |
14565 | at::Tensor out_; |
14566 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
14567 | at::functionalization::impl::sync(out); |
14568 | out_ = at::functionalization::impl::from_functional_tensor(out); |
14569 | } else { |
14570 | out_ = out; |
14571 | } |
14572 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
14573 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
14574 | // case 1: trying to mutate a non functional tensor with a functional tensor is an error |
14575 | TORCH_INTERNAL_ASSERT(false, |
14576 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
14577 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
14578 | } else { |
14579 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
14580 | at::AutoDispatchSkipFunctionalize guard; |
14581 | at::Tensor tmp_output = at::_ops::sign_out::call(self_, out_); |
14582 | return out; |
14583 | } |
14584 | } else { |
14585 | at::Tensor tmp_output; |
14586 | { |
14587 | at::AutoDispatchSkipFunctionalize guard; |
14588 | tmp_output = at::_ops::sign::call(self_); |
14589 | } |
14590 | at::functionalization::impl::replace_(out, tmp_output); |
14591 | at::functionalization::impl::commit_update(out); |
14592 | at::functionalization::impl::sync(out); |
14593 | return out; |
14594 | } |
14595 | } |
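// Case 2 above (no functional tensors anywhere) simply installs AutoDispatchSkipFunctionalize
// and redispatches the original out= op, so plain eager calls that reach this kernel are
// passed through essentially unchanged.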
14596 | |
14597 | at::Tensor & sign_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) { |
14598 | if (true) { |
14599 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
14600 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
14601 | // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
14602 | auto self_meta = to_meta(self); |
14603 | at::AutoDispatchSkipFunctionalize func_guard; |
14604 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
14605 | at::_ops::sign_::call(self_meta); |
14606 | } |
14607 | |
14608 | at::Tensor self_; |
14609 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
14610 | at::functionalization::impl::sync(self); |
14611 | self_ = at::functionalization::impl::from_functional_tensor(self); |
14612 | } else { |
14613 | self_ = self; |
14614 | } |
14615 | if (!(true && at::functionalization::impl::isFunctionalTensor(self))) { |
14616 | if ((false)) { |
14617 | // case 1: trying to mutate a non functional tensor with a functional tensor is an error |
14618 | TORCH_INTERNAL_ASSERT(false, |
14619 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
14620 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
14621 | } else { |
14622 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
14623 | at::AutoDispatchSkipFunctionalize guard; |
14624 | at::Tensor tmp_output = at::_ops::sign_::call(self_); |
14625 | return self; |
14626 | } |
14627 | } else { |
14628 | at::Tensor tmp_output; |
14629 | { |
14630 | at::AutoDispatchSkipFunctionalize guard; |
14631 | tmp_output = at::_ops::sign::call(self_); |
14632 | } |
14633 | at::functionalization::impl::replace_(self, tmp_output); |
14634 | at::functionalization::impl::commit_update(self); |
14635 | at::functionalization::impl::sync(self); |
14636 | return self; |
14637 | } |
14638 | } |
14639 | |
14640 | at::Tensor & signbit_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) { |
14641 | if (false) { |
14642 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
14643 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
14644 | // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
14645 | auto self_meta = to_meta(self); |
14646 | auto out_meta = to_meta(out); |
14647 | at::AutoDispatchSkipFunctionalize func_guard; |
14648 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
14649 | at::_ops::signbit_out::call(self_meta, out_meta); |
14650 | } |
14651 | |
14652 | at::Tensor self_; |
14653 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
14654 | at::functionalization::impl::sync(self); |
14655 | self_ = at::functionalization::impl::from_functional_tensor(self); |
14656 | } else { |
14657 | self_ = self; |
14658 | } |
14659 | |
14660 | at::Tensor out_; |
14661 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
14662 | at::functionalization::impl::sync(out); |
14663 | out_ = at::functionalization::impl::from_functional_tensor(out); |
14664 | } else { |
14665 | out_ = out; |
14666 | } |
14667 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
14668 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
14669 | // case 1: trying to mutate a non functional tensor with a functional tensor is an error |
14670 | TORCH_INTERNAL_ASSERT(false, |
14671 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
14672 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
14673 | } else { |
14674 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
14675 | at::AutoDispatchSkipFunctionalize guard; |
14676 | at::Tensor tmp_output = at::_ops::signbit_out::call(self_, out_); |
14677 | return out; |
14678 | } |
14679 | } else { |
14680 | at::Tensor tmp_output; |
14681 | { |
14682 | at::AutoDispatchSkipFunctionalize guard; |
14683 | tmp_output = at::_ops::signbit::call(self_); |
14684 | } |
14685 | at::functionalization::impl::replace_(out, tmp_output); |
14686 | at::functionalization::impl::commit_update(out); |
14687 | at::functionalization::impl::sync(out); |
14688 | return out; |
14689 | } |
14690 | } |
14691 | |
14692 | at::Tensor & atan2_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) { |
14693 | if (false) { |
14694 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
14695 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
14696 | // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
14697 | auto self_meta = to_meta(self); |
14698 | auto other_meta = to_meta(other); |
14699 | auto out_meta = to_meta(out); |
14700 | at::AutoDispatchSkipFunctionalize func_guard; |
14701 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
14702 | at::_ops::atan2_out::call(self_meta, other_meta, out_meta); |
14703 | } |
14704 | |
14705 | at::Tensor self_; |
14706 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
14707 | at::functionalization::impl::sync(self); |
14708 | self_ = at::functionalization::impl::from_functional_tensor(self); |
14709 | } else { |
14710 | self_ = self; |
14711 | } |
14712 | |
14713 | at::Tensor other_; |
14714 | if (at::functionalization::impl::isFunctionalTensor(other)) { |
14715 | at::functionalization::impl::sync(other); |
14716 | other_ = at::functionalization::impl::from_functional_tensor(other); |
14717 | } else { |
14718 | other_ = other; |
14719 | } |
14720 | |
14721 | at::Tensor out_; |
14722 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
14723 | at::functionalization::impl::sync(out); |
14724 | out_ = at::functionalization::impl::from_functional_tensor(out); |
14725 | } else { |
14726 | out_ = out; |
14727 | } |
14728 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
14729 | if ((false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(other))) { |
14730 | // case 1: trying to mutate a non functional tensor with a functional tensor is an error |
14731 | TORCH_INTERNAL_ASSERT(false, |
14732 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
14733 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
14734 | } else { |
14735 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
14736 | at::AutoDispatchSkipFunctionalize guard; |
14737 | at::Tensor tmp_output = at::_ops::atan2_out::call(self_, other_, out_); |
14738 | return out; |
14739 | } |
14740 | } else { |
14741 | at::Tensor tmp_output; |
14742 | { |
14743 | at::AutoDispatchSkipFunctionalize guard; |
14744 | tmp_output = at::_ops::atan2::call(self_, other_); |
14745 | } |
14746 | at::functionalization::impl::replace_(out, tmp_output); |
14747 | at::functionalization::impl::commit_update(out); |
14748 | at::functionalization::impl::sync(out); |
14749 | return out; |
14750 | } |
14751 | } |
14752 | |
14753 | at::Tensor & atan2_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other) { |
14754 | if (true) { |
14755 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
14756 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
14757 | // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
14758 | auto self_meta = to_meta(self); |
14759 | auto other_meta = to_meta(other); |
14760 | at::AutoDispatchSkipFunctionalize func_guard; |
14761 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
14762 | at::_ops::atan2_::call(self_meta, other_meta); |
14763 | } |
14764 | |
14765 | at::Tensor self_; |
14766 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
14767 | at::functionalization::impl::sync(self); |
14768 | self_ = at::functionalization::impl::from_functional_tensor(self); |
14769 | } else { |
14770 | self_ = self; |
14771 | } |
14772 | |
14773 | at::Tensor other_; |
14774 | if (at::functionalization::impl::isFunctionalTensor(other)) { |
14775 | at::functionalization::impl::sync(other); |
14776 | other_ = at::functionalization::impl::from_functional_tensor(other); |
14777 | } else { |
14778 | other_ = other; |
14779 | } |
14780 | if (!(true && at::functionalization::impl::isFunctionalTensor(self))) { |
14781 | if ((false || at::functionalization::impl::isFunctionalTensor(other))) { |
14782 | // case 1: trying to mutate a non functional tensor with a functional tensor is an error |
14783 | TORCH_INTERNAL_ASSERT(false, |
14784 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
14785 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
14786 | } else { |
14787 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
14788 | at::AutoDispatchSkipFunctionalize guard; |
14789 | at::Tensor tmp_output = at::_ops::atan2_::call(self_, other_); |
14790 | return self; |
14791 | } |
14792 | } else { |
14793 | at::Tensor tmp_output; |
14794 | { |
14795 | at::AutoDispatchSkipFunctionalize guard; |
14796 | tmp_output = at::_ops::atan2::call(self_, other_); |
14797 | } |
14798 | at::functionalization::impl::replace_(self, tmp_output); |
14799 | at::functionalization::impl::commit_update(self); |
14800 | at::functionalization::impl::sync(self); |
14801 | return self; |
14802 | } |
14803 | } |
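// For in-place binary ops such as atan2_, the mutated argument is `self`, so the
// "is the mutated tensor functional?" check is on `self` rather than `out`. If `self` is a
// plain tensor while `other` is functional, case 1 fires: committing functional data into a
// non-functional tensor would let a mutation escape the functionalization pass.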
14804 | |
14805 | void _histogramdd_bin_edges_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef bins, c10::optional<at::ArrayRef<double>> range, const c10::optional<at::Tensor> & weight, bool density, at::TensorList out) { |
14806 | if (false) { |
14807 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
14808 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
14809 | // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
14810 | auto self_meta = to_meta(self); |
14811 | auto weight_meta = to_meta(weight); |
14812 | auto out_meta = to_meta(out); |
14813 | at::AutoDispatchSkipFunctionalize func_guard; |
14814 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
14815 | at::_ops::_histogramdd_bin_edges_out::call(self_meta, bins, range, weight_meta, density, out_meta); |
14816 | } |
14817 | |
14818 | at::Tensor self_; |
14819 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
14820 | at::functionalization::impl::sync(self); |
14821 | self_ = at::functionalization::impl::from_functional_tensor(self); |
14822 | } else { |
14823 | self_ = self; |
14824 | } |
14825 | |
14826 | c10::optional<at::Tensor> weight_; |
14827 | if (at::functionalization::impl::isFunctionalTensor(weight)) { |
14828 | at::functionalization::impl::sync(weight); |
14829 | weight_ = at::functionalization::impl::from_functional_tensor(weight); |
14830 | } else { |
14831 | weight_ = weight; |
14832 | } |
14833 | |
14834 | ::std::vector<at::Tensor> out_; |
14835 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
14836 | at::functionalization::impl::sync(out); |
14837 | out_ = at::functionalization::impl::from_functional_tensor(out); |
14838 | } else { |
14839 | out_ = out.vec(); |
14840 | } |
14841 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
14842 | if ((false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(weight))) { |
14843 | // case 1: trying to mutate a non functional tensor with a functional tensor is an error |
14844 | TORCH_INTERNAL_ASSERT(false, |
14845 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
14846 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
14847 | } else { |
14848 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
14849 | at::AutoDispatchSkipFunctionalize guard; |
14850 | at::_ops::_histogramdd_bin_edges_out::call(self_, bins, range, weight_, density, out_); |
14852 | } |
14853 | } else { |
14854 | ::std::vector<at::Tensor> tmp_output; |
14855 | { |
14856 | at::AutoDispatchSkipFunctionalize guard; |
14857 | tmp_output = at::_ops::_histogramdd_bin_edges::call(self_, bins, range, weight_, density); |
14858 | } |
14859 | at::functionalization::impl::replace_(out, tmp_output); |
14860 | at::functionalization::impl::commit_update(out); |
14861 | at::functionalization::impl::sync(out); |
14862 | |
14863 | } |
14864 | } |
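// Ops whose out= argument is a TensorList, like _histogramdd_bin_edges.out above, return void:
// the unwrapped outputs are held as a std::vector<at::Tensor>, and when the list is functional,
// replace_()/commit_update()/sync() are applied to the TensorList as a whole.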
14865 | |
14866 | at::Tensor & _histogramdd_from_bin_tensors_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::TensorList bins, const c10::optional<at::Tensor> & weight, bool density, at::Tensor & out) { |
14867 | if (false) { |
14868 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
14869 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
14870 | // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
14871 | auto self_meta = to_meta(self); |
14872 | auto bins_meta = to_meta(bins); |
14873 | auto weight_meta = to_meta(weight); |
14874 | auto out_meta = to_meta(out); |
14875 | at::AutoDispatchSkipFunctionalize func_guard; |
14876 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
14877 | at::_ops::_histogramdd_from_bin_tensors_out::call(self_meta, bins_meta, weight_meta, density, out_meta); |
14878 | } |
14879 | |
14880 | at::Tensor self_; |
14881 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
14882 | at::functionalization::impl::sync(self); |
14883 | self_ = at::functionalization::impl::from_functional_tensor(self); |
14884 | } else { |
14885 | self_ = self; |
14886 | } |
14887 | |
14888 | ::std::vector<at::Tensor> bins_; |
14889 | if (at::functionalization::impl::isFunctionalTensor(bins)) { |
14890 | at::functionalization::impl::sync(bins); |
14891 | bins_ = at::functionalization::impl::from_functional_tensor(bins); |
14892 | } else { |
14893 | bins_ = bins.vec(); |
14894 | } |
14895 | |
14896 | c10::optional<at::Tensor> weight_; |
14897 | if (at::functionalization::impl::isFunctionalTensor(weight)) { |
14898 | at::functionalization::impl::sync(weight); |
14899 | weight_ = at::functionalization::impl::from_functional_tensor(weight); |
14900 | } else { |
14901 | weight_ = weight; |
14902 | } |
14903 | |
14904 | at::Tensor out_; |
14905 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
14906 | at::functionalization::impl::sync(out); |
14907 | out_ = at::functionalization::impl::from_functional_tensor(out); |
14908 | } else { |
14909 | out_ = out; |
14910 | } |
14911 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
14912 | if ((false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(bins) || at::functionalization::impl::isFunctionalTensor(weight))) { |
14913 | // case 1: trying to mutate a non functional tensor with a functional tensor is an error |
14914 | TORCH_INTERNAL_ASSERT(false, |
14915 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
14916 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
14917 | } else { |
14918 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
14919 | at::AutoDispatchSkipFunctionalize guard; |
14920 | at::Tensor tmp_output = at::_ops::_histogramdd_from_bin_tensors_out::call(self_, bins_, weight_, density, out_); |
14921 | return out; |
14922 | } |
14923 | } else { |
14924 | at::Tensor tmp_output; |
14925 | { |
14926 | at::AutoDispatchSkipFunctionalize guard; |
14927 | tmp_output = at::_ops::_histogramdd_from_bin_tensors::call(self_, bins_, weight_, density); |
14928 | } |
14929 | at::functionalization::impl::replace_(out, tmp_output); |
14930 | at::functionalization::impl::commit_update(out); |
14931 | at::functionalization::impl::sync(out); |
14932 | return out; |
14933 | } |
14934 | } |
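// TensorList inputs (here `bins`) are unwrapped the same way as single tensors, except that
// the non-functional branch copies the list with .vec() so the kernel always owns a
// std::vector<at::Tensor> it can hand to the redispatched call.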
14935 | |
14936 | at::Tensor & fmod_out_Scalar_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other, at::Tensor & out) { |
14937 | if (false) { |
14938 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
14939 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
14940 | // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
14941 | auto self_meta = to_meta(self); |
14942 | auto out_meta = to_meta(out); |
14943 | at::AutoDispatchSkipFunctionalize func_guard; |
14944 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
14945 | at::_ops::fmod_Scalar_out::call(self_meta, other, out_meta); |
14946 | } |
14947 | |
14948 | at::Tensor self_; |
14949 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
14950 | at::functionalization::impl::sync(self); |
14951 | self_ = at::functionalization::impl::from_functional_tensor(self); |
14952 | } else { |
14953 | self_ = self; |
14954 | } |
14955 | |
14956 | at::Tensor out_; |
14957 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
14958 | at::functionalization::impl::sync(out); |
14959 | out_ = at::functionalization::impl::from_functional_tensor(out); |
14960 | } else { |
14961 | out_ = out; |
14962 | } |
14963 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
14964 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
14965 | // case 1: trying to mutate a non functional tensor with a functional tensor is an error |
14966 | TORCH_INTERNAL_ASSERT(false, |
14967 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
14968 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
14969 | } else { |
14970 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
14971 | at::AutoDispatchSkipFunctionalize guard; |
14972 | at::Tensor tmp_output = at::_ops::fmod_Scalar_out::call(self_, other, out_); |
14973 | return out; |
14974 | } |
14975 | } else { |
14976 | at::Tensor tmp_output; |
14977 | { |
14978 | at::AutoDispatchSkipFunctionalize guard; |
14979 | tmp_output = at::_ops::fmod_Scalar::call(self_, other); |
14980 | } |
14981 | at::functionalization::impl::replace_(out, tmp_output); |
14982 | at::functionalization::impl::commit_update(out); |
14983 | at::functionalization::impl::sync(out); |
14984 | return out; |
14985 | } |
14986 | } |
14987 | |
14988 | at::Tensor & fmod__Scalar(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & other) { |
14989 | if (true) { |
14990 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
14991 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
14992 | // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
14993 | auto self_meta = to_meta(self); |
14994 | at::AutoDispatchSkipFunctionalize func_guard; |
14995 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
14996 | at::_ops::fmod__Scalar::call(self_meta, other); |
14997 | } |
14998 | |
14999 | at::Tensor self_; |
15000 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
15001 | at::functionalization::impl::sync(self); |
15002 | self_ = at::functionalization::impl::from_functional_tensor(self); |
15003 | } else { |
15004 | self_ = self; |
15005 | } |
15006 | if (!(true && at::functionalization::impl::isFunctionalTensor(self))) { |
15007 | if ((false)) { |
15008 | // case 1: trying to mutate a non functional tensor with a functional tensor is an error |
15009 | TORCH_INTERNAL_ASSERT(false, |
15010 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
15011 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
15012 | } else { |
15013 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
15014 | at::AutoDispatchSkipFunctionalize guard; |
15015 | at::Tensor tmp_output = at::_ops::fmod__Scalar::call(self_, other); |
15016 | return self; |
15017 | } |
15018 | } else { |
15019 | at::Tensor tmp_output; |
15020 | { |
15021 | at::AutoDispatchSkipFunctionalize guard; |
15022 | tmp_output = at::_ops::fmod_Scalar::call(self_, other); |
15023 | } |
15024 | at::functionalization::impl::replace_(self, tmp_output); |
15025 | at::functionalization::impl::commit_update(self); |
15026 | at::functionalization::impl::sync(self); |
15027 | return self; |
15028 | } |
15029 | } |
15030 | |
15031 | at::Tensor & fmod_out_Tensor_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) { |
15032 | if (false) { |
15033 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
15034 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
15035 | // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
15036 | auto self_meta = to_meta(self); |
15037 | auto other_meta = to_meta(other); |
15038 | auto out_meta = to_meta(out); |
15039 | at::AutoDispatchSkipFunctionalize func_guard; |
15040 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
15041 | at::_ops::fmod_Tensor_out::call(self_meta, other_meta, out_meta); |
15042 | } |
15043 | |
15044 | at::Tensor self_; |
15045 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
15046 | at::functionalization::impl::sync(self); |
15047 | self_ = at::functionalization::impl::from_functional_tensor(self); |
15048 | } else { |
15049 | self_ = self; |
15050 | } |
15051 | |
15052 | at::Tensor other_; |
15053 | if (at::functionalization::impl::isFunctionalTensor(other)) { |
15054 | at::functionalization::impl::sync(other); |
15055 | other_ = at::functionalization::impl::from_functional_tensor(other); |
15056 | } else { |
15057 | other_ = other; |
15058 | } |
15059 | |
15060 | at::Tensor out_; |
15061 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
15062 | at::functionalization::impl::sync(out); |
15063 | out_ = at::functionalization::impl::from_functional_tensor(out); |
15064 | } else { |
15065 | out_ = out; |
15066 | } |
15067 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
15068 | if ((false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(other))) { |
15069 | // case 1: trying to mutate a non functional tensor with a functional tensor is an error |
15070 | TORCH_INTERNAL_ASSERT(false, |
15071 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
15072 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
15073 | } else { |
15074 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
15075 | at::AutoDispatchSkipFunctionalize guard; |
15076 | at::Tensor tmp_output = at::_ops::fmod_Tensor_out::call(self_, other_, out_); |
15077 | return out; |
15078 | } |
15079 | } else { |
15080 | at::Tensor tmp_output; |
15081 | { |
15082 | at::AutoDispatchSkipFunctionalize guard; |
15083 | tmp_output = at::_ops::fmod_Tensor::call(self_, other_); |
15084 | } |
15085 | at::functionalization::impl::replace_(out, tmp_output); |
15086 | at::functionalization::impl::commit_update(out); |
15087 | at::functionalization::impl::sync(out); |
15088 | return out; |
15089 | } |
15090 | } |
15091 | |
15092 | at::Tensor & fmod__Tensor(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other) { |
15093 | if (true) { |
15094 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
15095 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
15096 | // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
15097 | auto self_meta = to_meta(self); |
15098 | auto other_meta = to_meta(other); |
15099 | at::AutoDispatchSkipFunctionalize func_guard; |
15100 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
15101 | at::_ops::fmod__Tensor::call(self_meta, other_meta); |
15102 | } |
15103 | |
15104 | at::Tensor self_; |
15105 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
15106 | at::functionalization::impl::sync(self); |
15107 | self_ = at::functionalization::impl::from_functional_tensor(self); |
15108 | } else { |
15109 | self_ = self; |
15110 | } |
15111 | |
15112 | at::Tensor other_; |
15113 | if (at::functionalization::impl::isFunctionalTensor(other)) { |
15114 | at::functionalization::impl::sync(other); |
15115 | other_ = at::functionalization::impl::from_functional_tensor(other); |
15116 | } else { |
15117 | other_ = other; |
15118 | } |
15119 | if (!(true && at::functionalization::impl::isFunctionalTensor(self))) { |
15120 | if ((false || at::functionalization::impl::isFunctionalTensor(other))) { |
15121 | // case 1: trying to mutate a non functional tensor with a functional tensor is an error |
15122 | TORCH_INTERNAL_ASSERT(false, |
15123 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
15124 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
15125 | } else { |
15126 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
15127 | at::AutoDispatchSkipFunctionalize guard; |
15128 | at::Tensor tmp_output = at::_ops::fmod__Tensor::call(self_, other_); |
15129 | return self; |
15130 | } |
15131 | } else { |
15132 | at::Tensor tmp_output; |
15133 | { |
15134 | at::AutoDispatchSkipFunctionalize guard; |
15135 | tmp_output = at::_ops::fmod_Tensor::call(self_, other_); |
15136 | } |
15137 | at::functionalization::impl::replace_(self, tmp_output); |
15138 | at::functionalization::impl::commit_update(self); |
15139 | at::functionalization::impl::sync(self); |
15140 | return self; |
15141 | } |
15142 | } |
15143 | |
15144 | at::Tensor & nextafter_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) { |
15145 | if (false) { |
15146 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
15147 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
15148 | // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
15149 | auto self_meta = to_meta(self); |
15150 | auto other_meta = to_meta(other); |
15151 | auto out_meta = to_meta(out); |
15152 | at::AutoDispatchSkipFunctionalize func_guard; |
15153 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
15154 | at::_ops::nextafter_out::call(self_meta, other_meta, out_meta); |
15155 | } |
15156 | |
15157 | at::Tensor self_; |
15158 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
15159 | at::functionalization::impl::sync(self); |
15160 | self_ = at::functionalization::impl::from_functional_tensor(self); |
15161 | } else { |
15162 | self_ = self; |
15163 | } |
15164 | |
15165 | at::Tensor other_; |
15166 | if (at::functionalization::impl::isFunctionalTensor(other)) { |
15167 | at::functionalization::impl::sync(other); |
15168 | other_ = at::functionalization::impl::from_functional_tensor(other); |
15169 | } else { |
15170 | other_ = other; |
15171 | } |
15172 | |
15173 | at::Tensor out_; |
15174 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
15175 | at::functionalization::impl::sync(out); |
15176 | out_ = at::functionalization::impl::from_functional_tensor(out); |
15177 | } else { |
15178 | out_ = out; |
15179 | } |
15180 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
15181 | if ((false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(other))) { |
15182 | // case 1: trying to mutate a non functional tensor with a functional tensor is an error |
15183 | TORCH_INTERNAL_ASSERT(false, |
15184 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
15185 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
15186 | } else { |
15187 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
15188 | at::AutoDispatchSkipFunctionalize guard; |
15189 | at::Tensor tmp_output = at::_ops::nextafter_out::call(self_, other_, out_); |
15190 | return out; |
15191 | } |
15192 | } else { |
15193 | at::Tensor tmp_output; |
15194 | { |
15195 | at::AutoDispatchSkipFunctionalize guard; |
15196 | tmp_output = at::_ops::nextafter::call(self_, other_); |
15197 | } |
15198 | at::functionalization::impl::replace_(out, tmp_output); |
15199 | at::functionalization::impl::commit_update(out); |
15200 | at::functionalization::impl::sync(out); |
15201 | return out; |
15202 | } |
15203 | } |
15204 | |
15205 | at::Tensor & nextafter_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other) { |
15206 | if (true) { |
15207 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
15208 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
15209 | // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
15210 | auto self_meta = to_meta(self); |
15211 | auto other_meta = to_meta(other); |
15212 | at::AutoDispatchSkipFunctionalize func_guard; |
15213 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
15214 | at::_ops::nextafter_::call(self_meta, other_meta); |
15215 | } |
15216 | |
15217 | at::Tensor self_; |
15218 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
15219 | at::functionalization::impl::sync(self); |
15220 | self_ = at::functionalization::impl::from_functional_tensor(self); |
15221 | } else { |
15222 | self_ = self; |
15223 | } |
15224 | |
15225 | at::Tensor other_; |
15226 | if (at::functionalization::impl::isFunctionalTensor(other)) { |
15227 | at::functionalization::impl::sync(other); |
15228 | other_ = at::functionalization::impl::from_functional_tensor(other); |
15229 | } else { |
15230 | other_ = other; |
15231 | } |
15232 | if (!(true && at::functionalization::impl::isFunctionalTensor(self))) { |
15233 | if ((false || at::functionalization::impl::isFunctionalTensor(other))) { |
15234 | // case 1: trying to mutate a non functional tensor with a functional tensor is an error |
15235 | TORCH_INTERNAL_ASSERT(false, |
15236 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
15237 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
15238 | } else { |
15239 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
15240 | at::AutoDispatchSkipFunctionalize guard; |
15241 | at::Tensor tmp_output = at::_ops::nextafter_::call(self_, other_); |
15242 | return self; |
15243 | } |
15244 | } else { |
15245 | at::Tensor tmp_output; |
15246 | { |
15247 | at::AutoDispatchSkipFunctionalize guard; |
15248 | tmp_output = at::_ops::nextafter::call(self_, other_); |
15249 | } |
15250 | at::functionalization::impl::replace_(self, tmp_output); |
15251 | at::functionalization::impl::commit_update(self); |
15252 | at::functionalization::impl::sync(self); |
15253 | return self; |
15254 | } |
15255 | } |
15256 | |
15257 | at::Tensor & minimum_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) { |
15258 | if (false) { |
15259 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
15260 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
15261 | // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
15262 | auto self_meta = to_meta(self); |
15263 | auto other_meta = to_meta(other); |
15264 | auto out_meta = to_meta(out); |
15265 | at::AutoDispatchSkipFunctionalize func_guard; |
15266 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
15267 | at::_ops::minimum_out::call(self_meta, other_meta, out_meta); |
15268 | } |
15269 | |
15270 | at::Tensor self_; |
15271 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
15272 | at::functionalization::impl::sync(self); |
15273 | self_ = at::functionalization::impl::from_functional_tensor(self); |
15274 | } else { |
15275 | self_ = self; |
15276 | } |
15277 | |
15278 | at::Tensor other_; |
15279 | if (at::functionalization::impl::isFunctionalTensor(other)) { |
15280 | at::functionalization::impl::sync(other); |
15281 | other_ = at::functionalization::impl::from_functional_tensor(other); |
15282 | } else { |
15283 | other_ = other; |
15284 | } |
15285 | |
15286 | at::Tensor out_; |
15287 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
15288 | at::functionalization::impl::sync(out); |
15289 | out_ = at::functionalization::impl::from_functional_tensor(out); |
15290 | } else { |
15291 | out_ = out; |
15292 | } |
15293 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
15294 | if ((false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(other))) { |
15295 | // case 1: trying to mutate a non functional tensor with a functional tensor is an error |
15296 | TORCH_INTERNAL_ASSERT(false, |
15297 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
15298 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
15299 | } else { |
15300 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
15301 | at::AutoDispatchSkipFunctionalize guard; |
15302 | at::Tensor tmp_output = at::_ops::minimum_out::call(self_, other_, out_); |
15303 | return out; |
15304 | } |
15305 | } else { |
15306 | at::Tensor tmp_output; |
15307 | { |
15308 | at::AutoDispatchSkipFunctionalize guard; |
15309 | tmp_output = at::_ops::minimum::call(self_, other_); |
15310 | } |
15311 | at::functionalization::impl::replace_(out, tmp_output); |
15312 | at::functionalization::impl::commit_update(out); |
15313 | at::functionalization::impl::sync(out); |
15314 | return out; |
15315 | } |
15316 | } |
15317 | |
15318 | ::std::tuple<at::Tensor &,at::Tensor &> topk_out_values(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t k, int64_t dim, bool largest, bool sorted, at::Tensor & values, at::Tensor & indices) { |
15319 | if (false) { |
15320 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
15321 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
15322 | // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
15323 | auto self_meta = to_meta(self); |
15324 | auto values_meta = to_meta(values); |
15325 | auto indices_meta = to_meta(indices); |
15326 | at::AutoDispatchSkipFunctionalize func_guard; |
15327 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
15328 | at::_ops::topk_values::call(self_meta, k, dim, largest, sorted, values_meta, indices_meta); |
15329 | } |
15330 | |
15331 | at::Tensor self_; |
15332 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
15333 | at::functionalization::impl::sync(self); |
15334 | self_ = at::functionalization::impl::from_functional_tensor(self); |
15335 | } else { |
15336 | self_ = self; |
15337 | } |
15338 | |
15339 | at::Tensor values_; |
15340 | if (at::functionalization::impl::isFunctionalTensor(values)) { |
15341 | at::functionalization::impl::sync(values); |
15342 | values_ = at::functionalization::impl::from_functional_tensor(values); |
15343 | } else { |
15344 | values_ = values; |
15345 | } |
15346 | |
15347 | at::Tensor indices_; |
15348 | if (at::functionalization::impl::isFunctionalTensor(indices)) { |
15349 | at::functionalization::impl::sync(indices); |
15350 | indices_ = at::functionalization::impl::from_functional_tensor(indices); |
15351 | } else { |
15352 | indices_ = indices; |
15353 | } |
15354 | if (!(true && at::functionalization::impl::isFunctionalTensor(values) && at::functionalization::impl::isFunctionalTensor(indices))) { |
15355 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
15356 | // case 1: trying to mutate a non functional tensor with a functional tensor is an error |
15357 | TORCH_INTERNAL_ASSERT(false, |
15358 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
15359 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
15360 | } else { |
15361 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
15362 | at::AutoDispatchSkipFunctionalize guard; |
15363 | ::std::tuple<at::Tensor,at::Tensor> tmp_output = at::_ops::topk_values::call(self_, k, dim, largest, sorted, values_, indices_); |
15364 | return ::std::tuple<at::Tensor &,at::Tensor &>(values, indices); |
15365 | } |
15366 | } else { |
15367 | ::std::tuple<at::Tensor,at::Tensor> tmp_output; |
15368 | { |
15369 | at::AutoDispatchSkipFunctionalize guard; |
15370 | tmp_output = at::_ops::topk::call(self_, k, dim, largest, sorted); |
15371 | } |
15372 | at::functionalization::impl::replace_(values, std::get<0>(tmp_output)); |
15373 | at::functionalization::impl::commit_update(values); |
15374 | at::functionalization::impl::sync(values); |
15375 | at::functionalization::impl::replace_(indices, std::get<1>(tmp_output)); |
15376 | at::functionalization::impl::commit_update(indices); |
15377 | at::functionalization::impl::sync(indices); |
15378 | return ::std::tuple<at::Tensor &,at::Tensor &>(values, indices); |
15379 | } |
15380 | } |
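// Ops with several out= tensors (topk's values/indices above) use a functional variant that
// returns a std::tuple; each element is written back separately via std::get<i>(tmp_output)
// before references to the original arguments are returned.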
15381 | |
15382 | at::Tensor & any_out_all_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) { |
15383 | if (false) { |
15384 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
15385 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
15386 | // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
15387 | auto self_meta = to_meta(self); |
15388 | auto out_meta = to_meta(out); |
15389 | at::AutoDispatchSkipFunctionalize func_guard; |
15390 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
15391 | at::_ops::any_all_out::call(self_meta, out_meta); |
15392 | } |
15393 | |
15394 | at::Tensor self_; |
15395 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
15396 | at::functionalization::impl::sync(self); |
15397 | self_ = at::functionalization::impl::from_functional_tensor(self); |
15398 | } else { |
15399 | self_ = self; |
15400 | } |
15401 | |
15402 | at::Tensor out_; |
15403 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
15404 | at::functionalization::impl::sync(out); |
15405 | out_ = at::functionalization::impl::from_functional_tensor(out); |
15406 | } else { |
15407 | out_ = out; |
15408 | } |
15409 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
15410 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
15411 | // case 1: trying to mutate a non functional tensor with a functional tensor is an error |
15412 | TORCH_INTERNAL_ASSERT(false, |
15413 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
15414 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
15415 | } else { |
15416 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
15417 | at::AutoDispatchSkipFunctionalize guard; |
15418 | at::Tensor tmp_output = at::_ops::any_all_out::call(self_, out_); |
15419 | return out; |
15420 | } |
15421 | } else { |
15422 | at::Tensor tmp_output; |
15423 | { |
15424 | at::AutoDispatchSkipFunctionalize guard; |
15425 | tmp_output = at::_ops::any::call(self_); |
15426 | } |
15427 | at::functionalization::impl::replace_(out, tmp_output); |
15428 | at::functionalization::impl::commit_update(out); |
15429 | at::functionalization::impl::sync(out); |
15430 | return out; |
15431 | } |
15432 | } |
15433 | |
15434 | void _foreach_mul_out_Scalar_out(c10::DispatchKeySet dispatchKeySet, at::TensorList self, const at::Scalar & scalar, at::TensorList out) { |
15435 | if (false) { |
15436 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
15437 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
15438 | // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
15439 | auto self_meta = to_meta(self); |
15440 | auto out_meta = to_meta(out); |
15441 | at::AutoDispatchSkipFunctionalize func_guard; |
15442 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
15443 | at::_ops::_foreach_mul_Scalar_out::call(self_meta, scalar, out_meta); |
15444 | } |
15445 | |
15446 | ::std::vector<at::Tensor> self_; |
15447 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
15448 | at::functionalization::impl::sync(self); |
15449 | self_ = at::functionalization::impl::from_functional_tensor(self); |
15450 | } else { |
15451 | self_ = self.vec(); |
15452 | } |
15453 | |
15454 | ::std::vector<at::Tensor> out_; |
15455 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
15456 | at::functionalization::impl::sync(out); |
15457 | out_ = at::functionalization::impl::from_functional_tensor(out); |
15458 | } else { |
15459 | out_ = out.vec(); |
15460 | } |
15461 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
15462 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
15463 | // case 1: trying to mutate a non functional tensor with a functional tensor is an error |
15464 | TORCH_INTERNAL_ASSERT(false, |
15465 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
15466 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
15467 | } else { |
15468 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
15469 | at::AutoDispatchSkipFunctionalize guard; |
15470 | at::_ops::_foreach_mul_Scalar_out::call(self_, scalar, out_); |
15472 | } |
15473 | } else { |
15474 | ::std::vector<at::Tensor> tmp_output; |
15475 | { |
15476 | at::AutoDispatchSkipFunctionalize guard; |
15477 | tmp_output = at::_ops::_foreach_mul_Scalar::call(self_, scalar); |
15478 | } |
15479 | at::functionalization::impl::replace_(out, tmp_output); |
15480 | at::functionalization::impl::commit_update(out); |
15481 | at::functionalization::impl::sync(out); |
15482 | |
15483 | } |
15484 | } |
15485 | |
15486 | void _foreach_mul__Scalar(c10::DispatchKeySet dispatchKeySet, at::TensorList self, const at::Scalar & scalar) { |
15487 | if (true) { |
15488 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
15489 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
15490 | // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
15491 | auto self_meta = to_meta(self); |
15492 | at::AutoDispatchSkipFunctionalize func_guard; |
15493 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
15494 | at::_ops::_foreach_mul__Scalar::call(self_meta, scalar); |
15495 | } |
15496 | |
15497 | ::std::vector<at::Tensor> self_; |
15498 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
15499 | at::functionalization::impl::sync(self); |
15500 | self_ = at::functionalization::impl::from_functional_tensor(self); |
15501 | } else { |
15502 | self_ = self.vec(); |
15503 | } |
15504 | if (!(true && at::functionalization::impl::isFunctionalTensor(self))) { |
15505 | if ((false)) { |
15506 | // case 1: trying to mutate a non functional tensor with a functional tensor is an error |
15507 | TORCH_INTERNAL_ASSERT(false, |
15508 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
15509 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
15510 | } else { |
15511 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
15512 | at::AutoDispatchSkipFunctionalize guard; |
15513 | at::_ops::_foreach_mul__Scalar::call(self_, scalar); |
15515 | } |
15516 | } else { |
15517 | ::std::vector<at::Tensor> tmp_output; |
15518 | { |
15519 | at::AutoDispatchSkipFunctionalize guard; |
15520 | tmp_output = at::_ops::_foreach_mul_Scalar::call(self_, scalar); |
15521 | } |
15522 | at::functionalization::impl::replace_(self, tmp_output); |
15523 | at::functionalization::impl::commit_update(self); |
15524 | at::functionalization::impl::sync(self); |
15525 | |
15526 | } |
15527 | } |
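// In-place foreach ops mutate every tensor in the `self` list, so the same write-back is done
// list-wide: the functional _foreach_mul.Scalar runs on the unwrapped tensors and the results
// are committed back into the wrappers in `self`. When `self` is not functional, the call just
// redispatches to the original in-place foreach kernel.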
15528 | |
15529 | void _foreach_div_out_Scalar_out(c10::DispatchKeySet dispatchKeySet, at::TensorList self, const at::Scalar & scalar, at::TensorList out) { |
15530 | if (false) { |
15531 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
15532 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
15533 | // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
15534 | auto self_meta = to_meta(self); |
15535 | auto out_meta = to_meta(out); |
15536 | at::AutoDispatchSkipFunctionalize func_guard; |
15537 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
15538 | at::_ops::_foreach_div_Scalar_out::call(self_meta, scalar, out_meta); |
15539 | } |
15540 | |
15541 | ::std::vector<at::Tensor> self_; |
15542 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
15543 | at::functionalization::impl::sync(self); |
15544 | self_ = at::functionalization::impl::from_functional_tensor(self); |
15545 | } else { |
15546 | self_ = self.vec(); |
15547 | } |
15548 | |
15549 | ::std::vector<at::Tensor> out_; |
15550 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
15551 | at::functionalization::impl::sync(out); |
15552 | out_ = at::functionalization::impl::from_functional_tensor(out); |
15553 | } else { |
15554 | out_ = out.vec(); |
15555 | } |
15556 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
15557 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
15558 | // case 1: trying to mutate a non functional tensor with a functional tensor is an error |
15559 | TORCH_INTERNAL_ASSERT(false, |
15560 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
15561 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
15562 | } else { |
15563 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
15564 | at::AutoDispatchSkipFunctionalize guard; |
15565 | at::_ops::_foreach_div_Scalar_out::call(self_, scalar, out_); |
15567 | } |
15568 | } else { |
15569 | ::std::vector<at::Tensor> tmp_output; |
15570 | { |
15571 | at::AutoDispatchSkipFunctionalize guard; |
15572 | tmp_output = at::_ops::_foreach_div_Scalar::call(self_, scalar); |
15573 | } |
15574 | at::functionalization::impl::replace_(out, tmp_output); |
15575 | at::functionalization::impl::commit_update(out); |
15576 | at::functionalization::impl::sync(out); |
15577 | |
15578 | } |
15579 | } |
15580 | |
15581 | void _foreach_div__Scalar(c10::DispatchKeySet dispatchKeySet, at::TensorList self, const at::Scalar & scalar) { |
15582 | if (true) { |
15583 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
15584 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
15585 | // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
15586 | auto self_meta = to_meta(self); |
15587 | at::AutoDispatchSkipFunctionalize func_guard; |
15588 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
15589 | at::_ops::_foreach_div__Scalar::call(self_meta, scalar); |
15590 | } |
15591 | |
15592 | ::std::vector<at::Tensor> self_; |
15593 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
15594 | at::functionalization::impl::sync(self); |
15595 | self_ = at::functionalization::impl::from_functional_tensor(self); |
15596 | } else { |
15597 | self_ = self.vec(); |
15598 | } |
15599 | if (!(true && at::functionalization::impl::isFunctionalTensor(self))) { |
15600 | if ((false)) { |
15601 | // case 1: trying to mutate a non functional tensor with a functional tensor is an error |
15602 | TORCH_INTERNAL_ASSERT(false, |
15603 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
15604 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
15605 | } else { |
15606 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
15607 | at::AutoDispatchSkipFunctionalize guard; |
15608 | at::_ops::_foreach_div__Scalar::call(self_, scalar); |
15610 | } |
15611 | } else { |
15612 | ::std::vector<at::Tensor> tmp_output; |
15613 | { |
15614 | at::AutoDispatchSkipFunctionalize guard; |
15615 | tmp_output = at::_ops::_foreach_div_Scalar::call(self_, scalar); |
15616 | } |
15617 | at::functionalization::impl::replace_(self, tmp_output); |
15618 | at::functionalization::impl::commit_update(self); |
15619 | at::functionalization::impl::sync(self); |
15620 | |
15621 | } |
15622 | } |
15623 | |
15624 | void _foreach_clamp_max_out_Scalar_out(c10::DispatchKeySet dispatchKeySet, at::TensorList self, const at::Scalar & scalar, at::TensorList out) { |
15625 | if (false) { |
15626 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
15627 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
15628 | // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
15629 | auto self_meta = to_meta(self); |
15630 | auto out_meta = to_meta(out); |
15631 | at::AutoDispatchSkipFunctionalize func_guard; |
15632 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
15633 | at::_ops::_foreach_clamp_max_Scalar_out::call(self_meta, scalar, out_meta); |
15634 | } |
15635 | |
15636 | ::std::vector<at::Tensor> self_; |
15637 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
15638 | at::functionalization::impl::sync(self); |
15639 | self_ = at::functionalization::impl::from_functional_tensor(self); |
15640 | } else { |
15641 | self_ = self.vec(); |
15642 | } |
15643 | |
15644 | ::std::vector<at::Tensor> out_; |
15645 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
15646 | at::functionalization::impl::sync(out); |
15647 | out_ = at::functionalization::impl::from_functional_tensor(out); |
15648 | } else { |
15649 | out_ = out.vec(); |
15650 | } |
15651 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
15652 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
15653 | // case 1: trying to mutate a non functional tensor with a functional tensor is an error |
15654 | TORCH_INTERNAL_ASSERT(false, |
15655 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
15656 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
15657 | } else { |
15658 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
15659 | at::AutoDispatchSkipFunctionalize guard; |
15660 | at::_ops::_foreach_clamp_max_Scalar_out::call(self_, scalar, out_); |
15662 | } |
15663 | } else { |
15664 | ::std::vector<at::Tensor> tmp_output; |
15665 | { |
15666 | at::AutoDispatchSkipFunctionalize guard; |
15667 | tmp_output = at::_ops::_foreach_clamp_max_Scalar::call(self_, scalar); |
15668 | } |
15669 | at::functionalization::impl::replace_(out, tmp_output); |
15670 | at::functionalization::impl::commit_update(out); |
15671 | at::functionalization::impl::sync(out); |
15672 | |
15673 | } |
15674 | } |
15675 | |
15676 | void _foreach_clamp_max__Scalar(c10::DispatchKeySet dispatchKeySet, at::TensorList self, const at::Scalar & scalar) { |
15677 | if (true) { |
15678 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
15679 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
15680 | // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
15681 | auto self_meta = to_meta(self); |
15682 | at::AutoDispatchSkipFunctionalize func_guard; |
15683 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
15684 | at::_ops::_foreach_clamp_max__Scalar::call(self_meta, scalar); |
15685 | } |
15686 | |
15687 | ::std::vector<at::Tensor> self_; |
15688 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
15689 | at::functionalization::impl::sync(self); |
15690 | self_ = at::functionalization::impl::from_functional_tensor(self); |
15691 | } else { |
15692 | self_ = self.vec(); |
15693 | } |
15694 | if (!(true && at::functionalization::impl::isFunctionalTensor(self))) { |
15695 | if ((false)) { |
15696 | // case 1: trying to mutate a non functional tensor with a functional tensor is an error |
15697 | TORCH_INTERNAL_ASSERT(false, |
15698 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
15699 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
15700 | } else { |
15701 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
15702 | at::AutoDispatchSkipFunctionalize guard; |
15703 | at::_ops::_foreach_clamp_max__Scalar::call(self_, scalar); |
15704 | ; |
15705 | } |
15706 | } else { |
15707 | ::std::vector<at::Tensor> tmp_output; |
15708 | { |
15709 | at::AutoDispatchSkipFunctionalize guard; |
15710 | tmp_output = at::_ops::_foreach_clamp_max_Scalar::call(self_, scalar); |
15711 | } |
15712 | at::functionalization::impl::replace_(self, tmp_output); |
15713 | at::functionalization::impl::commit_update(self); |
15714 | at::functionalization::impl::sync(self); |
15715 | |
15716 | } |
15717 | } |
15718 | |
15719 | void _foreach_minimum_out_Scalar_out(c10::DispatchKeySet dispatchKeySet, at::TensorList self, const at::Scalar & scalar, at::TensorList out) { |
15720 | if (false) { |
15721 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
15722 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
15723 | // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
15724 | auto self_meta = to_meta(self); |
15725 | auto out_meta = to_meta(out); |
15726 | at::AutoDispatchSkipFunctionalize func_guard; |
15727 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
15728 | at::_ops::_foreach_minimum_Scalar_out::call(self_meta, scalar, out_meta); |
15729 | } |
15730 | |
15731 | ::std::vector<at::Tensor> self_; |
15732 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
15733 | at::functionalization::impl::sync(self); |
15734 | self_ = at::functionalization::impl::from_functional_tensor(self); |
15735 | } else { |
15736 | self_ = self.vec(); |
15737 | } |
15738 | |
15739 | ::std::vector<at::Tensor> out_; |
15740 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
15741 | at::functionalization::impl::sync(out); |
15742 | out_ = at::functionalization::impl::from_functional_tensor(out); |
15743 | } else { |
15744 | out_ = out.vec(); |
15745 | } |
15746 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
15747 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
15748 | // case 1: trying to mutate a non functional tensor with a functional tensor is an error |
15749 | TORCH_INTERNAL_ASSERT(false, |
15750 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
15751 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
15752 | } else { |
15753 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
15754 | at::AutoDispatchSkipFunctionalize guard; |
15755 | at::_ops::_foreach_minimum_Scalar_out::call(self_, scalar, out_); |
15756 | ; |
15757 | } |
15758 | } else { |
15759 | ::std::vector<at::Tensor> tmp_output; |
15760 | { |
15761 | at::AutoDispatchSkipFunctionalize guard; |
15762 | tmp_output = at::_ops::_foreach_minimum_Scalar::call(self_, scalar); |
15763 | } |
15764 | at::functionalization::impl::replace_(out, tmp_output); |
15765 | at::functionalization::impl::commit_update(out); |
15766 | at::functionalization::impl::sync(out); |
15767 | |
15768 | } |
15769 | } |
15770 | |
15771 | void _foreach_minimum__Scalar(c10::DispatchKeySet dispatchKeySet, at::TensorList self, const at::Scalar & scalar) { |
15772 | if (true) { |
15773 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
15774 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
15775 | // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
15776 | auto self_meta = to_meta(self); |
15777 | at::AutoDispatchSkipFunctionalize func_guard; |
15778 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
15779 | at::_ops::_foreach_minimum__Scalar::call(self_meta, scalar); |
15780 | } |
15781 | |
15782 | ::std::vector<at::Tensor> self_; |
15783 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
15784 | at::functionalization::impl::sync(self); |
15785 | self_ = at::functionalization::impl::from_functional_tensor(self); |
15786 | } else { |
15787 | self_ = self.vec(); |
15788 | } |
15789 | if (!(true && at::functionalization::impl::isFunctionalTensor(self))) { |
15790 | if ((false)) { |
15791 | // case 1: trying to mutate a non functional tensor with a functional tensor is an error |
15792 | TORCH_INTERNAL_ASSERT(false, |
15793 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
15794 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
15795 | } else { |
15796 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
15797 | at::AutoDispatchSkipFunctionalize guard; |
15798 | at::_ops::_foreach_minimum__Scalar::call(self_, scalar); |
15799 | ; |
15800 | } |
15801 | } else { |
15802 | ::std::vector<at::Tensor> tmp_output; |
15803 | { |
15804 | at::AutoDispatchSkipFunctionalize guard; |
15805 | tmp_output = at::_ops::_foreach_minimum_Scalar::call(self_, scalar); |
15806 | } |
15807 | at::functionalization::impl::replace_(self, tmp_output); |
15808 | at::functionalization::impl::commit_update(self); |
15809 | at::functionalization::impl::sync(self); |
15810 | |
15811 | } |
15812 | } |
15813 | |
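// Editor's note: the List overloads below differ from the Scalar ones only in that `other`
// is a TensorList as well, so it is unwrapped the same way as `self`, and the
// "mutating a non-functional tensor with a functional tensor" check also has to consider
// whether `other` is functional.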
15814 | void _foreach_mul_out_List_out(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList other, at::TensorList out) { |
15815 | if (false) { |
15816 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
15817 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
15818 | // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
15819 | auto self_meta = to_meta(self); |
15820 | auto other_meta = to_meta(other); |
15821 | auto out_meta = to_meta(out); |
15822 | at::AutoDispatchSkipFunctionalize func_guard; |
15823 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
15824 | at::_ops::_foreach_mul_List_out::call(self_meta, other_meta, out_meta); |
15825 | } |
15826 | |
15827 | ::std::vector<at::Tensor> self_; |
15828 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
15829 | at::functionalization::impl::sync(self); |
15830 | self_ = at::functionalization::impl::from_functional_tensor(self); |
15831 | } else { |
15832 | self_ = self.vec(); |
15833 | } |
15834 | |
15835 | ::std::vector<at::Tensor> other_; |
15836 | if (at::functionalization::impl::isFunctionalTensor(other)) { |
15837 | at::functionalization::impl::sync(other); |
15838 | other_ = at::functionalization::impl::from_functional_tensor(other); |
15839 | } else { |
15840 | other_ = other.vec(); |
15841 | } |
15842 | |
15843 | ::std::vector<at::Tensor> out_; |
15844 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
15845 | at::functionalization::impl::sync(out); |
15846 | out_ = at::functionalization::impl::from_functional_tensor(out); |
15847 | } else { |
15848 | out_ = out.vec(); |
15849 | } |
15850 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
15851 | if ((false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(other))) { |
15852 | // case 1: trying to mutate a non functional tensor with a functional tensor is an error |
15853 | TORCH_INTERNAL_ASSERT(false, |
15854 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
15855 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
15856 | } else { |
15857 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
15858 | at::AutoDispatchSkipFunctionalize guard; |
15859 | at::_ops::_foreach_mul_List_out::call(self_, other_, out_); |
15860 | ; |
15861 | } |
15862 | } else { |
15863 | ::std::vector<at::Tensor> tmp_output; |
15864 | { |
15865 | at::AutoDispatchSkipFunctionalize guard; |
15866 | tmp_output = at::_ops::_foreach_mul_List::call(self_, other_); |
15867 | } |
15868 | at::functionalization::impl::replace_(out, tmp_output); |
15869 | at::functionalization::impl::commit_update(out); |
15870 | at::functionalization::impl::sync(out); |
15871 | |
15872 | } |
15873 | } |
15874 | |
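// Editor's note (illustrative, hand-written): for the in-place List variants the mutated
// argument is `self`, so the functional/non-functional check is keyed on `self`; `other`
// only participates as a read-only input. Conceptually:
//   _foreach_mul_(self, other)  ==>  self := _foreach_mul(self, other)
// with the assignment expressed through replace_/commit_update/sync on `self`.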
15875 | void _foreach_mul__List(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList other) { |
15876 | if (true) { |
15877 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
15878 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
15879 | // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
15880 | auto self_meta = to_meta(self); |
15881 | auto other_meta = to_meta(other); |
15882 | at::AutoDispatchSkipFunctionalize func_guard; |
15883 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
15884 | at::_ops::_foreach_mul__List::call(self_meta, other_meta); |
15885 | } |
15886 | |
15887 | ::std::vector<at::Tensor> self_; |
15888 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
15889 | at::functionalization::impl::sync(self); |
15890 | self_ = at::functionalization::impl::from_functional_tensor(self); |
15891 | } else { |
15892 | self_ = self.vec(); |
15893 | } |
15894 | |
15895 | ::std::vector<at::Tensor> other_; |
15896 | if (at::functionalization::impl::isFunctionalTensor(other)) { |
15897 | at::functionalization::impl::sync(other); |
15898 | other_ = at::functionalization::impl::from_functional_tensor(other); |
15899 | } else { |
15900 | other_ = other.vec(); |
15901 | } |
15902 | if (!(true && at::functionalization::impl::isFunctionalTensor(self))) { |
15903 | if ((false || at::functionalization::impl::isFunctionalTensor(other))) { |
15904 | // case 1: trying to mutate a non functional tensor with a functional tensor is an error |
15905 | TORCH_INTERNAL_ASSERT(false, |
15906 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
15907 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
15908 | } else { |
15909 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
15910 | at::AutoDispatchSkipFunctionalize guard; |
15911 | at::_ops::_foreach_mul__List::call(self_, other_); |
15912 | ; |
15913 | } |
15914 | } else { |
15915 | ::std::vector<at::Tensor> tmp_output; |
15916 | { |
15917 | at::AutoDispatchSkipFunctionalize guard; |
15918 | tmp_output = at::_ops::_foreach_mul_List::call(self_, other_); |
15919 | } |
15920 | at::functionalization::impl::replace_(self, tmp_output); |
15921 | at::functionalization::impl::commit_update(self); |
15922 | at::functionalization::impl::sync(self); |
15923 | |
15924 | } |
15925 | } |
15926 | |
15927 | void _foreach_div_out_List_out(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList other, at::TensorList out) { |
15928 | if (false) { |
15929 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
15930 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
15931 | // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
15932 | auto self_meta = to_meta(self); |
15933 | auto other_meta = to_meta(other); |
15934 | auto out_meta = to_meta(out); |
15935 | at::AutoDispatchSkipFunctionalize func_guard; |
15936 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
15937 | at::_ops::_foreach_div_List_out::call(self_meta, other_meta, out_meta); |
15938 | } |
15939 | |
15940 | ::std::vector<at::Tensor> self_; |
15941 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
15942 | at::functionalization::impl::sync(self); |
15943 | self_ = at::functionalization::impl::from_functional_tensor(self); |
15944 | } else { |
15945 | self_ = self.vec(); |
15946 | } |
15947 | |
15948 | ::std::vector<at::Tensor> other_; |
15949 | if (at::functionalization::impl::isFunctionalTensor(other)) { |
15950 | at::functionalization::impl::sync(other); |
15951 | other_ = at::functionalization::impl::from_functional_tensor(other); |
15952 | } else { |
15953 | other_ = other.vec(); |
15954 | } |
15955 | |
15956 | ::std::vector<at::Tensor> out_; |
15957 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
15958 | at::functionalization::impl::sync(out); |
15959 | out_ = at::functionalization::impl::from_functional_tensor(out); |
15960 | } else { |
15961 | out_ = out.vec(); |
15962 | } |
15963 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
15964 | if ((false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(other))) { |
15965 | // case 1: trying to mutate a non functional tensor with a functional tensor is an error |
15966 | TORCH_INTERNAL_ASSERT(false, |
15967 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
15968 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
15969 | } else { |
15970 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
15971 | at::AutoDispatchSkipFunctionalize guard; |
15972 | at::_ops::_foreach_div_List_out::call(self_, other_, out_); |
15973 | ; |
15974 | } |
15975 | } else { |
15976 | ::std::vector<at::Tensor> tmp_output; |
15977 | { |
15978 | at::AutoDispatchSkipFunctionalize guard; |
15979 | tmp_output = at::_ops::_foreach_div_List::call(self_, other_); |
15980 | } |
15981 | at::functionalization::impl::replace_(out, tmp_output); |
15982 | at::functionalization::impl::commit_update(out); |
15983 | at::functionalization::impl::sync(out); |
15984 | |
15985 | } |
15986 | } |
15987 | |
15988 | void _foreach_div__List(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList other) { |
15989 | if (true) { |
15990 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
15991 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
15992 | // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
15993 | auto self_meta = to_meta(self); |
15994 | auto other_meta = to_meta(other); |
15995 | at::AutoDispatchSkipFunctionalize func_guard; |
15996 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
15997 | at::_ops::_foreach_div__List::call(self_meta, other_meta); |
15998 | } |
15999 | |
16000 | ::std::vector<at::Tensor> self_; |
16001 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
16002 | at::functionalization::impl::sync(self); |
16003 | self_ = at::functionalization::impl::from_functional_tensor(self); |
16004 | } else { |
16005 | self_ = self.vec(); |
16006 | } |
16007 | |
16008 | ::std::vector<at::Tensor> other_; |
16009 | if (at::functionalization::impl::isFunctionalTensor(other)) { |
16010 | at::functionalization::impl::sync(other); |
16011 | other_ = at::functionalization::impl::from_functional_tensor(other); |
16012 | } else { |
16013 | other_ = other.vec(); |
16014 | } |
16015 | if (!(true && at::functionalization::impl::isFunctionalTensor(self))) { |
16016 | if ((false || at::functionalization::impl::isFunctionalTensor(other))) { |
16017 | // case 1: trying to mutate a non functional tensor with a functional tensor is an error |
16018 | TORCH_INTERNAL_ASSERT(false, |
16019 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
16020 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
16021 | } else { |
16022 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
16023 | at::AutoDispatchSkipFunctionalize guard; |
16024 | at::_ops::_foreach_div__List::call(self_, other_); |
16025 | ; |
16026 | } |
16027 | } else { |
16028 | ::std::vector<at::Tensor> tmp_output; |
16029 | { |
16030 | at::AutoDispatchSkipFunctionalize guard; |
16031 | tmp_output = at::_ops::_foreach_div_List::call(self_, other_); |
16032 | } |
16033 | at::functionalization::impl::replace_(self, tmp_output); |
16034 | at::functionalization::impl::commit_update(self); |
16035 | at::functionalization::impl::sync(self); |
16036 | |
16037 | } |
16038 | } |
16039 | |
16040 | void _foreach_clamp_max_out_List_out(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList other, at::TensorList out) { |
16041 | if (false) { |
16042 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
16043 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
16044 | // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
16045 | auto self_meta = to_meta(self); |
16046 | auto other_meta = to_meta(other); |
16047 | auto out_meta = to_meta(out); |
16048 | at::AutoDispatchSkipFunctionalize func_guard; |
16049 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
16050 | at::_ops::_foreach_clamp_max_List_out::call(self_meta, other_meta, out_meta); |
16051 | } |
16052 | |
16053 | ::std::vector<at::Tensor> self_; |
16054 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
16055 | at::functionalization::impl::sync(self); |
16056 | self_ = at::functionalization::impl::from_functional_tensor(self); |
16057 | } else { |
16058 | self_ = self.vec(); |
16059 | } |
16060 | |
16061 | ::std::vector<at::Tensor> other_; |
16062 | if (at::functionalization::impl::isFunctionalTensor(other)) { |
16063 | at::functionalization::impl::sync(other); |
16064 | other_ = at::functionalization::impl::from_functional_tensor(other); |
16065 | } else { |
16066 | other_ = other.vec(); |
16067 | } |
16068 | |
16069 | ::std::vector<at::Tensor> out_; |
16070 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
16071 | at::functionalization::impl::sync(out); |
16072 | out_ = at::functionalization::impl::from_functional_tensor(out); |
16073 | } else { |
16074 | out_ = out.vec(); |
16075 | } |
16076 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
16077 | if ((false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(other))) { |
16078 | // case 1: trying to mutate a non functional tensor with a functional tensor is an error |
16079 | TORCH_INTERNAL_ASSERT(false, |
16080 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
16081 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
16082 | } else { |
16083 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
16084 | at::AutoDispatchSkipFunctionalize guard; |
16085 | at::_ops::_foreach_clamp_max_List_out::call(self_, other_, out_); |
16086 | ; |
16087 | } |
16088 | } else { |
16089 | ::std::vector<at::Tensor> tmp_output; |
16090 | { |
16091 | at::AutoDispatchSkipFunctionalize guard; |
16092 | tmp_output = at::_ops::_foreach_clamp_max_List::call(self_, other_); |
16093 | } |
16094 | at::functionalization::impl::replace_(out, tmp_output); |
16095 | at::functionalization::impl::commit_update(out); |
16096 | at::functionalization::impl::sync(out); |
16097 | |
16098 | } |
16099 | } |
16100 | |
16101 | void _foreach_clamp_max__List(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList other) { |
16102 | if (true) { |
16103 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
16104 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
16105 | // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
16106 | auto self_meta = to_meta(self); |
16107 | auto other_meta = to_meta(other); |
16108 | at::AutoDispatchSkipFunctionalize func_guard; |
16109 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
16110 | at::_ops::_foreach_clamp_max__List::call(self_meta, other_meta); |
16111 | } |
16112 | |
16113 | ::std::vector<at::Tensor> self_; |
16114 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
16115 | at::functionalization::impl::sync(self); |
16116 | self_ = at::functionalization::impl::from_functional_tensor(self); |
16117 | } else { |
16118 | self_ = self.vec(); |
16119 | } |
16120 | |
16121 | ::std::vector<at::Tensor> other_; |
16122 | if (at::functionalization::impl::isFunctionalTensor(other)) { |
16123 | at::functionalization::impl::sync(other); |
16124 | other_ = at::functionalization::impl::from_functional_tensor(other); |
16125 | } else { |
16126 | other_ = other.vec(); |
16127 | } |
16128 | if (!(true && at::functionalization::impl::isFunctionalTensor(self))) { |
16129 | if ((false || at::functionalization::impl::isFunctionalTensor(other))) { |
16130 | // case 1: trying to mutate a non functional tensor with a functional tensor is an error |
16131 | TORCH_INTERNAL_ASSERT(false, |
16132 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
16133 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
16134 | } else { |
16135 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
16136 | at::AutoDispatchSkipFunctionalize guard; |
16137 | at::_ops::_foreach_clamp_max__List::call(self_, other_); |
16138 | ; |
16139 | } |
16140 | } else { |
16141 | ::std::vector<at::Tensor> tmp_output; |
16142 | { |
16143 | at::AutoDispatchSkipFunctionalize guard; |
16144 | tmp_output = at::_ops::_foreach_clamp_max_List::call(self_, other_); |
16145 | } |
16146 | at::functionalization::impl::replace_(self, tmp_output); |
16147 | at::functionalization::impl::commit_update(self); |
16148 | at::functionalization::impl::sync(self); |
16149 | |
16150 | } |
16151 | } |
16152 | |
16153 | void _foreach_minimum_out_List_out(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList other, at::TensorList out) { |
16154 | if (false) { |
16155 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
16156 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
16157 | // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
16158 | auto self_meta = to_meta(self); |
16159 | auto other_meta = to_meta(other); |
16160 | auto out_meta = to_meta(out); |
16161 | at::AutoDispatchSkipFunctionalize func_guard; |
16162 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
16163 | at::_ops::_foreach_minimum_List_out::call(self_meta, other_meta, out_meta); |
16164 | } |
16165 | |
16166 | ::std::vector<at::Tensor> self_; |
16167 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
16168 | at::functionalization::impl::sync(self); |
16169 | self_ = at::functionalization::impl::from_functional_tensor(self); |
16170 | } else { |
16171 | self_ = self.vec(); |
16172 | } |
16173 | |
16174 | ::std::vector<at::Tensor> other_; |
16175 | if (at::functionalization::impl::isFunctionalTensor(other)) { |
16176 | at::functionalization::impl::sync(other); |
16177 | other_ = at::functionalization::impl::from_functional_tensor(other); |
16178 | } else { |
16179 | other_ = other.vec(); |
16180 | } |
16181 | |
16182 | ::std::vector<at::Tensor> out_; |
16183 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
16184 | at::functionalization::impl::sync(out); |
16185 | out_ = at::functionalization::impl::from_functional_tensor(out); |
16186 | } else { |
16187 | out_ = out.vec(); |
16188 | } |
16189 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
16190 | if ((false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(other))) { |
16191 | // case 1: trying to mutate a non functional tensor with a functional tensor is an error |
16192 | TORCH_INTERNAL_ASSERT(false, |
16193 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
16194 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
16195 | } else { |
16196 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
16197 | at::AutoDispatchSkipFunctionalize guard; |
16198 | at::_ops::_foreach_minimum_List_out::call(self_, other_, out_); |
16199 | ; |
16200 | } |
16201 | } else { |
16202 | ::std::vector<at::Tensor> tmp_output; |
16203 | { |
16204 | at::AutoDispatchSkipFunctionalize guard; |
16205 | tmp_output = at::_ops::_foreach_minimum_List::call(self_, other_); |
16206 | } |
16207 | at::functionalization::impl::replace_(out, tmp_output); |
16208 | at::functionalization::impl::commit_update(out); |
16209 | at::functionalization::impl::sync(out); |
16210 | |
16211 | } |
16212 | } |
16213 | |
16214 | void _foreach_minimum__List(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList other) { |
16215 | if (true) { |
16216 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
16217 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
16218 | // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
16219 | auto self_meta = to_meta(self); |
16220 | auto other_meta = to_meta(other); |
16221 | at::AutoDispatchSkipFunctionalize func_guard; |
16222 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
16223 | at::_ops::_foreach_minimum__List::call(self_meta, other_meta); |
16224 | } |
16225 | |
16226 | ::std::vector<at::Tensor> self_; |
16227 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
16228 | at::functionalization::impl::sync(self); |
16229 | self_ = at::functionalization::impl::from_functional_tensor(self); |
16230 | } else { |
16231 | self_ = self.vec(); |
16232 | } |
16233 | |
16234 | ::std::vector<at::Tensor> other_; |
16235 | if (at::functionalization::impl::isFunctionalTensor(other)) { |
16236 | at::functionalization::impl::sync(other); |
16237 | other_ = at::functionalization::impl::from_functional_tensor(other); |
16238 | } else { |
16239 | other_ = other.vec(); |
16240 | } |
16241 | if (!(true && at::functionalization::impl::isFunctionalTensor(self))) { |
16242 | if ((false || at::functionalization::impl::isFunctionalTensor(other))) { |
16243 | // case 1: trying to mutate a non functional tensor with a functional tensor is an error |
16244 | TORCH_INTERNAL_ASSERT(false, |
16245 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
16246 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
16247 | } else { |
16248 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
16249 | at::AutoDispatchSkipFunctionalize guard; |
16250 | at::_ops::_foreach_minimum__List::call(self_, other_); |
16251 | ; |
16252 | } |
16253 | } else { |
16254 | ::std::vector<at::Tensor> tmp_output; |
16255 | { |
16256 | at::AutoDispatchSkipFunctionalize guard; |
16257 | tmp_output = at::_ops::_foreach_minimum_List::call(self_, other_); |
16258 | } |
16259 | at::functionalization::impl::replace_(self, tmp_output); |
16260 | at::functionalization::impl::commit_update(self); |
16261 | at::functionalization::impl::sync(self); |
16262 | |
16263 | } |
16264 | } |
16265 | |
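// Editor's note: the ScalarList overloads below take an at::ArrayRef<at::Scalar>,
// typically one scalar per tensor in `self`. Scalars are not tensors, so they need no
// unwrapping; only `self` (and `out` for the out= variants) go through
// from_functional_tensor.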
16266 | void _foreach_div_out_ScalarList_out(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::ArrayRef<at::Scalar> scalars, at::TensorList out) { |
16267 | if (false) { |
16268 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
16269 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
16270 | // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
16271 | auto self_meta = to_meta(self); |
16272 | auto out_meta = to_meta(out); |
16273 | at::AutoDispatchSkipFunctionalize func_guard; |
16274 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
16275 | at::_ops::_foreach_div_ScalarList_out::call(self_meta, scalars, out_meta); |
16276 | } |
16277 | |
16278 | ::std::vector<at::Tensor> self_; |
16279 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
16280 | at::functionalization::impl::sync(self); |
16281 | self_ = at::functionalization::impl::from_functional_tensor(self); |
16282 | } else { |
16283 | self_ = self.vec(); |
16284 | } |
16285 | |
16286 | ::std::vector<at::Tensor> out_; |
16287 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
16288 | at::functionalization::impl::sync(out); |
16289 | out_ = at::functionalization::impl::from_functional_tensor(out); |
16290 | } else { |
16291 | out_ = out.vec(); |
16292 | } |
16293 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
16294 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
16295 | // case 1: trying to mutate a non functional tensor with a functional tensor is an error |
16296 | TORCH_INTERNAL_ASSERT(false, |
16297 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
16298 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
16299 | } else { |
16300 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
16301 | at::AutoDispatchSkipFunctionalize guard; |
16302 | at::_ops::_foreach_div_ScalarList_out::call(self_, scalars, out_); |
16303 | ; |
16304 | } |
16305 | } else { |
16306 | ::std::vector<at::Tensor> tmp_output; |
16307 | { |
16308 | at::AutoDispatchSkipFunctionalize guard; |
16309 | tmp_output = at::_ops::_foreach_div_ScalarList::call(self_, scalars); |
16310 | } |
16311 | at::functionalization::impl::replace_(out, tmp_output); |
16312 | at::functionalization::impl::commit_update(out); |
16313 | at::functionalization::impl::sync(out); |
16314 | |
16315 | } |
16316 | } |
16317 | |
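// Editor's note (hand-written sketch): under functionalization, an in-place ScalarList
// call behaves as if it were
//   tmp = _foreach_div(self_, scalars);   // functional variant, one scalar per tensor
//   self <- tmp                           // via replace_/commit_update/sync
// which is exactly what the kernel below does when `self` is functional.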
16318 | void _foreach_div__ScalarList(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::ArrayRef<at::Scalar> scalars) { |
16319 | if (true) { |
16320 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
16321 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
16322 | // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
16323 | auto self_meta = to_meta(self); |
16324 | at::AutoDispatchSkipFunctionalize func_guard; |
16325 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
16326 | at::_ops::_foreach_div__ScalarList::call(self_meta, scalars); |
16327 | } |
16328 | |
16329 | ::std::vector<at::Tensor> self_; |
16330 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
16331 | at::functionalization::impl::sync(self); |
16332 | self_ = at::functionalization::impl::from_functional_tensor(self); |
16333 | } else { |
16334 | self_ = self.vec(); |
16335 | } |
16336 | if (!(true && at::functionalization::impl::isFunctionalTensor(self))) { |
16337 | if ((false)) { |
16338 | // case 1: trying to mutate a non functional tensor with a functional tensor is an error |
16339 | TORCH_INTERNAL_ASSERT(false, |
16340 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
16341 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
16342 | } else { |
16343 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
16344 | at::AutoDispatchSkipFunctionalize guard; |
16345 | at::_ops::_foreach_div__ScalarList::call(self_, scalars); |
16346 | ; |
16347 | } |
16348 | } else { |
16349 | ::std::vector<at::Tensor> tmp_output; |
16350 | { |
16351 | at::AutoDispatchSkipFunctionalize guard; |
16352 | tmp_output = at::_ops::_foreach_div_ScalarList::call(self_, scalars); |
16353 | } |
16354 | at::functionalization::impl::replace_(self, tmp_output); |
16355 | at::functionalization::impl::commit_update(self); |
16356 | at::functionalization::impl::sync(self); |
16357 | |
16358 | } |
16359 | } |
16360 | |
16361 | void _foreach_mul_out_ScalarList_out(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::ArrayRef<at::Scalar> scalars, at::TensorList out) { |
16362 | if (false) { |
16363 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
16364 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
16365 | // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
16366 | auto self_meta = to_meta(self); |
16367 | auto out_meta = to_meta(out); |
16368 | at::AutoDispatchSkipFunctionalize func_guard; |
16369 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
16370 | at::_ops::_foreach_mul_ScalarList_out::call(self_meta, scalars, out_meta); |
16371 | } |
16372 | |
16373 | ::std::vector<at::Tensor> self_; |
16374 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
16375 | at::functionalization::impl::sync(self); |
16376 | self_ = at::functionalization::impl::from_functional_tensor(self); |
16377 | } else { |
16378 | self_ = self.vec(); |
16379 | } |
16380 | |
16381 | ::std::vector<at::Tensor> out_; |
16382 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
16383 | at::functionalization::impl::sync(out); |
16384 | out_ = at::functionalization::impl::from_functional_tensor(out); |
16385 | } else { |
16386 | out_ = out.vec(); |
16387 | } |
16388 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
16389 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
16390 | // case 1: trying to mutate a non functional tensor with a functional tensor is an error |
16391 | TORCH_INTERNAL_ASSERT(false, |
16392 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
16393 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
16394 | } else { |
16395 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
16396 | at::AutoDispatchSkipFunctionalize guard; |
16397 | at::_ops::_foreach_mul_ScalarList_out::call(self_, scalars, out_); |
16398 | ; |
16399 | } |
16400 | } else { |
16401 | ::std::vector<at::Tensor> tmp_output; |
16402 | { |
16403 | at::AutoDispatchSkipFunctionalize guard; |
16404 | tmp_output = at::_ops::_foreach_mul_ScalarList::call(self_, scalars); |
16405 | } |
16406 | at::functionalization::impl::replace_(out, tmp_output); |
16407 | at::functionalization::impl::commit_update(out); |
16408 | at::functionalization::impl::sync(out); |
16409 | |
16410 | } |
16411 | } |
16412 | |
16413 | void _foreach_mul__ScalarList(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::ArrayRef<at::Scalar> scalars) { |
16414 | if (true) { |
16415 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
16416 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
16417 | // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
16418 | auto self_meta = to_meta(self); |
16419 | at::AutoDispatchSkipFunctionalize func_guard; |
16420 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
16421 | at::_ops::_foreach_mul__ScalarList::call(self_meta, scalars); |
16422 | } |
16423 | |
16424 | ::std::vector<at::Tensor> self_; |
16425 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
16426 | at::functionalization::impl::sync(self); |
16427 | self_ = at::functionalization::impl::from_functional_tensor(self); |
16428 | } else { |
16429 | self_ = self.vec(); |
16430 | } |
16431 | if (!(true && at::functionalization::impl::isFunctionalTensor(self))) { |
16432 | if ((false)) { |
16433 | // case 1: trying to mutate a non functional tensor with a functional tensor is an error |
16434 | TORCH_INTERNAL_ASSERT(false, |
16435 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
16436 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
16437 | } else { |
16438 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
16439 | at::AutoDispatchSkipFunctionalize guard; |
16440 | at::_ops::_foreach_mul__ScalarList::call(self_, scalars); |
16441 | ; |
16442 | } |
16443 | } else { |
16444 | ::std::vector<at::Tensor> tmp_output; |
16445 | { |
16446 | at::AutoDispatchSkipFunctionalize guard; |
16447 | tmp_output = at::_ops::_foreach_mul_ScalarList::call(self_, scalars); |
16448 | } |
16449 | at::functionalization::impl::replace_(self, tmp_output); |
16450 | at::functionalization::impl::commit_update(self); |
16451 | at::functionalization::impl::sync(self); |
16452 | |
16453 | } |
16454 | } |
16455 | |
16456 | void _foreach_clamp_max_out_ScalarList_out(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::ArrayRef<at::Scalar> scalars, at::TensorList out) { |
16457 | if (false) { |
16458 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
16459 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
16460 | // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
16461 | auto self_meta = to_meta(self); |
16462 | auto out_meta = to_meta(out); |
16463 | at::AutoDispatchSkipFunctionalize func_guard; |
16464 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
16465 | at::_ops::_foreach_clamp_max_ScalarList_out::call(self_meta, scalars, out_meta); |
16466 | } |
16467 | |
16468 | ::std::vector<at::Tensor> self_; |
16469 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
16470 | at::functionalization::impl::sync(self); |
16471 | self_ = at::functionalization::impl::from_functional_tensor(self); |
16472 | } else { |
16473 | self_ = self.vec(); |
16474 | } |
16475 | |
16476 | ::std::vector<at::Tensor> out_; |
16477 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
16478 | at::functionalization::impl::sync(out); |
16479 | out_ = at::functionalization::impl::from_functional_tensor(out); |
16480 | } else { |
16481 | out_ = out.vec(); |
16482 | } |
16483 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
16484 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
16485 | // case 1: trying to mutate a non functional tensor with a functional tensor is an error |
16486 | TORCH_INTERNAL_ASSERT(false, |
16487 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
16488 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
16489 | } else { |
16490 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
16491 | at::AutoDispatchSkipFunctionalize guard; |
16492 | at::_ops::_foreach_clamp_max_ScalarList_out::call(self_, scalars, out_); |
16493 | ; |
16494 | } |
16495 | } else { |
16496 | ::std::vector<at::Tensor> tmp_output; |
16497 | { |
16498 | at::AutoDispatchSkipFunctionalize guard; |
16499 | tmp_output = at::_ops::_foreach_clamp_max_ScalarList::call(self_, scalars); |
16500 | } |
16501 | at::functionalization::impl::replace_(out, tmp_output); |
16502 | at::functionalization::impl::commit_update(out); |
16503 | at::functionalization::impl::sync(out); |
16504 | |
16505 | } |
16506 | } |
16507 | |
16508 | void _foreach_clamp_max__ScalarList(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::ArrayRef<at::Scalar> scalars) { |
16509 | if (true) { |
16510 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
16511 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
16512 | // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
16513 | auto self_meta = to_meta(self); |
16514 | at::AutoDispatchSkipFunctionalize func_guard; |
16515 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
16516 | at::_ops::_foreach_clamp_max__ScalarList::call(self_meta, scalars); |
16517 | } |
16518 | |
16519 | ::std::vector<at::Tensor> self_; |
16520 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
16521 | at::functionalization::impl::sync(self); |
16522 | self_ = at::functionalization::impl::from_functional_tensor(self); |
16523 | } else { |
16524 | self_ = self.vec(); |
16525 | } |
16526 | if (!(true && at::functionalization::impl::isFunctionalTensor(self))) { |
16527 | if ((false)) { |
16528 | // case 1: trying to mutate a non functional tensor with a functional tensor is an error |
16529 | TORCH_INTERNAL_ASSERT(false, |
16530 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
16531 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
16532 | } else { |
16533 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
16534 | at::AutoDispatchSkipFunctionalize guard; |
16535 | at::_ops::_foreach_clamp_max__ScalarList::call(self_, scalars); |
16536 | ; |
16537 | } |
16538 | } else { |
16539 | ::std::vector<at::Tensor> tmp_output; |
16540 | { |
16541 | at::AutoDispatchSkipFunctionalize guard; |
16542 | tmp_output = at::_ops::_foreach_clamp_max_ScalarList::call(self_, scalars); |
16543 | } |
16544 | at::functionalization::impl::replace_(self, tmp_output); |
16545 | at::functionalization::impl::commit_update(self); |
16546 | at::functionalization::impl::sync(self); |
16547 | |
16548 | } |
16549 | } |
16550 | |
16551 | void _foreach_minimum_out_ScalarList_out(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::ArrayRef<at::Scalar> scalars, at::TensorList out) { |
16552 | if (false) { |
16553 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
16554 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
16555 | // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
16556 | auto self_meta = to_meta(self); |
16557 | auto out_meta = to_meta(out); |
16558 | at::AutoDispatchSkipFunctionalize func_guard; |
16559 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
16560 | at::_ops::_foreach_minimum_ScalarList_out::call(self_meta, scalars, out_meta); |
16561 | } |
16562 | |
16563 | ::std::vector<at::Tensor> self_; |
16564 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
16565 | at::functionalization::impl::sync(self); |
16566 | self_ = at::functionalization::impl::from_functional_tensor(self); |
16567 | } else { |
16568 | self_ = self.vec(); |
16569 | } |
16570 | |
16571 | ::std::vector<at::Tensor> out_; |
16572 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
16573 | at::functionalization::impl::sync(out); |
16574 | out_ = at::functionalization::impl::from_functional_tensor(out); |
16575 | } else { |
16576 | out_ = out.vec(); |
16577 | } |
16578 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
16579 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
16580 | // case 1: trying to mutate a non functional tensor with a functional tensor is an error |
16581 | TORCH_INTERNAL_ASSERT(false, |
16582 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
16583 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
16584 | } else { |
16585 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
16586 | at::AutoDispatchSkipFunctionalize guard; |
16587 | at::_ops::_foreach_minimum_ScalarList_out::call(self_, scalars, out_); |
16588 | ; |
16589 | } |
16590 | } else { |
16591 | ::std::vector<at::Tensor> tmp_output; |
16592 | { |
16593 | at::AutoDispatchSkipFunctionalize guard; |
16594 | tmp_output = at::_ops::_foreach_minimum_ScalarList::call(self_, scalars); |
16595 | } |
16596 | at::functionalization::impl::replace_(out, tmp_output); |
16597 | at::functionalization::impl::commit_update(out); |
16598 | at::functionalization::impl::sync(out); |
16599 | |
16600 | } |
16601 | } |
16602 | |
16603 | void _foreach_minimum__ScalarList(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::ArrayRef<at::Scalar> scalars) { |
16604 | if (true) { |
16605 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
16606 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
16607 | // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
16608 | auto self_meta = to_meta(self); |
16609 | at::AutoDispatchSkipFunctionalize func_guard; |
16610 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
16611 | at::_ops::_foreach_minimum__ScalarList::call(self_meta, scalars); |
16612 | } |
16613 | |
16614 | ::std::vector<at::Tensor> self_; |
16615 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
16616 | at::functionalization::impl::sync(self); |
16617 | self_ = at::functionalization::impl::from_functional_tensor(self); |
16618 | } else { |
16619 | self_ = self.vec(); |
16620 | } |
16621 | if (!(true && at::functionalization::impl::isFunctionalTensor(self))) { |
16622 | if ((false)) { |
16623 | // case 1: trying to mutate a non functional tensor with a functional tensor is an error |
16624 | TORCH_INTERNAL_ASSERT(false, |
16625 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
16626 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
16627 | } else { |
16628 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
16629 | at::AutoDispatchSkipFunctionalize guard; |
16630 | at::_ops::_foreach_minimum__ScalarList::call(self_, scalars); |
16631 | ; |
16632 | } |
16633 | } else { |
16634 | ::std::vector<at::Tensor> tmp_output; |
16635 | { |
16636 | at::AutoDispatchSkipFunctionalize guard; |
16637 | tmp_output = at::_ops::_foreach_minimum_ScalarList::call(self_, scalars); |
16638 | } |
16639 | at::functionalization::impl::replace_(self, tmp_output); |
16640 | at::functionalization::impl::commit_update(self); |
16641 | at::functionalization::impl::sync(self); |
16642 | |
16643 | } |
16644 | } |
16645 | |
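// Editor's note: from here on the kernels cover unary foreach ops (expm1, tanh, sin, ...).
// The structure is unchanged; there is simply no second input to unwrap, so only `self`
// (and `out` for the out= variants) are converted with from_functional_tensor.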
16646 | void _foreach_expm1_out_out(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList out) { |
16647 | if (false) { |
16648 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
16649 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
16650 | // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
16651 | auto self_meta = to_meta(self); |
16652 | auto out_meta = to_meta(out); |
16653 | at::AutoDispatchSkipFunctionalize func_guard; |
16654 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
16655 | at::_ops::_foreach_expm1_out::call(self_meta, out_meta); |
16656 | } |
16657 | |
16658 | ::std::vector<at::Tensor> self_; |
16659 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
16660 | at::functionalization::impl::sync(self); |
16661 | self_ = at::functionalization::impl::from_functional_tensor(self); |
16662 | } else { |
16663 | self_ = self.vec(); |
16664 | } |
16665 | |
16666 | ::std::vector<at::Tensor> out_; |
16667 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
16668 | at::functionalization::impl::sync(out); |
16669 | out_ = at::functionalization::impl::from_functional_tensor(out); |
16670 | } else { |
16671 | out_ = out.vec(); |
16672 | } |
16673 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
16674 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
16675 | // case 1: trying to mutate a non functional tensor with a functional tensor is an error |
16676 | TORCH_INTERNAL_ASSERT(false, |
16677 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
16678 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
16679 | } else { |
16680 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
16681 | at::AutoDispatchSkipFunctionalize guard; |
16682 | at::_ops::_foreach_expm1_out::call(self_, out_); |
16683 | ; |
16684 | } |
16685 | } else { |
16686 | ::std::vector<at::Tensor> tmp_output; |
16687 | { |
16688 | at::AutoDispatchSkipFunctionalize guard; |
16689 | tmp_output = at::_ops::_foreach_expm1::call(self_); |
16690 | } |
16691 | at::functionalization::impl::replace_(out, tmp_output); |
16692 | at::functionalization::impl::commit_update(out); |
16693 | at::functionalization::impl::sync(out); |
16694 | |
16695 | } |
16696 | } |
16697 | |
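// Editor's note (illustrative, not generated): _foreach_expm1_(self) is functionalized as
//   tmp = _foreach_expm1(self_);
//   replace_(self, tmp); commit_update(self); sync(self);
// mirroring the tensor-tensor and tensor-scalar variants above.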
16698 | void _foreach_expm1_(c10::DispatchKeySet dispatchKeySet, at::TensorList self) { |
16699 | if (true) { |
16700 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
16701 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
16702 | // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
16703 | auto self_meta = to_meta(self); |
16704 | at::AutoDispatchSkipFunctionalize func_guard; |
16705 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
16706 | at::_ops::_foreach_expm1_::call(self_meta); |
16707 | } |
16708 | |
16709 | ::std::vector<at::Tensor> self_; |
16710 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
16711 | at::functionalization::impl::sync(self); |
16712 | self_ = at::functionalization::impl::from_functional_tensor(self); |
16713 | } else { |
16714 | self_ = self.vec(); |
16715 | } |
16716 | if (!(true && at::functionalization::impl::isFunctionalTensor(self))) { |
16717 | if ((false)) { |
16718 | // case 1: trying to mutate a non functional tensor with a functional tensor is an error |
16719 | TORCH_INTERNAL_ASSERT(false, |
16720 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
16721 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
16722 | } else { |
16723 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
16724 | at::AutoDispatchSkipFunctionalize guard; |
16725 | at::_ops::_foreach_expm1_::call(self_); |
16726 | ; |
16727 | } |
16728 | } else { |
16729 | ::std::vector<at::Tensor> tmp_output; |
16730 | { |
16731 | at::AutoDispatchSkipFunctionalize guard; |
16732 | tmp_output = at::_ops::_foreach_expm1::call(self_); |
16733 | } |
16734 | at::functionalization::impl::replace_(self, tmp_output); |
16735 | at::functionalization::impl::commit_update(self); |
16736 | at::functionalization::impl::sync(self); |
16737 | |
16738 | } |
16739 | } |
16740 | |
16741 | void _foreach_tanh_out_out(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList out) { |
16742 | if (false) { |
16743 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
16744 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
16745 | // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
16746 | auto self_meta = to_meta(self); |
16747 | auto out_meta = to_meta(out); |
16748 | at::AutoDispatchSkipFunctionalize func_guard; |
16749 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
16750 | at::_ops::_foreach_tanh_out::call(self_meta, out_meta); |
16751 | } |
16752 | |
16753 | ::std::vector<at::Tensor> self_; |
16754 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
16755 | at::functionalization::impl::sync(self); |
16756 | self_ = at::functionalization::impl::from_functional_tensor(self); |
16757 | } else { |
16758 | self_ = self.vec(); |
16759 | } |
16760 | |
16761 | ::std::vector<at::Tensor> out_; |
16762 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
16763 | at::functionalization::impl::sync(out); |
16764 | out_ = at::functionalization::impl::from_functional_tensor(out); |
16765 | } else { |
16766 | out_ = out.vec(); |
16767 | } |
16768 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
16769 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
16770 | // case 1: trying to mutate a non functional tensor with a functional tensor is an error |
16771 | TORCH_INTERNAL_ASSERT(false, |
16772 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
16773 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
16774 | } else { |
16775 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
16776 | at::AutoDispatchSkipFunctionalize guard; |
16777 | at::_ops::_foreach_tanh_out::call(self_, out_); |
16778 | ; |
16779 | } |
16780 | } else { |
16781 | ::std::vector<at::Tensor> tmp_output; |
16782 | { |
16783 | at::AutoDispatchSkipFunctionalize guard; |
16784 | tmp_output = at::_ops::_foreach_tanh::call(self_); |
16785 | } |
16786 | at::functionalization::impl::replace_(out, tmp_output); |
16787 | at::functionalization::impl::commit_update(out); |
16788 | at::functionalization::impl::sync(out); |
16789 | |
16790 | } |
16791 | } |
16792 | |
16793 | void _foreach_tanh_(c10::DispatchKeySet dispatchKeySet, at::TensorList self) { |
16794 | if (true) { |
16795 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
16796 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
16797 | // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
16798 | auto self_meta = to_meta(self); |
16799 | at::AutoDispatchSkipFunctionalize func_guard; |
16800 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
16801 | at::_ops::_foreach_tanh_::call(self_meta); |
16802 | } |
16803 | |
16804 | ::std::vector<at::Tensor> self_; |
16805 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
16806 | at::functionalization::impl::sync(self); |
16807 | self_ = at::functionalization::impl::from_functional_tensor(self); |
16808 | } else { |
16809 | self_ = self.vec(); |
16810 | } |
16811 | if (!(true && at::functionalization::impl::isFunctionalTensor(self))) { |
16812 | if ((false)) { |
16813 | // case 1: trying to mutate a non-functional tensor with a functional tensor is an error |
16814 | TORCH_INTERNAL_ASSERT(false, |
16815 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
16816 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
16817 | } else { |
16818 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
16819 | at::AutoDispatchSkipFunctionalize guard; |
16820 | at::_ops::_foreach_tanh_::call(self_); |
16822 | } |
16823 | } else { |
16824 | ::std::vector<at::Tensor> tmp_output; |
16825 | { |
16826 | at::AutoDispatchSkipFunctionalize guard; |
16827 | tmp_output = at::_ops::_foreach_tanh::call(self_); |
16828 | } |
16829 | at::functionalization::impl::replace_(self, tmp_output); |
16830 | at::functionalization::impl::commit_update(self); |
16831 | at::functionalization::impl::sync(self); |
16832 | |
16833 | } |
16834 | } |
16835 | |
16836 | void _foreach_sin_out_out(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList out) { |
16837 | if (false) { |
16838 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
16839 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
16840 | // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
16841 | auto self_meta = to_meta(self); |
16842 | auto out_meta = to_meta(out); |
16843 | at::AutoDispatchSkipFunctionalize func_guard; |
16844 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
16845 | at::_ops::_foreach_sin_out::call(self_meta, out_meta); |
16846 | } |
16847 | |
16848 | ::std::vector<at::Tensor> self_; |
16849 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
16850 | at::functionalization::impl::sync(self); |
16851 | self_ = at::functionalization::impl::from_functional_tensor(self); |
16852 | } else { |
16853 | self_ = self.vec(); |
16854 | } |
16855 | |
16856 | ::std::vector<at::Tensor> out_; |
16857 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
16858 | at::functionalization::impl::sync(out); |
16859 | out_ = at::functionalization::impl::from_functional_tensor(out); |
16860 | } else { |
16861 | out_ = out.vec(); |
16862 | } |
16863 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
16864 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
16865 | // case 1: trying to mutate a non-functional tensor with a functional tensor is an error |
16866 | TORCH_INTERNAL_ASSERT(false, |
16867 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
16868 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
16869 | } else { |
16870 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
16871 | at::AutoDispatchSkipFunctionalize guard; |
16872 | at::_ops::_foreach_sin_out::call(self_, out_); |
16874 | } |
16875 | } else { |
16876 | ::std::vector<at::Tensor> tmp_output; |
16877 | { |
16878 | at::AutoDispatchSkipFunctionalize guard; |
16879 | tmp_output = at::_ops::_foreach_sin::call(self_); |
16880 | } |
16881 | at::functionalization::impl::replace_(out, tmp_output); |
16882 | at::functionalization::impl::commit_update(out); |
16883 | at::functionalization::impl::sync(out); |
16884 | |
16885 | } |
16886 | } |
16887 | |
16888 | void _foreach_sin_(c10::DispatchKeySet dispatchKeySet, at::TensorList self) { |
16889 | if (true) { |
16890 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
16891 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
16892 | // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
16893 | auto self_meta = to_meta(self); |
16894 | at::AutoDispatchSkipFunctionalize func_guard; |
16895 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
16896 | at::_ops::_foreach_sin_::call(self_meta); |
16897 | } |
16898 | |
16899 | ::std::vector<at::Tensor> self_; |
16900 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
16901 | at::functionalization::impl::sync(self); |
16902 | self_ = at::functionalization::impl::from_functional_tensor(self); |
16903 | } else { |
16904 | self_ = self.vec(); |
16905 | } |
16906 | if (!(true && at::functionalization::impl::isFunctionalTensor(self))) { |
16907 | if ((false)) { |
16908 | // case 1: trying to mutate a non-functional tensor with a functional tensor is an error |
16909 | TORCH_INTERNAL_ASSERT(false, |
16910 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
16911 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
16912 | } else { |
16913 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
16914 | at::AutoDispatchSkipFunctionalize guard; |
16915 | at::_ops::_foreach_sin_::call(self_); |
16917 | } |
16918 | } else { |
16919 | ::std::vector<at::Tensor> tmp_output; |
16920 | { |
16921 | at::AutoDispatchSkipFunctionalize guard; |
16922 | tmp_output = at::_ops::_foreach_sin::call(self_); |
16923 | } |
16924 | at::functionalization::impl::replace_(self, tmp_output); |
16925 | at::functionalization::impl::commit_update(self); |
16926 | at::functionalization::impl::sync(self); |
16927 | |
16928 | } |
16929 | } |
16930 | |
16931 | void _foreach_frac_out_out(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList out) { |
16932 | if (false) { |
16933 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
16934 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
16935 | // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
16936 | auto self_meta = to_meta(self); |
16937 | auto out_meta = to_meta(out); |
16938 | at::AutoDispatchSkipFunctionalize func_guard; |
16939 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
16940 | at::_ops::_foreach_frac_out::call(self_meta, out_meta); |
16941 | } |
16942 | |
16943 | ::std::vector<at::Tensor> self_; |
16944 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
16945 | at::functionalization::impl::sync(self); |
16946 | self_ = at::functionalization::impl::from_functional_tensor(self); |
16947 | } else { |
16948 | self_ = self.vec(); |
16949 | } |
16950 | |
16951 | ::std::vector<at::Tensor> out_; |
16952 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
16953 | at::functionalization::impl::sync(out); |
16954 | out_ = at::functionalization::impl::from_functional_tensor(out); |
16955 | } else { |
16956 | out_ = out.vec(); |
16957 | } |
16958 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
16959 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
16960 | // case 1: trying to mutate a non-functional tensor with a functional tensor is an error |
16961 | TORCH_INTERNAL_ASSERT(false, |
16962 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
16963 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
16964 | } else { |
16965 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
16966 | at::AutoDispatchSkipFunctionalize guard; |
16967 | at::_ops::_foreach_frac_out::call(self_, out_); |
16969 | } |
16970 | } else { |
16971 | ::std::vector<at::Tensor> tmp_output; |
16972 | { |
16973 | at::AutoDispatchSkipFunctionalize guard; |
16974 | tmp_output = at::_ops::_foreach_frac::call(self_); |
16975 | } |
16976 | at::functionalization::impl::replace_(out, tmp_output); |
16977 | at::functionalization::impl::commit_update(out); |
16978 | at::functionalization::impl::sync(out); |
16979 | |
16980 | } |
16981 | } |
16982 | |
16983 | void _foreach_frac_(c10::DispatchKeySet dispatchKeySet, at::TensorList self) { |
16984 | if (true) { |
16985 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
16986 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
16987 | // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
16988 | auto self_meta = to_meta(self); |
16989 | at::AutoDispatchSkipFunctionalize func_guard; |
16990 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
16991 | at::_ops::_foreach_frac_::call(self_meta); |
16992 | } |
16993 | |
16994 | ::std::vector<at::Tensor> self_; |
16995 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
16996 | at::functionalization::impl::sync(self); |
16997 | self_ = at::functionalization::impl::from_functional_tensor(self); |
16998 | } else { |
16999 | self_ = self.vec(); |
17000 | } |
17001 | if (!(true && at::functionalization::impl::isFunctionalTensor(self))) { |
17002 | if ((false)) { |
17003 | // case 1: trying to mutate a non-functional tensor with a functional tensor is an error |
17004 | TORCH_INTERNAL_ASSERT(false, |
17005 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
17006 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
17007 | } else { |
17008 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
17009 | at::AutoDispatchSkipFunctionalize guard; |
17010 | at::_ops::_foreach_frac_::call(self_); |
17012 | } |
17013 | } else { |
17014 | ::std::vector<at::Tensor> tmp_output; |
17015 | { |
17016 | at::AutoDispatchSkipFunctionalize guard; |
17017 | tmp_output = at::_ops::_foreach_frac::call(self_); |
17018 | } |
17019 | at::functionalization::impl::replace_(self, tmp_output); |
17020 | at::functionalization::impl::commit_update(self); |
17021 | at::functionalization::impl::sync(self); |
17022 | |
17023 | } |
17024 | } |
17025 | |
17026 | void _foreach_reciprocal_out_out(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList out) { |
17027 | if (false) { |
17028 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
17029 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
17030 | // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
17031 | auto self_meta = to_meta(self); |
17032 | auto out_meta = to_meta(out); |
17033 | at::AutoDispatchSkipFunctionalize func_guard; |
17034 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
17035 | at::_ops::_foreach_reciprocal_out::call(self_meta, out_meta); |
17036 | } |
17037 | |
17038 | ::std::vector<at::Tensor> self_; |
17039 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
17040 | at::functionalization::impl::sync(self); |
17041 | self_ = at::functionalization::impl::from_functional_tensor(self); |
17042 | } else { |
17043 | self_ = self.vec(); |
17044 | } |
17045 | |
17046 | ::std::vector<at::Tensor> out_; |
17047 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
17048 | at::functionalization::impl::sync(out); |
17049 | out_ = at::functionalization::impl::from_functional_tensor(out); |
17050 | } else { |
17051 | out_ = out.vec(); |
17052 | } |
17053 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
17054 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
17055 | // case 1: trying to mutate a non-functional tensor with a functional tensor is an error |
17056 | TORCH_INTERNAL_ASSERT(false, |
17057 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
17058 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
17059 | } else { |
17060 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
17061 | at::AutoDispatchSkipFunctionalize guard; |
17062 | at::_ops::_foreach_reciprocal_out::call(self_, out_); |
17064 | } |
17065 | } else { |
17066 | ::std::vector<at::Tensor> tmp_output; |
17067 | { |
17068 | at::AutoDispatchSkipFunctionalize guard; |
17069 | tmp_output = at::_ops::_foreach_reciprocal::call(self_); |
17070 | } |
17071 | at::functionalization::impl::replace_(out, tmp_output); |
17072 | at::functionalization::impl::commit_update(out); |
17073 | at::functionalization::impl::sync(out); |
17074 | |
17075 | } |
17076 | } |
17077 | |
17078 | void _foreach_reciprocal_(c10::DispatchKeySet dispatchKeySet, at::TensorList self) { |
17079 | if (true) { |
17080 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
17081 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
17082 | // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
17083 | auto self_meta = to_meta(self); |
17084 | at::AutoDispatchSkipFunctionalize func_guard; |
17085 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
17086 | at::_ops::_foreach_reciprocal_::call(self_meta); |
17087 | } |
17088 | |
17089 | ::std::vector<at::Tensor> self_; |
17090 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
17091 | at::functionalization::impl::sync(self); |
17092 | self_ = at::functionalization::impl::from_functional_tensor(self); |
17093 | } else { |
17094 | self_ = self.vec(); |
17095 | } |
17096 | if (!(true && at::functionalization::impl::isFunctionalTensor(self))) { |
17097 | if ((false)) { |
17098 | // case 1: trying to mutate a non-functional tensor with a functional tensor is an error |
17099 | TORCH_INTERNAL_ASSERT(false, |
17100 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
17101 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
17102 | } else { |
17103 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
17104 | at::AutoDispatchSkipFunctionalize guard; |
17105 | at::_ops::_foreach_reciprocal_::call(self_); |
17107 | } |
17108 | } else { |
17109 | ::std::vector<at::Tensor> tmp_output; |
17110 | { |
17111 | at::AutoDispatchSkipFunctionalize guard; |
17112 | tmp_output = at::_ops::_foreach_reciprocal::call(self_); |
17113 | } |
17114 | at::functionalization::impl::replace_(self, tmp_output); |
17115 | at::functionalization::impl::commit_update(self); |
17116 | at::functionalization::impl::sync(self); |
17117 | |
17118 | } |
17119 | } |
17120 | |
17121 | void _foreach_trunc_out_out(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList out) { |
17122 | if (false) { |
17123 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
17124 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
17125 | // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
17126 | auto self_meta = to_meta(self); |
17127 | auto out_meta = to_meta(out); |
17128 | at::AutoDispatchSkipFunctionalize func_guard; |
17129 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
17130 | at::_ops::_foreach_trunc_out::call(self_meta, out_meta); |
17131 | } |
17132 | |
17133 | ::std::vector<at::Tensor> self_; |
17134 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
17135 | at::functionalization::impl::sync(self); |
17136 | self_ = at::functionalization::impl::from_functional_tensor(self); |
17137 | } else { |
17138 | self_ = self.vec(); |
17139 | } |
17140 | |
17141 | ::std::vector<at::Tensor> out_; |
17142 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
17143 | at::functionalization::impl::sync(out); |
17144 | out_ = at::functionalization::impl::from_functional_tensor(out); |
17145 | } else { |
17146 | out_ = out.vec(); |
17147 | } |
17148 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
17149 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
17150 | // case 1: trying to mutate a non-functional tensor with a functional tensor is an error |
17151 | TORCH_INTERNAL_ASSERT(false, |
17152 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
17153 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
17154 | } else { |
17155 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
17156 | at::AutoDispatchSkipFunctionalize guard; |
17157 | at::_ops::_foreach_trunc_out::call(self_, out_); |
17159 | } |
17160 | } else { |
17161 | ::std::vector<at::Tensor> tmp_output; |
17162 | { |
17163 | at::AutoDispatchSkipFunctionalize guard; |
17164 | tmp_output = at::_ops::_foreach_trunc::call(self_); |
17165 | } |
17166 | at::functionalization::impl::replace_(out, tmp_output); |
17167 | at::functionalization::impl::commit_update(out); |
17168 | at::functionalization::impl::sync(out); |
17169 | |
17170 | } |
17171 | } |
17172 | |
17173 | void _foreach_trunc_(c10::DispatchKeySet dispatchKeySet, at::TensorList self) { |
17174 | if (true) { |
17175 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
17176 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
17177 | // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
17178 | auto self_meta = to_meta(self); |
17179 | at::AutoDispatchSkipFunctionalize func_guard; |
17180 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
17181 | at::_ops::_foreach_trunc_::call(self_meta); |
17182 | } |
17183 | |
17184 | ::std::vector<at::Tensor> self_; |
17185 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
17186 | at::functionalization::impl::sync(self); |
17187 | self_ = at::functionalization::impl::from_functional_tensor(self); |
17188 | } else { |
17189 | self_ = self.vec(); |
17190 | } |
17191 | if (!(true && at::functionalization::impl::isFunctionalTensor(self))) { |
17192 | if ((false)) { |
17193 | // case 1: trying to mutate a non-functional tensor with a functional tensor is an error |
17194 | TORCH_INTERNAL_ASSERT(false, |
17195 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
17196 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
17197 | } else { |
17198 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
17199 | at::AutoDispatchSkipFunctionalize guard; |
17200 | at::_ops::_foreach_trunc_::call(self_); |
17202 | } |
17203 | } else { |
17204 | ::std::vector<at::Tensor> tmp_output; |
17205 | { |
17206 | at::AutoDispatchSkipFunctionalize guard; |
17207 | tmp_output = at::_ops::_foreach_trunc::call(self_); |
17208 | } |
17209 | at::functionalization::impl::replace_(self, tmp_output); |
17210 | at::functionalization::impl::commit_update(self); |
17211 | at::functionalization::impl::sync(self); |
17212 | |
17213 | } |
17214 | } |
17215 | |
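| // From here on the kernels take single at::Tensor arguments rather than TensorLists; they follow the same |
| // unwrap / redispatch-or-functionalize pattern, but additionally return the `out` (or `grad_input`) reference |
| // so the out= signature is preserved for callers. |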
17216 | at::Tensor & _convert_indices_from_csr_to_coo_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & crow_indices, const at::Tensor & col_indices, bool out_int32, bool transpose, at::Tensor & out) { |
17217 | if (false) { |
17218 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
17219 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
17220 | // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
17221 | auto crow_indices_meta = to_meta(crow_indices); |
17222 | auto col_indices_meta = to_meta(col_indices); |
17223 | auto out_meta = to_meta(out); |
17224 | at::AutoDispatchSkipFunctionalize func_guard; |
17225 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
17226 | at::_ops::_convert_indices_from_csr_to_coo_out::call(crow_indices_meta, col_indices_meta, out_int32, transpose, out_meta); |
17227 | } |
17228 | |
17229 | at::Tensor crow_indices_; |
17230 | if (at::functionalization::impl::isFunctionalTensor(crow_indices)) { |
17231 | at::functionalization::impl::sync(crow_indices); |
17232 | crow_indices_ = at::functionalization::impl::from_functional_tensor(crow_indices); |
17233 | } else { |
17234 | crow_indices_ = crow_indices; |
17235 | } |
17236 | |
17237 | at::Tensor col_indices_; |
17238 | if (at::functionalization::impl::isFunctionalTensor(col_indices)) { |
17239 | at::functionalization::impl::sync(col_indices); |
17240 | col_indices_ = at::functionalization::impl::from_functional_tensor(col_indices); |
17241 | } else { |
17242 | col_indices_ = col_indices; |
17243 | } |
17244 | |
17245 | at::Tensor out_; |
17246 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
17247 | at::functionalization::impl::sync(out); |
17248 | out_ = at::functionalization::impl::from_functional_tensor(out); |
17249 | } else { |
17250 | out_ = out; |
17251 | } |
17252 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
17253 | if ((false || at::functionalization::impl::isFunctionalTensor(crow_indices) || at::functionalization::impl::isFunctionalTensor(col_indices))) { |
17254 | // case 1: trying to mutate a non-functional tensor with a functional tensor is an error |
17255 | TORCH_INTERNAL_ASSERT(false, |
17256 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
17257 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
17258 | } else { |
17259 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
17260 | at::AutoDispatchSkipFunctionalize guard; |
17261 | at::Tensor tmp_output = at::_ops::_convert_indices_from_csr_to_coo_out::call(crow_indices_, col_indices_, out_int32, transpose, out_); |
17262 | return out; |
17263 | } |
17264 | } else { |
17265 | at::Tensor tmp_output; |
17266 | { |
17267 | at::AutoDispatchSkipFunctionalize guard; |
17268 | tmp_output = at::_ops::_convert_indices_from_csr_to_coo::call(crow_indices_, col_indices_, out_int32, transpose); |
17269 | } |
17270 | at::functionalization::impl::replace_(out, tmp_output); |
17271 | at::functionalization::impl::commit_update(out); |
17272 | at::functionalization::impl::sync(out); |
17273 | return out; |
17274 | } |
17275 | } |
17276 | |
17277 | at::Tensor & multilabel_margin_loss_backward_out_grad_input(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, int64_t reduction, const at::Tensor & is_target, at::Tensor & grad_input) { |
17278 | if (false) { |
17279 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
17280 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
17281 | // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
17282 | auto grad_output_meta = to_meta(grad_output); |
17283 | auto self_meta = to_meta(self); |
17284 | auto target_meta = to_meta(target); |
17285 | auto is_target_meta = to_meta(is_target); |
17286 | auto grad_input_meta = to_meta(grad_input); |
17287 | at::AutoDispatchSkipFunctionalize func_guard; |
17288 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
17289 | at::_ops::multilabel_margin_loss_backward_grad_input::call(grad_output_meta, self_meta, target_meta, reduction, is_target_meta, grad_input_meta); |
17290 | } |
17291 | |
17292 | at::Tensor grad_output_; |
17293 | if (at::functionalization::impl::isFunctionalTensor(grad_output)) { |
17294 | at::functionalization::impl::sync(grad_output); |
17295 | grad_output_ = at::functionalization::impl::from_functional_tensor(grad_output); |
17296 | } else { |
17297 | grad_output_ = grad_output; |
17298 | } |
17299 | |
17300 | at::Tensor self_; |
17301 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
17302 | at::functionalization::impl::sync(self); |
17303 | self_ = at::functionalization::impl::from_functional_tensor(self); |
17304 | } else { |
17305 | self_ = self; |
17306 | } |
17307 | |
17308 | at::Tensor target_; |
17309 | if (at::functionalization::impl::isFunctionalTensor(target)) { |
17310 | at::functionalization::impl::sync(target); |
17311 | target_ = at::functionalization::impl::from_functional_tensor(target); |
17312 | } else { |
17313 | target_ = target; |
17314 | } |
17315 | |
17316 | at::Tensor is_target_; |
17317 | if (at::functionalization::impl::isFunctionalTensor(is_target)) { |
17318 | at::functionalization::impl::sync(is_target); |
17319 | is_target_ = at::functionalization::impl::from_functional_tensor(is_target); |
17320 | } else { |
17321 | is_target_ = is_target; |
17322 | } |
17323 | |
17324 | at::Tensor grad_input_; |
17325 | if (at::functionalization::impl::isFunctionalTensor(grad_input)) { |
17326 | at::functionalization::impl::sync(grad_input); |
17327 | grad_input_ = at::functionalization::impl::from_functional_tensor(grad_input); |
17328 | } else { |
17329 | grad_input_ = grad_input; |
17330 | } |
17331 | if (!(true && at::functionalization::impl::isFunctionalTensor(grad_input))) { |
17332 | if ((false || at::functionalization::impl::isFunctionalTensor(grad_output) || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(target) || at::functionalization::impl::isFunctionalTensor(is_target))) { |
17333 | // case 1: trying to mutate a non-functional tensor with a functional tensor is an error |
17334 | TORCH_INTERNAL_ASSERT(false, |
17335 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
17336 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
17337 | } else { |
17338 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
17339 | at::AutoDispatchSkipFunctionalize guard; |
17340 | at::Tensor tmp_output = at::_ops::multilabel_margin_loss_backward_grad_input::call(grad_output_, self_, target_, reduction, is_target_, grad_input_); |
17341 | return grad_input; |
17342 | } |
17343 | } else { |
17344 | at::Tensor tmp_output; |
17345 | { |
17346 | at::AutoDispatchSkipFunctionalize guard; |
17347 | tmp_output = at::_ops::multilabel_margin_loss_backward::call(grad_output_, self_, target_, reduction, is_target_); |
17348 | } |
17349 | at::functionalization::impl::replace_(grad_input, tmp_output); |
17350 | at::functionalization::impl::commit_update(grad_input); |
17351 | at::functionalization::impl::sync(grad_input); |
17352 | return grad_input; |
17353 | } |
17354 | } |
17355 | |
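| // Optional tensor arguments such as `weight` (c10::optional<at::Tensor>) are handled with the same helpers: |
| // they are sync()'d and unwrapped via from_functional_tensor() when functional, and passed through unchanged otherwise. |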
17356 | at::Tensor & nll_loss_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, c10::SymInt ignore_index, at::Tensor & out) { |
17357 | if (false) { |
17358 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
17359 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
17360 | // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
17361 | auto self_meta = to_meta(self); |
17362 | auto target_meta = to_meta(target); |
17363 | auto weight_meta = to_meta(weight); |
17364 | auto out_meta = to_meta(out); |
17365 | at::AutoDispatchSkipFunctionalize func_guard; |
17366 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
17367 | at::_ops::nll_loss_out::call(self_meta, target_meta, weight_meta, reduction, ignore_index, out_meta); |
17368 | } |
17369 | |
17370 | at::Tensor self_; |
17371 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
17372 | at::functionalization::impl::sync(self); |
17373 | self_ = at::functionalization::impl::from_functional_tensor(self); |
17374 | } else { |
17375 | self_ = self; |
17376 | } |
17377 | |
17378 | at::Tensor target_; |
17379 | if (at::functionalization::impl::isFunctionalTensor(target)) { |
17380 | at::functionalization::impl::sync(target); |
17381 | target_ = at::functionalization::impl::from_functional_tensor(target); |
17382 | } else { |
17383 | target_ = target; |
17384 | } |
17385 | |
17386 | c10::optional<at::Tensor> weight_; |
17387 | if (at::functionalization::impl::isFunctionalTensor(weight)) { |
17388 | at::functionalization::impl::sync(weight); |
17389 | weight_ = at::functionalization::impl::from_functional_tensor(weight); |
17390 | } else { |
17391 | weight_ = weight; |
17392 | } |
17393 | |
17394 | at::Tensor out_; |
17395 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
17396 | at::functionalization::impl::sync(out); |
17397 | out_ = at::functionalization::impl::from_functional_tensor(out); |
17398 | } else { |
17399 | out_ = out; |
17400 | } |
17401 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
17402 | if ((false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(target) || at::functionalization::impl::isFunctionalTensor(weight))) { |
17403 | // case 1: trying to mutate a non-functional tensor with a functional tensor is an error |
17404 | TORCH_INTERNAL_ASSERT(false, |
17405 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
17406 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
17407 | } else { |
17408 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
17409 | at::AutoDispatchSkipFunctionalize guard; |
17410 | at::Tensor tmp_output = at::_ops::nll_loss_out::call(self_, target_, weight_, reduction, ignore_index, out_); |
17411 | return out; |
17412 | } |
17413 | } else { |
17414 | at::Tensor tmp_output; |
17415 | { |
17416 | at::AutoDispatchSkipFunctionalize guard; |
17417 | tmp_output = at::_ops::nll_loss::call(self_, target_, weight_, reduction, ignore_index); |
17418 | } |
17419 | at::functionalization::impl::replace_(out, tmp_output); |
17420 | at::functionalization::impl::commit_update(out); |
17421 | at::functionalization::impl::sync(out); |
17422 | return out; |
17423 | } |
17424 | } |
17425 | |
17426 | at::Tensor & nll_loss_backward_out_grad_input(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, c10::SymInt ignore_index, const at::Tensor & total_weight, at::Tensor & grad_input) { |
17427 | if (false) { |
17428 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
17429 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
17430 | // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
17431 | auto grad_output_meta = to_meta(grad_output); |
17432 | auto self_meta = to_meta(self); |
17433 | auto target_meta = to_meta(target); |
17434 | auto weight_meta = to_meta(weight); |
17435 | auto total_weight_meta = to_meta(total_weight); |
17436 | auto grad_input_meta = to_meta(grad_input); |
17437 | at::AutoDispatchSkipFunctionalize func_guard; |
17438 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
17439 | at::_ops::nll_loss_backward_grad_input::call(grad_output_meta, self_meta, target_meta, weight_meta, reduction, ignore_index, total_weight_meta, grad_input_meta); |
17440 | } |
17441 | |
17442 | at::Tensor grad_output_; |
17443 | if (at::functionalization::impl::isFunctionalTensor(grad_output)) { |
17444 | at::functionalization::impl::sync(grad_output); |
17445 | grad_output_ = at::functionalization::impl::from_functional_tensor(grad_output); |
17446 | } else { |
17447 | grad_output_ = grad_output; |
17448 | } |
17449 | |
17450 | at::Tensor self_; |
17451 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
17452 | at::functionalization::impl::sync(self); |
17453 | self_ = at::functionalization::impl::from_functional_tensor(self); |
17454 | } else { |
17455 | self_ = self; |
17456 | } |
17457 | |
17458 | at::Tensor target_; |
17459 | if (at::functionalization::impl::isFunctionalTensor(target)) { |
17460 | at::functionalization::impl::sync(target); |
17461 | target_ = at::functionalization::impl::from_functional_tensor(target); |
17462 | } else { |
17463 | target_ = target; |
17464 | } |
17465 | |
17466 | c10::optional<at::Tensor> weight_; |
17467 | if (at::functionalization::impl::isFunctionalTensor(weight)) { |
17468 | at::functionalization::impl::sync(weight); |
17469 | weight_ = at::functionalization::impl::from_functional_tensor(weight); |
17470 | } else { |
17471 | weight_ = weight; |
17472 | } |
17473 | |
17474 | at::Tensor total_weight_; |
17475 | if (at::functionalization::impl::isFunctionalTensor(total_weight)) { |
17476 | at::functionalization::impl::sync(total_weight); |
17477 | total_weight_ = at::functionalization::impl::from_functional_tensor(total_weight); |
17478 | } else { |
17479 | total_weight_ = total_weight; |
17480 | } |
17481 | |
17482 | at::Tensor grad_input_; |
17483 | if (at::functionalization::impl::isFunctionalTensor(grad_input)) { |
17484 | at::functionalization::impl::sync(grad_input); |
17485 | grad_input_ = at::functionalization::impl::from_functional_tensor(grad_input); |
17486 | } else { |
17487 | grad_input_ = grad_input; |
17488 | } |
17489 | if (!(true && at::functionalization::impl::isFunctionalTensor(grad_input))) { |
17490 | if ((false || at::functionalization::impl::isFunctionalTensor(grad_output) || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(target) || at::functionalization::impl::isFunctionalTensor(weight) || at::functionalization::impl::isFunctionalTensor(total_weight))) { |
17491 | // case 1: trying to mutate a non-functional tensor with a functional tensor is an error |
17492 | TORCH_INTERNAL_ASSERT(false, |
17493 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
17494 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
17495 | } else { |
17496 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
17497 | at::AutoDispatchSkipFunctionalize guard; |
17498 | at::Tensor tmp_output = at::_ops::nll_loss_backward_grad_input::call(grad_output_, self_, target_, weight_, reduction, ignore_index, total_weight_, grad_input_); |
17499 | return grad_input; |
17500 | } |
17501 | } else { |
17502 | at::Tensor tmp_output; |
17503 | { |
17504 | at::AutoDispatchSkipFunctionalize guard; |
17505 | tmp_output = at::_ops::nll_loss_backward::call(grad_output_, self_, target_, weight_, reduction, ignore_index, total_weight_); |
17506 | } |
17507 | at::functionalization::impl::replace_(grad_input, tmp_output); |
17508 | at::functionalization::impl::commit_update(grad_input); |
17509 | at::functionalization::impl::sync(grad_input); |
17510 | return grad_input; |
17511 | } |
17512 | } |
17513 | |
17514 | at::Tensor & nll_loss2d_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, c10::SymInt ignore_index, at::Tensor & out) { |
17515 | if (false) { |
17516 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
17517 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
17518 | // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
17519 | auto self_meta = to_meta(self); |
17520 | auto target_meta = to_meta(target); |
17521 | auto weight_meta = to_meta(weight); |
17522 | auto out_meta = to_meta(out); |
17523 | at::AutoDispatchSkipFunctionalize func_guard; |
17524 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
17525 | at::_ops::nll_loss2d_out::call(self_meta, target_meta, weight_meta, reduction, ignore_index, out_meta); |
17526 | } |
17527 | |
17528 | at::Tensor self_; |
17529 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
17530 | at::functionalization::impl::sync(self); |
17531 | self_ = at::functionalization::impl::from_functional_tensor(self); |
17532 | } else { |
17533 | self_ = self; |
17534 | } |
17535 | |
17536 | at::Tensor target_; |
17537 | if (at::functionalization::impl::isFunctionalTensor(target)) { |
17538 | at::functionalization::impl::sync(target); |
17539 | target_ = at::functionalization::impl::from_functional_tensor(target); |
17540 | } else { |
17541 | target_ = target; |
17542 | } |
17543 | |
17544 | c10::optional<at::Tensor> weight_; |
17545 | if (at::functionalization::impl::isFunctionalTensor(weight)) { |
17546 | at::functionalization::impl::sync(weight); |
17547 | weight_ = at::functionalization::impl::from_functional_tensor(weight); |
17548 | } else { |
17549 | weight_ = weight; |
17550 | } |
17551 | |
17552 | at::Tensor out_; |
17553 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
17554 | at::functionalization::impl::sync(out); |
17555 | out_ = at::functionalization::impl::from_functional_tensor(out); |
17556 | } else { |
17557 | out_ = out; |
17558 | } |
17559 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
17560 | if ((false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(target) || at::functionalization::impl::isFunctionalTensor(weight))) { |
17561 | // case 1: trying to mutate a non-functional tensor with a functional tensor is an error |
17562 | TORCH_INTERNAL_ASSERT(false, |
17563 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
17564 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
17565 | } else { |
17566 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
17567 | at::AutoDispatchSkipFunctionalize guard; |
17568 | at::Tensor tmp_output = at::_ops::nll_loss2d_out::call(self_, target_, weight_, reduction, ignore_index, out_); |
17569 | return out; |
17570 | } |
17571 | } else { |
17572 | at::Tensor tmp_output; |
17573 | { |
17574 | at::AutoDispatchSkipFunctionalize guard; |
17575 | tmp_output = at::_ops::nll_loss2d::call(self_, target_, weight_, reduction, ignore_index); |
17576 | } |
17577 | at::functionalization::impl::replace_(out, tmp_output); |
17578 | at::functionalization::impl::commit_update(out); |
17579 | at::functionalization::impl::sync(out); |
17580 | return out; |
17581 | } |
17582 | } |
17583 | |
17584 | at::Tensor & nll_loss2d_backward_out_grad_input(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, c10::SymInt ignore_index, const at::Tensor & total_weight, at::Tensor & grad_input) { |
17585 | if (false) { |
17586 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
17587 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
17588 | // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
17589 | auto grad_output_meta = to_meta(grad_output); |
17590 | auto self_meta = to_meta(self); |
17591 | auto target_meta = to_meta(target); |
17592 | auto weight_meta = to_meta(weight); |
17593 | auto total_weight_meta = to_meta(total_weight); |
17594 | auto grad_input_meta = to_meta(grad_input); |
17595 | at::AutoDispatchSkipFunctionalize func_guard; |
17596 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
17597 | at::_ops::nll_loss2d_backward_grad_input::call(grad_output_meta, self_meta, target_meta, weight_meta, reduction, ignore_index, total_weight_meta, grad_input_meta); |
17598 | } |
17599 | |
17600 | at::Tensor grad_output_; |
17601 | if (at::functionalization::impl::isFunctionalTensor(grad_output)) { |
17602 | at::functionalization::impl::sync(grad_output); |
17603 | grad_output_ = at::functionalization::impl::from_functional_tensor(grad_output); |
17604 | } else { |
17605 | grad_output_ = grad_output; |
17606 | } |
17607 | |
17608 | at::Tensor self_; |
17609 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
17610 | at::functionalization::impl::sync(self); |
17611 | self_ = at::functionalization::impl::from_functional_tensor(self); |
17612 | } else { |
17613 | self_ = self; |
17614 | } |
17615 | |
17616 | at::Tensor target_; |
17617 | if (at::functionalization::impl::isFunctionalTensor(target)) { |
17618 | at::functionalization::impl::sync(target); |
17619 | target_ = at::functionalization::impl::from_functional_tensor(target); |
17620 | } else { |
17621 | target_ = target; |
17622 | } |
17623 | |
17624 | c10::optional<at::Tensor> weight_; |
17625 | if (at::functionalization::impl::isFunctionalTensor(weight)) { |
17626 | at::functionalization::impl::sync(weight); |
17627 | weight_ = at::functionalization::impl::from_functional_tensor(weight); |
17628 | } else { |
17629 | weight_ = weight; |
17630 | } |
17631 | |
17632 | at::Tensor total_weight_; |
17633 | if (at::functionalization::impl::isFunctionalTensor(total_weight)) { |
17634 | at::functionalization::impl::sync(total_weight); |
17635 | total_weight_ = at::functionalization::impl::from_functional_tensor(total_weight); |
17636 | } else { |
17637 | total_weight_ = total_weight; |
17638 | } |
17639 | |
17640 | at::Tensor grad_input_; |
17641 | if (at::functionalization::impl::isFunctionalTensor(grad_input)) { |
17642 | at::functionalization::impl::sync(grad_input); |
17643 | grad_input_ = at::functionalization::impl::from_functional_tensor(grad_input); |
17644 | } else { |
17645 | grad_input_ = grad_input; |
17646 | } |
17647 | if (!(true && at::functionalization::impl::isFunctionalTensor(grad_input))) { |
17648 | if ((false || at::functionalization::impl::isFunctionalTensor(grad_output) || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(target) || at::functionalization::impl::isFunctionalTensor(weight) || at::functionalization::impl::isFunctionalTensor(total_weight))) { |
17649 | // case 1: trying to mutate a non-functional tensor with a functional tensor is an error |
17650 | TORCH_INTERNAL_ASSERT(false, |
17651 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
17652 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
17653 | } else { |
17654 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
17655 | at::AutoDispatchSkipFunctionalize guard; |
17656 | at::Tensor tmp_output = at::_ops::nll_loss2d_backward_grad_input::call(grad_output_, self_, target_, weight_, reduction, ignore_index, total_weight_, grad_input_); |
17657 | return grad_input; |
17658 | } |
17659 | } else { |
17660 | at::Tensor tmp_output; |
17661 | { |
17662 | at::AutoDispatchSkipFunctionalize guard; |
17663 | tmp_output = at::_ops::nll_loss2d_backward::call(grad_output_, self_, target_, weight_, reduction, ignore_index, total_weight_); |
17664 | } |
17665 | at::functionalization::impl::replace_(grad_input, tmp_output); |
17666 | at::functionalization::impl::commit_update(grad_input); |
17667 | at::functionalization::impl::sync(grad_input); |
17668 | return grad_input; |
17669 | } |
17670 | } |
17671 | |
17672 | at::Tensor & smooth_l1_loss_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & target, int64_t reduction, double beta, at::Tensor & out) { |
17673 | if (false) { |
17674 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
17675 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
17676 | // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
17677 | auto self_meta = to_meta(self); |
17678 | auto target_meta = to_meta(target); |
17679 | auto out_meta = to_meta(out); |
17680 | at::AutoDispatchSkipFunctionalize func_guard; |
17681 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
17682 | at::_ops::smooth_l1_loss_out::call(self_meta, target_meta, reduction, beta, out_meta); |
17683 | } |
17684 | |
17685 | at::Tensor self_; |
17686 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
17687 | at::functionalization::impl::sync(self); |
17688 | self_ = at::functionalization::impl::from_functional_tensor(self); |
17689 | } else { |
17690 | self_ = self; |
17691 | } |
17692 | |
17693 | at::Tensor target_; |
17694 | if (at::functionalization::impl::isFunctionalTensor(target)) { |
17695 | at::functionalization::impl::sync(target); |
17696 | target_ = at::functionalization::impl::from_functional_tensor(target); |
17697 | } else { |
17698 | target_ = target; |
17699 | } |
17700 | |
17701 | at::Tensor out_; |
17702 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
17703 | at::functionalization::impl::sync(out); |
17704 | out_ = at::functionalization::impl::from_functional_tensor(out); |
17705 | } else { |
17706 | out_ = out; |
17707 | } |
17708 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
17709 | if ((false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(target))) { |
17710 | // case 1: trying to mutate a non-functional tensor with a functional tensor is an error |
17711 | TORCH_INTERNAL_ASSERT(false, |
17712 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
17713 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
17714 | } else { |
17715 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
17716 | at::AutoDispatchSkipFunctionalize guard; |
17717 | at::Tensor tmp_output = at::_ops::smooth_l1_loss_out::call(self_, target_, reduction, beta, out_); |
17718 | return out; |
17719 | } |
17720 | } else { |
17721 | at::Tensor tmp_output; |
17722 | { |
17723 | at::AutoDispatchSkipFunctionalize guard; |
17724 | tmp_output = at::_ops::smooth_l1_loss::call(self_, target_, reduction, beta); |
17725 | } |
17726 | at::functionalization::impl::replace_(out, tmp_output); |
17727 | at::functionalization::impl::commit_update(out); |
17728 | at::functionalization::impl::sync(out); |
17729 | return out; |
17730 | } |
17731 | } |
17732 | |
17733 | at::Tensor & huber_loss_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & target, int64_t reduction, double delta, at::Tensor & out) { |
17734 | if (false) { |
17735 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
17736 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
17737 | // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
17738 | auto self_meta = to_meta(self); |
17739 | auto target_meta = to_meta(target); |
17740 | auto out_meta = to_meta(out); |
17741 | at::AutoDispatchSkipFunctionalize func_guard; |
17742 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
17743 | at::_ops::huber_loss_out::call(self_meta, target_meta, reduction, delta, out_meta); |
17744 | } |
17745 | |
17746 | at::Tensor self_; |
17747 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
17748 | at::functionalization::impl::sync(self); |
17749 | self_ = at::functionalization::impl::from_functional_tensor(self); |
17750 | } else { |
17751 | self_ = self; |
17752 | } |
17753 | |
17754 | at::Tensor target_; |
17755 | if (at::functionalization::impl::isFunctionalTensor(target)) { |
17756 | at::functionalization::impl::sync(target); |
17757 | target_ = at::functionalization::impl::from_functional_tensor(target); |
17758 | } else { |
17759 | target_ = target; |
17760 | } |
17761 | |
17762 | at::Tensor out_; |
17763 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
17764 | at::functionalization::impl::sync(out); |
17765 | out_ = at::functionalization::impl::from_functional_tensor(out); |
17766 | } else { |
17767 | out_ = out; |
17768 | } |
17769 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
17770 | if ((false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(target))) { |
17771 | // case 1: trying to mutate a non-functional tensor with a functional tensor is an error |
17772 | TORCH_INTERNAL_ASSERT(false, |
17773 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
17774 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
17775 | } else { |
17776 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
17777 | at::AutoDispatchSkipFunctionalize guard; |
17778 | at::Tensor tmp_output = at::_ops::huber_loss_out::call(self_, target_, reduction, delta, out_); |
17779 | return out; |
17780 | } |
17781 | } else { |
17782 | at::Tensor tmp_output; |
17783 | { |
17784 | at::AutoDispatchSkipFunctionalize guard; |
17785 | tmp_output = at::_ops::huber_loss::call(self_, target_, reduction, delta); |
17786 | } |
17787 | at::functionalization::impl::replace_(out, tmp_output); |
17788 | at::functionalization::impl::commit_update(out); |
17789 | at::functionalization::impl::sync(out); |
17790 | return out; |
17791 | } |
17792 | } |
17793 | |
17794 | at::Tensor & soft_margin_loss_backward_out_grad_input(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, int64_t reduction, at::Tensor & grad_input) { |
17795 | if (false) { |
17796 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
17797 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
17798 | // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
17799 | auto grad_output_meta = to_meta(grad_output); |
17800 | auto self_meta = to_meta(self); |
17801 | auto target_meta = to_meta(target); |
17802 | auto grad_input_meta = to_meta(grad_input); |
17803 | at::AutoDispatchSkipFunctionalize func_guard; |
17804 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
17805 | at::_ops::soft_margin_loss_backward_grad_input::call(grad_output_meta, self_meta, target_meta, reduction, grad_input_meta); |
17806 | } |
17807 | |
17808 | at::Tensor grad_output_; |
17809 | if (at::functionalization::impl::isFunctionalTensor(grad_output)) { |
17810 | at::functionalization::impl::sync(grad_output); |
17811 | grad_output_ = at::functionalization::impl::from_functional_tensor(grad_output); |
17812 | } else { |
17813 | grad_output_ = grad_output; |
17814 | } |
17815 | |
17816 | at::Tensor self_; |
17817 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
17818 | at::functionalization::impl::sync(self); |
17819 | self_ = at::functionalization::impl::from_functional_tensor(self); |
17820 | } else { |
17821 | self_ = self; |
17822 | } |
17823 | |
17824 | at::Tensor target_; |
17825 | if (at::functionalization::impl::isFunctionalTensor(target)) { |
17826 | at::functionalization::impl::sync(target); |
17827 | target_ = at::functionalization::impl::from_functional_tensor(target); |
17828 | } else { |
17829 | target_ = target; |
17830 | } |
17831 | |
17832 | at::Tensor grad_input_; |
17833 | if (at::functionalization::impl::isFunctionalTensor(grad_input)) { |
17834 | at::functionalization::impl::sync(grad_input); |
17835 | grad_input_ = at::functionalization::impl::from_functional_tensor(grad_input); |
17836 | } else { |
17837 | grad_input_ = grad_input; |
17838 | } |
17839 | if (!(true && at::functionalization::impl::isFunctionalTensor(grad_input))) { |
17840 | if ((false || at::functionalization::impl::isFunctionalTensor(grad_output) || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(target))) { |
17841 | // case 1: trying to mutate a non functional tensor with a functional tensor is an error |
17842 | TORCH_INTERNAL_ASSERT(false, |
17843 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
17844 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
17845 | } else { |
17846 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
17847 | at::AutoDispatchSkipFunctionalize guard; |
17848 | at::Tensor tmp_output = at::_ops::soft_margin_loss_backward_grad_input::call(grad_output_, self_, target_, reduction, grad_input_); |
17849 | return grad_input; |
17850 | } |
17851 | } else { |
17852 | at::Tensor tmp_output; |
17853 | { |
17854 | at::AutoDispatchSkipFunctionalize guard; |
17855 | tmp_output = at::_ops::soft_margin_loss_backward::call(grad_output_, self_, target_, reduction); |
17856 | } |
17857 | at::functionalization::impl::replace_(grad_input, tmp_output); |
17858 | at::functionalization::impl::commit_update(grad_input); |
17859 | at::functionalization::impl::sync(grad_input); |
17860 | return grad_input; |
17861 | } |
17862 | } |
17863 | |
17864 | at::Tensor & elu_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & alpha, const at::Scalar & scale, const at::Scalar & input_scale, at::Tensor & out) { |
17865 | if (false) { |
17866 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
17867 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
17868 | // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
17869 | auto self_meta = to_meta(self); |
17870 | auto out_meta = to_meta(out); |
17871 | at::AutoDispatchSkipFunctionalize func_guard; |
17872 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
17873 | at::_ops::elu_out::call(self_meta, alpha, scale, input_scale, out_meta); |
17874 | } |
17875 | |
17876 | at::Tensor self_; |
17877 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
17878 | at::functionalization::impl::sync(self); |
17879 | self_ = at::functionalization::impl::from_functional_tensor(self); |
17880 | } else { |
17881 | self_ = self; |
17882 | } |
17883 | |
17884 | at::Tensor out_; |
17885 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
17886 | at::functionalization::impl::sync(out); |
17887 | out_ = at::functionalization::impl::from_functional_tensor(out); |
17888 | } else { |
17889 | out_ = out; |
17890 | } |
17891 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
17892 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
17893 | // case 1: trying to mutate a non-functional tensor with a functional tensor is an error |
17894 | TORCH_INTERNAL_ASSERT(false, |
17895 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
17896 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
17897 | } else { |
17898 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
17899 | at::AutoDispatchSkipFunctionalize guard; |
17900 | at::Tensor tmp_output = at::_ops::elu_out::call(self_, alpha, scale, input_scale, out_); |
17901 | return out;
17902 | } |
17903 | } else { |
17904 | at::Tensor tmp_output; |
17905 | { |
17906 | at::AutoDispatchSkipFunctionalize guard; |
17907 | tmp_output = at::_ops::elu::call(self_, alpha, scale, input_scale); |
17908 | } |
17909 | at::functionalization::impl::replace_(out, tmp_output); |
17910 | at::functionalization::impl::commit_update(out); |
17911 | at::functionalization::impl::sync(out); |
17912 | return out; |
17913 | } |
17914 | } |
17915 | |
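// In-place kernels such as elu_ below follow the same structure but mutate `self`: the meta
// shape check is unconditionally enabled (in-place ops all support meta tensors), and on the
// functionalized path the result of the functional op is written back into `self` before it
// is returned.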
17916 | at::Tensor & elu_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & alpha, const at::Scalar & scale, const at::Scalar & input_scale) { |
17917 | if (true) { |
17918 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
17919 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
17920 | // (We can only do this for inplace ops today though, because they technically all support meta tensors).
17921 | auto self_meta = to_meta(self); |
17922 | at::AutoDispatchSkipFunctionalize func_guard; |
17923 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
17924 | at::_ops::elu_::call(self_meta, alpha, scale, input_scale); |
17925 | } |
17926 | |
17927 | at::Tensor self_; |
17928 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
17929 | at::functionalization::impl::sync(self); |
17930 | self_ = at::functionalization::impl::from_functional_tensor(self); |
17931 | } else { |
17932 | self_ = self; |
17933 | } |
17934 | if (!(true && at::functionalization::impl::isFunctionalTensor(self))) { |
17935 | if ((false)) { |
17936 | // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
17937 | TORCH_INTERNAL_ASSERT(false, |
17938 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
17939 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
17940 | } else { |
17941 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
17942 | at::AutoDispatchSkipFunctionalize guard; |
17943 | at::Tensor tmp_output = at::_ops::elu_::call(self_, alpha, scale, input_scale); |
17944 | return self;
17945 | } |
17946 | } else { |
17947 | at::Tensor tmp_output; |
17948 | { |
17949 | at::AutoDispatchSkipFunctionalize guard; |
17950 | tmp_output = at::_ops::elu::call(self_, alpha, scale, input_scale); |
17951 | } |
17952 | at::functionalization::impl::replace_(self, tmp_output); |
17953 | at::functionalization::impl::commit_update(self); |
17954 | at::functionalization::impl::sync(self); |
17955 | return self; |
17956 | } |
17957 | } |
17958 | |
17959 | at::Tensor & glu_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, at::Tensor & out) { |
17960 | if (false) { |
17961 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
17962 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
17963 | // (We can only do this for inplace ops today though, because they technically all support meta tensors).
17964 | auto self_meta = to_meta(self); |
17965 | auto out_meta = to_meta(out); |
17966 | at::AutoDispatchSkipFunctionalize func_guard; |
17967 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
17968 | at::_ops::glu_out::call(self_meta, dim, out_meta); |
17969 | } |
17970 | |
17971 | at::Tensor self_; |
17972 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
17973 | at::functionalization::impl::sync(self); |
17974 | self_ = at::functionalization::impl::from_functional_tensor(self); |
17975 | } else { |
17976 | self_ = self; |
17977 | } |
17978 | |
17979 | at::Tensor out_; |
17980 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
17981 | at::functionalization::impl::sync(out); |
17982 | out_ = at::functionalization::impl::from_functional_tensor(out); |
17983 | } else { |
17984 | out_ = out; |
17985 | } |
17986 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
17987 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
17988 | // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
17989 | TORCH_INTERNAL_ASSERT(false, |
17990 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
17991 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
17992 | } else { |
17993 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
17994 | at::AutoDispatchSkipFunctionalize guard; |
17995 | at::Tensor tmp_output = at::_ops::glu_out::call(self_, dim, out_); |
17996 | return out;
17997 | } |
17998 | } else { |
17999 | at::Tensor tmp_output; |
18000 | { |
18001 | at::AutoDispatchSkipFunctionalize guard; |
18002 | tmp_output = at::_ops::glu::call(self_, dim); |
18003 | } |
18004 | at::functionalization::impl::replace_(out, tmp_output); |
18005 | at::functionalization::impl::commit_update(out); |
18006 | at::functionalization::impl::sync(out); |
18007 | return out; |
18008 | } |
18009 | } |
18010 | |
18011 | at::Tensor & hardsigmoid_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) { |
18012 | if (false) { |
18013 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
18014 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
18015 | // (We can only do this for inplace ops today though, because they technically all support meta tensors).
18016 | auto self_meta = to_meta(self); |
18017 | auto out_meta = to_meta(out); |
18018 | at::AutoDispatchSkipFunctionalize func_guard; |
18019 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
18020 | at::_ops::hardsigmoid_out::call(self_meta, out_meta); |
18021 | } |
18022 | |
18023 | at::Tensor self_; |
18024 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
18025 | at::functionalization::impl::sync(self); |
18026 | self_ = at::functionalization::impl::from_functional_tensor(self); |
18027 | } else { |
18028 | self_ = self; |
18029 | } |
18030 | |
18031 | at::Tensor out_; |
18032 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
18033 | at::functionalization::impl::sync(out); |
18034 | out_ = at::functionalization::impl::from_functional_tensor(out); |
18035 | } else { |
18036 | out_ = out; |
18037 | } |
18038 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
18039 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
18040 | // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
18041 | TORCH_INTERNAL_ASSERT(false, |
18042 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
18043 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
18044 | } else { |
18045 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
18046 | at::AutoDispatchSkipFunctionalize guard; |
18047 | at::Tensor tmp_output = at::_ops::hardsigmoid_out::call(self_, out_); |
18048 | return out;
18049 | } |
18050 | } else { |
18051 | at::Tensor tmp_output; |
18052 | { |
18053 | at::AutoDispatchSkipFunctionalize guard; |
18054 | tmp_output = at::_ops::hardsigmoid::call(self_); |
18055 | } |
18056 | at::functionalization::impl::replace_(out, tmp_output); |
18057 | at::functionalization::impl::commit_update(out); |
18058 | at::functionalization::impl::sync(out); |
18059 | return out; |
18060 | } |
18061 | } |
18062 | |
18063 | at::Tensor & hardsigmoid_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) { |
18064 | if (true) { |
18065 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
18066 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
18067 | // (We can only do this for inplace ops today though, because they technically all support meta tensors).
18068 | auto self_meta = to_meta(self); |
18069 | at::AutoDispatchSkipFunctionalize func_guard; |
18070 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
18071 | at::_ops::hardsigmoid_::call(self_meta); |
18072 | } |
18073 | |
18074 | at::Tensor self_; |
18075 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
18076 | at::functionalization::impl::sync(self); |
18077 | self_ = at::functionalization::impl::from_functional_tensor(self); |
18078 | } else { |
18079 | self_ = self; |
18080 | } |
18081 | if (!(true && at::functionalization::impl::isFunctionalTensor(self))) { |
18082 | if ((false)) { |
18083 | // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
18084 | TORCH_INTERNAL_ASSERT(false, |
18085 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
18086 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
18087 | } else { |
18088 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
18089 | at::AutoDispatchSkipFunctionalize guard; |
18090 | at::Tensor tmp_output = at::_ops::hardsigmoid_::call(self_); |
18091 | return self;
18092 | } |
18093 | } else { |
18094 | at::Tensor tmp_output; |
18095 | { |
18096 | at::AutoDispatchSkipFunctionalize guard; |
18097 | tmp_output = at::_ops::hardsigmoid::call(self_); |
18098 | } |
18099 | at::functionalization::impl::replace_(self, tmp_output); |
18100 | at::functionalization::impl::commit_update(self); |
18101 | at::functionalization::impl::sync(self); |
18102 | return self; |
18103 | } |
18104 | } |
18105 | |
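// Backward out= kernels such as leaky_relu_backward_grad_input below follow the same pattern,
// with grad_input playing the role of the out= argument: the functional backward op is called
// and its result is propagated into the grad_input wrapper.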
18106 | at::Tensor & leaky_relu_backward_out_grad_input(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & negative_slope, bool self_is_result, at::Tensor & grad_input) { |
18107 | if (false) { |
18108 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
18109 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
18110 | // (We can only do this for inplace ops today though, because they technically all support meta tensors).
18111 | auto grad_output_meta = to_meta(grad_output); |
18112 | auto self_meta = to_meta(self); |
18113 | auto grad_input_meta = to_meta(grad_input); |
18114 | at::AutoDispatchSkipFunctionalize func_guard; |
18115 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
18116 | at::_ops::leaky_relu_backward_grad_input::call(grad_output_meta, self_meta, negative_slope, self_is_result, grad_input_meta); |
18117 | } |
18118 | |
18119 | at::Tensor grad_output_; |
18120 | if (at::functionalization::impl::isFunctionalTensor(grad_output)) { |
18121 | at::functionalization::impl::sync(grad_output); |
18122 | grad_output_ = at::functionalization::impl::from_functional_tensor(grad_output); |
18123 | } else { |
18124 | grad_output_ = grad_output; |
18125 | } |
18126 | |
18127 | at::Tensor self_; |
18128 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
18129 | at::functionalization::impl::sync(self); |
18130 | self_ = at::functionalization::impl::from_functional_tensor(self); |
18131 | } else { |
18132 | self_ = self; |
18133 | } |
18134 | |
18135 | at::Tensor grad_input_; |
18136 | if (at::functionalization::impl::isFunctionalTensor(grad_input)) { |
18137 | at::functionalization::impl::sync(grad_input); |
18138 | grad_input_ = at::functionalization::impl::from_functional_tensor(grad_input); |
18139 | } else { |
18140 | grad_input_ = grad_input; |
18141 | } |
18142 | if (!(true && at::functionalization::impl::isFunctionalTensor(grad_input))) { |
18143 | if ((false || at::functionalization::impl::isFunctionalTensor(grad_output) || at::functionalization::impl::isFunctionalTensor(self))) { |
18144 | // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
18145 | TORCH_INTERNAL_ASSERT(false, |
18146 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
18147 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
18148 | } else { |
18149 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
18150 | at::AutoDispatchSkipFunctionalize guard; |
18151 | at::Tensor tmp_output = at::_ops::leaky_relu_backward_grad_input::call(grad_output_, self_, negative_slope, self_is_result, grad_input_); |
18152 | return grad_input;
18153 | } |
18154 | } else { |
18155 | at::Tensor tmp_output; |
18156 | { |
18157 | at::AutoDispatchSkipFunctionalize guard; |
18158 | tmp_output = at::_ops::leaky_relu_backward::call(grad_output_, self_, negative_slope, self_is_result); |
18159 | } |
18160 | at::functionalization::impl::replace_(grad_input, tmp_output); |
18161 | at::functionalization::impl::commit_update(grad_input); |
18162 | at::functionalization::impl::sync(grad_input); |
18163 | return grad_input; |
18164 | } |
18165 | } |
18166 | |
18167 | at::Tensor & softshrink_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & lambd, at::Tensor & out) { |
18168 | if (false) { |
18169 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
18170 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
18171 | // (We can only do this for inplace ops today though, because they technically all support meta tensors).
18172 | auto self_meta = to_meta(self); |
18173 | auto out_meta = to_meta(out); |
18174 | at::AutoDispatchSkipFunctionalize func_guard; |
18175 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
18176 | at::_ops::softshrink_out::call(self_meta, lambd, out_meta); |
18177 | } |
18178 | |
18179 | at::Tensor self_; |
18180 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
18181 | at::functionalization::impl::sync(self); |
18182 | self_ = at::functionalization::impl::from_functional_tensor(self); |
18183 | } else { |
18184 | self_ = self; |
18185 | } |
18186 | |
18187 | at::Tensor out_; |
18188 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
18189 | at::functionalization::impl::sync(out); |
18190 | out_ = at::functionalization::impl::from_functional_tensor(out); |
18191 | } else { |
18192 | out_ = out; |
18193 | } |
18194 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
18195 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
18196 | // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
18197 | TORCH_INTERNAL_ASSERT(false, |
18198 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
18199 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
18200 | } else { |
18201 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
18202 | at::AutoDispatchSkipFunctionalize guard; |
18203 | at::Tensor tmp_output = at::_ops::softshrink_out::call(self_, lambd, out_); |
18204 | return out;
18205 | } |
18206 | } else { |
18207 | at::Tensor tmp_output; |
18208 | { |
18209 | at::AutoDispatchSkipFunctionalize guard; |
18210 | tmp_output = at::_ops::softshrink::call(self_, lambd); |
18211 | } |
18212 | at::functionalization::impl::replace_(out, tmp_output); |
18213 | at::functionalization::impl::commit_update(out); |
18214 | at::functionalization::impl::sync(out); |
18215 | return out; |
18216 | } |
18217 | } |
18218 | |
18219 | at::Tensor & _adaptive_avg_pool2d_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef output_size, at::Tensor & out) { |
18220 | if (false) { |
18221 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
18222 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
18223 | // (We can only do this for inplace ops today though, because they technically all support meta tensors).
18224 | auto self_meta = to_meta(self); |
18225 | auto out_meta = to_meta(out); |
18226 | at::AutoDispatchSkipFunctionalize func_guard; |
18227 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
18228 | at::_ops::_adaptive_avg_pool2d_out::call(self_meta, output_size, out_meta); |
18229 | } |
18230 | |
18231 | at::Tensor self_; |
18232 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
18233 | at::functionalization::impl::sync(self); |
18234 | self_ = at::functionalization::impl::from_functional_tensor(self); |
18235 | } else { |
18236 | self_ = self; |
18237 | } |
18238 | |
18239 | at::Tensor out_; |
18240 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
18241 | at::functionalization::impl::sync(out); |
18242 | out_ = at::functionalization::impl::from_functional_tensor(out); |
18243 | } else { |
18244 | out_ = out; |
18245 | } |
18246 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
18247 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
18248 | // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
18249 | TORCH_INTERNAL_ASSERT(false, |
18250 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
18251 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
18252 | } else { |
18253 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
18254 | at::AutoDispatchSkipFunctionalize guard; |
18255 | at::Tensor tmp_output = at::_ops::_adaptive_avg_pool2d_out::call(self_, output_size, out_); |
18256 | return out;
18257 | } |
18258 | } else { |
18259 | at::Tensor tmp_output; |
18260 | { |
18261 | at::AutoDispatchSkipFunctionalize guard; |
18262 | tmp_output = at::_ops::_adaptive_avg_pool2d::call(self_, output_size); |
18263 | } |
18264 | at::functionalization::impl::replace_(out, tmp_output); |
18265 | at::functionalization::impl::commit_update(out); |
18266 | at::functionalization::impl::sync(out); |
18267 | return out; |
18268 | } |
18269 | } |
18270 | |
18271 | at::Tensor & _adaptive_avg_pool3d_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef output_size, at::Tensor & out) { |
18272 | if (false) { |
18273 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
18274 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
18275 | // (We can only do this for inplace ops today though, because they technically all support meta tensors).
18276 | auto self_meta = to_meta(self); |
18277 | auto out_meta = to_meta(out); |
18278 | at::AutoDispatchSkipFunctionalize func_guard; |
18279 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
18280 | at::_ops::_adaptive_avg_pool3d_out::call(self_meta, output_size, out_meta); |
18281 | } |
18282 | |
18283 | at::Tensor self_; |
18284 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
18285 | at::functionalization::impl::sync(self); |
18286 | self_ = at::functionalization::impl::from_functional_tensor(self); |
18287 | } else { |
18288 | self_ = self; |
18289 | } |
18290 | |
18291 | at::Tensor out_; |
18292 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
18293 | at::functionalization::impl::sync(out); |
18294 | out_ = at::functionalization::impl::from_functional_tensor(out); |
18295 | } else { |
18296 | out_ = out; |
18297 | } |
18298 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
18299 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
18300 | // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
18301 | TORCH_INTERNAL_ASSERT(false, |
18302 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
18303 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
18304 | } else { |
18305 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
18306 | at::AutoDispatchSkipFunctionalize guard; |
18307 | at::Tensor tmp_output = at::_ops::_adaptive_avg_pool3d_out::call(self_, output_size, out_); |
18308 | return out;
18309 | } |
18310 | } else { |
18311 | at::Tensor tmp_output; |
18312 | { |
18313 | at::AutoDispatchSkipFunctionalize guard; |
18314 | tmp_output = at::_ops::_adaptive_avg_pool3d::call(self_, output_size); |
18315 | } |
18316 | at::functionalization::impl::replace_(out, tmp_output); |
18317 | at::functionalization::impl::commit_update(out); |
18318 | at::functionalization::impl::sync(out); |
18319 | return out; |
18320 | } |
18321 | } |
18322 | |
18323 | at::Tensor & avg_pool3d_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, bool ceil_mode, bool count_include_pad, c10::optional<int64_t> divisor_override, at::Tensor & out) { |
18324 | if (false) { |
18325 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
18326 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
18327 | // (We can only do this for inplace ops today though, because they technically all support meta tensors).
18328 | auto self_meta = to_meta(self); |
18329 | auto out_meta = to_meta(out); |
18330 | at::AutoDispatchSkipFunctionalize func_guard; |
18331 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
18332 | at::_ops::avg_pool3d_out::call(self_meta, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override, out_meta); |
18333 | } |
18334 | |
18335 | at::Tensor self_; |
18336 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
18337 | at::functionalization::impl::sync(self); |
18338 | self_ = at::functionalization::impl::from_functional_tensor(self); |
18339 | } else { |
18340 | self_ = self; |
18341 | } |
18342 | |
18343 | at::Tensor out_; |
18344 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
18345 | at::functionalization::impl::sync(out); |
18346 | out_ = at::functionalization::impl::from_functional_tensor(out); |
18347 | } else { |
18348 | out_ = out; |
18349 | } |
18350 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
18351 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
18352 | // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
18353 | TORCH_INTERNAL_ASSERT(false, |
18354 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
18355 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
18356 | } else { |
18357 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
18358 | at::AutoDispatchSkipFunctionalize guard; |
18359 | at::Tensor tmp_output = at::_ops::avg_pool3d_out::call(self_, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override, out_); |
18360 | return out;
18361 | } |
18362 | } else { |
18363 | at::Tensor tmp_output; |
18364 | { |
18365 | at::AutoDispatchSkipFunctionalize guard; |
18366 | tmp_output = at::_ops::avg_pool3d::call(self_, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override); |
18367 | } |
18368 | at::functionalization::impl::replace_(out, tmp_output); |
18369 | at::functionalization::impl::commit_update(out); |
18370 | at::functionalization::impl::sync(out); |
18371 | return out; |
18372 | } |
18373 | } |
18374 | |
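// Kernels with multiple out= arguments (here: output and indices) return a tuple of tensor
// references; on the functionalized path each element of the functional op's result tuple is
// written back into its corresponding output wrapper with its own replace_/commit_update/sync
// sequence.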
18375 | ::std::tuple<at::Tensor &,at::Tensor &> fractional_max_pool3d_out_output(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef output_size, const at::Tensor & random_samples, at::Tensor & output, at::Tensor & indices) { |
18376 | if (false) { |
18377 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
18378 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
18379 | // (We can only do this for inplace ops today though, because they technically all support meta tensors).
18380 | auto self_meta = to_meta(self); |
18381 | auto random_samples_meta = to_meta(random_samples); |
18382 | auto output_meta = to_meta(output); |
18383 | auto indices_meta = to_meta(indices); |
18384 | at::AutoDispatchSkipFunctionalize func_guard; |
18385 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
18386 | at::_ops::fractional_max_pool3d_output::call(self_meta, kernel_size, output_size, random_samples_meta, output_meta, indices_meta); |
18387 | } |
18388 | |
18389 | at::Tensor self_; |
18390 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
18391 | at::functionalization::impl::sync(self); |
18392 | self_ = at::functionalization::impl::from_functional_tensor(self); |
18393 | } else { |
18394 | self_ = self; |
18395 | } |
18396 | |
18397 | at::Tensor random_samples_; |
18398 | if (at::functionalization::impl::isFunctionalTensor(random_samples)) { |
18399 | at::functionalization::impl::sync(random_samples); |
18400 | random_samples_ = at::functionalization::impl::from_functional_tensor(random_samples); |
18401 | } else { |
18402 | random_samples_ = random_samples; |
18403 | } |
18404 | |
18405 | at::Tensor output_; |
18406 | if (at::functionalization::impl::isFunctionalTensor(output)) { |
18407 | at::functionalization::impl::sync(output); |
18408 | output_ = at::functionalization::impl::from_functional_tensor(output); |
18409 | } else { |
18410 | output_ = output; |
18411 | } |
18412 | |
18413 | at::Tensor indices_; |
18414 | if (at::functionalization::impl::isFunctionalTensor(indices)) { |
18415 | at::functionalization::impl::sync(indices); |
18416 | indices_ = at::functionalization::impl::from_functional_tensor(indices); |
18417 | } else { |
18418 | indices_ = indices; |
18419 | } |
18420 | if (!(true && at::functionalization::impl::isFunctionalTensor(output) && at::functionalization::impl::isFunctionalTensor(indices))) { |
18421 | if ((false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(random_samples))) { |
18422 | // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
18423 | TORCH_INTERNAL_ASSERT(false, |
18424 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
18425 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
18426 | } else { |
18427 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
18428 | at::AutoDispatchSkipFunctionalize guard; |
18429 | ::std::tuple<at::Tensor,at::Tensor> tmp_output = at::_ops::fractional_max_pool3d_output::call(self_, kernel_size, output_size, random_samples_, output_, indices_); |
18430 | return ::std::tuple<at::Tensor &,at::Tensor &>(output, indices);
18431 | } |
18432 | } else { |
18433 | ::std::tuple<at::Tensor,at::Tensor> tmp_output; |
18434 | { |
18435 | at::AutoDispatchSkipFunctionalize guard; |
18436 | tmp_output = at::_ops::fractional_max_pool3d::call(self_, kernel_size, output_size, random_samples_); |
18437 | } |
18438 | at::functionalization::impl::replace_(output, std::get<0>(tmp_output)); |
18439 | at::functionalization::impl::commit_update(output); |
18440 | at::functionalization::impl::sync(output); |
18441 | at::functionalization::impl::replace_(indices, std::get<1>(tmp_output)); |
18442 | at::functionalization::impl::commit_update(indices); |
18443 | at::functionalization::impl::sync(indices); |
18444 | return ::std::tuple<at::Tensor &,at::Tensor &>(output, indices); |
18445 | } |
18446 | } |
18447 | |
18448 | at::Tensor & max_pool2d_with_indices_backward_out_grad_input(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, const at::Tensor & indices, at::Tensor & grad_input) { |
18449 | if (false) { |
18450 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
18451 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
18452 | // (We can only do this for inplace ops today though, because they technically all support meta tensors).
18453 | auto grad_output_meta = to_meta(grad_output); |
18454 | auto self_meta = to_meta(self); |
18455 | auto indices_meta = to_meta(indices); |
18456 | auto grad_input_meta = to_meta(grad_input); |
18457 | at::AutoDispatchSkipFunctionalize func_guard; |
18458 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
18459 | at::_ops::max_pool2d_with_indices_backward_grad_input::call(grad_output_meta, self_meta, kernel_size, stride, padding, dilation, ceil_mode, indices_meta, grad_input_meta); |
18460 | } |
18461 | |
18462 | at::Tensor grad_output_; |
18463 | if (at::functionalization::impl::isFunctionalTensor(grad_output)) { |
18464 | at::functionalization::impl::sync(grad_output); |
18465 | grad_output_ = at::functionalization::impl::from_functional_tensor(grad_output); |
18466 | } else { |
18467 | grad_output_ = grad_output; |
18468 | } |
18469 | |
18470 | at::Tensor self_; |
18471 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
18472 | at::functionalization::impl::sync(self); |
18473 | self_ = at::functionalization::impl::from_functional_tensor(self); |
18474 | } else { |
18475 | self_ = self; |
18476 | } |
18477 | |
18478 | at::Tensor indices_; |
18479 | if (at::functionalization::impl::isFunctionalTensor(indices)) { |
18480 | at::functionalization::impl::sync(indices); |
18481 | indices_ = at::functionalization::impl::from_functional_tensor(indices); |
18482 | } else { |
18483 | indices_ = indices; |
18484 | } |
18485 | |
18486 | at::Tensor grad_input_; |
18487 | if (at::functionalization::impl::isFunctionalTensor(grad_input)) { |
18488 | at::functionalization::impl::sync(grad_input); |
18489 | grad_input_ = at::functionalization::impl::from_functional_tensor(grad_input); |
18490 | } else { |
18491 | grad_input_ = grad_input; |
18492 | } |
18493 | if (!(true && at::functionalization::impl::isFunctionalTensor(grad_input))) { |
18494 | if ((false || at::functionalization::impl::isFunctionalTensor(grad_output) || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(indices))) { |
18495 | // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
18496 | TORCH_INTERNAL_ASSERT(false, |
18497 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
18498 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
18499 | } else { |
18500 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
18501 | at::AutoDispatchSkipFunctionalize guard; |
18502 | at::Tensor tmp_output = at::_ops::max_pool2d_with_indices_backward_grad_input::call(grad_output_, self_, kernel_size, stride, padding, dilation, ceil_mode, indices_, grad_input_); |
18503 | return grad_input;
18504 | } |
18505 | } else { |
18506 | at::Tensor tmp_output; |
18507 | { |
18508 | at::AutoDispatchSkipFunctionalize guard; |
18509 | tmp_output = at::_ops::max_pool2d_with_indices_backward::call(grad_output_, self_, kernel_size, stride, padding, dilation, ceil_mode, indices_); |
18510 | } |
18511 | at::functionalization::impl::replace_(grad_input, tmp_output); |
18512 | at::functionalization::impl::commit_update(grad_input); |
18513 | at::functionalization::impl::sync(grad_input); |
18514 | return grad_input; |
18515 | } |
18516 | } |
18517 | |
18518 | at::Tensor & max_unpool3d_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & indices, at::IntArrayRef output_size, at::IntArrayRef stride, at::IntArrayRef padding, at::Tensor & out) { |
18519 | if (false) { |
18520 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
18521 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
18522 | // (We can only do this for inplace ops today though, because they technically all support meta tensors).
18523 | auto self_meta = to_meta(self); |
18524 | auto indices_meta = to_meta(indices); |
18525 | auto out_meta = to_meta(out); |
18526 | at::AutoDispatchSkipFunctionalize func_guard; |
18527 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
18528 | at::_ops::max_unpool3d_out::call(self_meta, indices_meta, output_size, stride, padding, out_meta); |
18529 | } |
18530 | |
18531 | at::Tensor self_; |
18532 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
18533 | at::functionalization::impl::sync(self); |
18534 | self_ = at::functionalization::impl::from_functional_tensor(self); |
18535 | } else { |
18536 | self_ = self; |
18537 | } |
18538 | |
18539 | at::Tensor indices_; |
18540 | if (at::functionalization::impl::isFunctionalTensor(indices)) { |
18541 | at::functionalization::impl::sync(indices); |
18542 | indices_ = at::functionalization::impl::from_functional_tensor(indices); |
18543 | } else { |
18544 | indices_ = indices; |
18545 | } |
18546 | |
18547 | at::Tensor out_; |
18548 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
18549 | at::functionalization::impl::sync(out); |
18550 | out_ = at::functionalization::impl::from_functional_tensor(out); |
18551 | } else { |
18552 | out_ = out; |
18553 | } |
18554 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
18555 | if ((false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(indices))) { |
18556 | // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
18557 | TORCH_INTERNAL_ASSERT(false, |
18558 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
18559 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
18560 | } else { |
18561 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
18562 | at::AutoDispatchSkipFunctionalize guard; |
18563 | at::Tensor tmp_output = at::_ops::max_unpool3d_out::call(self_, indices_, output_size, stride, padding, out_); |
18564 | return out;
18565 | } |
18566 | } else { |
18567 | at::Tensor tmp_output; |
18568 | { |
18569 | at::AutoDispatchSkipFunctionalize guard; |
18570 | tmp_output = at::_ops::max_unpool3d::call(self_, indices_, output_size, stride, padding); |
18571 | } |
18572 | at::functionalization::impl::replace_(out, tmp_output); |
18573 | at::functionalization::impl::commit_update(out); |
18574 | at::functionalization::impl::sync(out); |
18575 | return out; |
18576 | } |
18577 | } |
18578 | |
18579 | at::Tensor & reflection_pad1d_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef padding, at::Tensor & out) { |
18580 | if (false) { |
18581 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
18582 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
18583 | // (We can only do this for inplace ops today though, because they technically all support meta tensors).
18584 | auto self_meta = to_meta(self); |
18585 | auto out_meta = to_meta(out); |
18586 | at::AutoDispatchSkipFunctionalize func_guard; |
18587 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
18588 | at::_ops::reflection_pad1d_out::call(self_meta, padding, out_meta); |
18589 | } |
18590 | |
18591 | at::Tensor self_; |
18592 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
18593 | at::functionalization::impl::sync(self); |
18594 | self_ = at::functionalization::impl::from_functional_tensor(self); |
18595 | } else { |
18596 | self_ = self; |
18597 | } |
18598 | |
18599 | at::Tensor out_; |
18600 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
18601 | at::functionalization::impl::sync(out); |
18602 | out_ = at::functionalization::impl::from_functional_tensor(out); |
18603 | } else { |
18604 | out_ = out; |
18605 | } |
18606 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
18607 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
18608 | // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
18609 | TORCH_INTERNAL_ASSERT(false, |
18610 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
18611 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
18612 | } else { |
18613 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
18614 | at::AutoDispatchSkipFunctionalize guard; |
18615 | at::Tensor tmp_output = at::_ops::reflection_pad1d_out::call(self_, padding, out_); |
18616 | return out;
18617 | } |
18618 | } else { |
18619 | at::Tensor tmp_output; |
18620 | { |
18621 | at::AutoDispatchSkipFunctionalize guard; |
18622 | tmp_output = at::_ops::reflection_pad1d::call(self_, padding); |
18623 | } |
18624 | at::functionalization::impl::replace_(out, tmp_output); |
18625 | at::functionalization::impl::commit_update(out); |
18626 | at::functionalization::impl::sync(out); |
18627 | return out; |
18628 | } |
18629 | } |
18630 | |
18631 | at::Tensor & reflection_pad1d_backward_out_grad_input(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, c10::SymIntArrayRef padding, at::Tensor & grad_input) { |
18632 | if (false) { |
18633 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
18634 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
18635 | // (We can only do this for inplace ops today though, because they technically all support meta tensors).
18636 | auto grad_output_meta = to_meta(grad_output); |
18637 | auto self_meta = to_meta(self); |
18638 | auto grad_input_meta = to_meta(grad_input); |
18639 | at::AutoDispatchSkipFunctionalize func_guard; |
18640 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
18641 | at::_ops::reflection_pad1d_backward_grad_input::call(grad_output_meta, self_meta, padding, grad_input_meta); |
18642 | } |
18643 | |
18644 | at::Tensor grad_output_; |
18645 | if (at::functionalization::impl::isFunctionalTensor(grad_output)) { |
18646 | at::functionalization::impl::sync(grad_output); |
18647 | grad_output_ = at::functionalization::impl::from_functional_tensor(grad_output); |
18648 | } else { |
18649 | grad_output_ = grad_output; |
18650 | } |
18651 | |
18652 | at::Tensor self_; |
18653 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
18654 | at::functionalization::impl::sync(self); |
18655 | self_ = at::functionalization::impl::from_functional_tensor(self); |
18656 | } else { |
18657 | self_ = self; |
18658 | } |
18659 | |
18660 | at::Tensor grad_input_; |
18661 | if (at::functionalization::impl::isFunctionalTensor(grad_input)) { |
18662 | at::functionalization::impl::sync(grad_input); |
18663 | grad_input_ = at::functionalization::impl::from_functional_tensor(grad_input); |
18664 | } else { |
18665 | grad_input_ = grad_input; |
18666 | } |
18667 | if (!(true && at::functionalization::impl::isFunctionalTensor(grad_input))) { |
18668 | if ((false || at::functionalization::impl::isFunctionalTensor(grad_output) || at::functionalization::impl::isFunctionalTensor(self))) { |
18669 | // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
18670 | TORCH_INTERNAL_ASSERT(false, |
18671 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
18672 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
18673 | } else { |
18674 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
18675 | at::AutoDispatchSkipFunctionalize guard; |
18676 | at::Tensor tmp_output = at::_ops::reflection_pad1d_backward_grad_input::call(grad_output_, self_, padding, grad_input_); |
18677 | return grad_input;
18678 | } |
18679 | } else { |
18680 | at::Tensor tmp_output; |
18681 | { |
18682 | at::AutoDispatchSkipFunctionalize guard; |
18683 | tmp_output = at::_ops::reflection_pad1d_backward::call(grad_output_, self_, padding); |
18684 | } |
18685 | at::functionalization::impl::replace_(grad_input, tmp_output); |
18686 | at::functionalization::impl::commit_update(grad_input); |
18687 | at::functionalization::impl::sync(grad_input); |
18688 | return grad_input; |
18689 | } |
18690 | } |
18691 | |
18692 | at::Tensor & reflection_pad2d_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef padding, at::Tensor & out) { |
18693 | if (false) { |
18694 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
18695 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
18696 | // (We can only do this for inplace ops today though, because they technically all support meta tensors).
18697 | auto self_meta = to_meta(self); |
18698 | auto out_meta = to_meta(out); |
18699 | at::AutoDispatchSkipFunctionalize func_guard; |
18700 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
18701 | at::_ops::reflection_pad2d_out::call(self_meta, padding, out_meta); |
18702 | } |
18703 | |
18704 | at::Tensor self_; |
18705 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
18706 | at::functionalization::impl::sync(self); |
18707 | self_ = at::functionalization::impl::from_functional_tensor(self); |
18708 | } else { |
18709 | self_ = self; |
18710 | } |
18711 | |
18712 | at::Tensor out_; |
18713 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
18714 | at::functionalization::impl::sync(out); |
18715 | out_ = at::functionalization::impl::from_functional_tensor(out); |
18716 | } else { |
18717 | out_ = out; |
18718 | } |
18719 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
18720 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
18721 | // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
18722 | TORCH_INTERNAL_ASSERT(false, |
18723 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
18724 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
18725 | } else { |
18726 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
18727 | at::AutoDispatchSkipFunctionalize guard; |
18728 | at::Tensor tmp_output = at::_ops::reflection_pad2d_out::call(self_, padding, out_); |
18729 | return out;
18730 | } |
18731 | } else { |
18732 | at::Tensor tmp_output; |
18733 | { |
18734 | at::AutoDispatchSkipFunctionalize guard; |
18735 | tmp_output = at::_ops::reflection_pad2d::call(self_, padding); |
18736 | } |
18737 | at::functionalization::impl::replace_(out, tmp_output); |
18738 | at::functionalization::impl::commit_update(out); |
18739 | at::functionalization::impl::sync(out); |
18740 | return out; |
18741 | } |
18742 | } |
18743 | |
18744 | at::Tensor & reflection_pad3d_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef padding, at::Tensor & out) { |
18745 | if (false) { |
18746 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
18747 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
18748 | // (We can only do this for inplace ops today though, because they technically all support meta tensors).
18749 | auto self_meta = to_meta(self); |
18750 | auto out_meta = to_meta(out); |
18751 | at::AutoDispatchSkipFunctionalize func_guard; |
18752 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
18753 | at::_ops::reflection_pad3d_out::call(self_meta, padding, out_meta); |
18754 | } |
18755 | |
18756 | at::Tensor self_; |
18757 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
18758 | at::functionalization::impl::sync(self); |
18759 | self_ = at::functionalization::impl::from_functional_tensor(self); |
18760 | } else { |
18761 | self_ = self; |
18762 | } |
18763 | |
18764 | at::Tensor out_; |
18765 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
18766 | at::functionalization::impl::sync(out); |
18767 | out_ = at::functionalization::impl::from_functional_tensor(out); |
18768 | } else { |
18769 | out_ = out; |
18770 | } |
18771 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
18772 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
18773 | // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
18774 | TORCH_INTERNAL_ASSERT(false, |
18775 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
18776 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
18777 | } else { |
18778 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
18779 | at::AutoDispatchSkipFunctionalize guard; |
18780 | at::Tensor tmp_output = at::_ops::reflection_pad3d_out::call(self_, padding, out_); |
18781 | return out;
18782 | } |
18783 | } else { |
18784 | at::Tensor tmp_output; |
18785 | { |
18786 | at::AutoDispatchSkipFunctionalize guard; |
18787 | tmp_output = at::_ops::reflection_pad3d::call(self_, padding); |
18788 | } |
18789 | at::functionalization::impl::replace_(out, tmp_output); |
18790 | at::functionalization::impl::commit_update(out); |
18791 | at::functionalization::impl::sync(out); |
18792 | return out; |
18793 | } |
18794 | } |
18795 | |
18796 | at::Tensor & replication_pad1d_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef padding, at::Tensor & out) { |
18797 | if (false) { |
18798 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
18799 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
18800 | // (We can only do this for inplace ops today though, because they technically all support meta tensors).
18801 | auto self_meta = to_meta(self); |
18802 | auto out_meta = to_meta(out); |
18803 | at::AutoDispatchSkipFunctionalize func_guard; |
18804 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
18805 | at::_ops::replication_pad1d_out::call(self_meta, padding, out_meta); |
18806 | } |
18807 | |
18808 | at::Tensor self_; |
18809 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
18810 | at::functionalization::impl::sync(self); |
18811 | self_ = at::functionalization::impl::from_functional_tensor(self); |
18812 | } else { |
18813 | self_ = self; |
18814 | } |
18815 | |
18816 | at::Tensor out_; |
18817 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
18818 | at::functionalization::impl::sync(out); |
18819 | out_ = at::functionalization::impl::from_functional_tensor(out); |
18820 | } else { |
18821 | out_ = out; |
18822 | } |
18823 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
18824 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
18825 | // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
18826 | TORCH_INTERNAL_ASSERT(false, |
18827 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
18828 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
18829 | } else { |
18830 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
18831 | at::AutoDispatchSkipFunctionalize guard; |
18832 | at::Tensor tmp_output = at::_ops::replication_pad1d_out::call(self_, padding, out_); |
18833 | return out;
18834 | } |
18835 | } else { |
18836 | at::Tensor tmp_output; |
18837 | { |
18838 | at::AutoDispatchSkipFunctionalize guard; |
18839 | tmp_output = at::_ops::replication_pad1d::call(self_, padding); |
18840 | } |
18841 | at::functionalization::impl::replace_(out, tmp_output); |
18842 | at::functionalization::impl::commit_update(out); |
18843 | at::functionalization::impl::sync(out); |
18844 | return out; |
18845 | } |
18846 | } |
18847 | |
18848 | at::Tensor & replication_pad1d_backward_out_grad_input(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, c10::SymIntArrayRef padding, at::Tensor & grad_input) { |
18849 | if (false) { |
18850 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
18851 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
18852 | // (We can only do this for inplace ops today though, because they technically all support meta tensors).
18853 | auto grad_output_meta = to_meta(grad_output); |
18854 | auto self_meta = to_meta(self); |
18855 | auto grad_input_meta = to_meta(grad_input); |
18856 | at::AutoDispatchSkipFunctionalize func_guard; |
18857 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
18858 | at::_ops::replication_pad1d_backward_grad_input::call(grad_output_meta, self_meta, padding, grad_input_meta); |
18859 | } |
18860 | |
18861 | at::Tensor grad_output_; |
18862 | if (at::functionalization::impl::isFunctionalTensor(grad_output)) { |
18863 | at::functionalization::impl::sync(grad_output); |
18864 | grad_output_ = at::functionalization::impl::from_functional_tensor(grad_output); |
18865 | } else { |
18866 | grad_output_ = grad_output; |
18867 | } |
18868 | |
18869 | at::Tensor self_; |
18870 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
18871 | at::functionalization::impl::sync(self); |
18872 | self_ = at::functionalization::impl::from_functional_tensor(self); |
18873 | } else { |
18874 | self_ = self; |
18875 | } |
18876 | |
18877 | at::Tensor grad_input_; |
18878 | if (at::functionalization::impl::isFunctionalTensor(grad_input)) { |
18879 | at::functionalization::impl::sync(grad_input); |
18880 | grad_input_ = at::functionalization::impl::from_functional_tensor(grad_input); |
18881 | } else { |
18882 | grad_input_ = grad_input; |
18883 | } |
18884 | if (!(true && at::functionalization::impl::isFunctionalTensor(grad_input))) { |
18885 | if ((false || at::functionalization::impl::isFunctionalTensor(grad_output) || at::functionalization::impl::isFunctionalTensor(self))) { |
18886 | // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
18887 | TORCH_INTERNAL_ASSERT(false, |
18888 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
18889 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
18890 | } else { |
18891 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
18892 | at::AutoDispatchSkipFunctionalize guard; |
18893 | at::Tensor tmp_output = at::_ops::replication_pad1d_backward_grad_input::call(grad_output_, self_, padding, grad_input_); |
18894 | return grad_input;
18895 | } |
18896 | } else { |
18897 | at::Tensor tmp_output; |
18898 | { |
18899 | at::AutoDispatchSkipFunctionalize guard; |
18900 | tmp_output = at::_ops::replication_pad1d_backward::call(grad_output_, self_, padding); |
18901 | } |
18902 | at::functionalization::impl::replace_(grad_input, tmp_output); |
18903 | at::functionalization::impl::commit_update(grad_input); |
18904 | at::functionalization::impl::sync(grad_input); |
18905 | return grad_input; |
18906 | } |
18907 | } |
18908 | |
18909 | at::Tensor & replication_pad2d_backward_out_grad_input(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, c10::SymIntArrayRef padding, at::Tensor & grad_input) { |
18910 | if (false) { |
18911 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
18912 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
18913 | // (We can only do this for inplace ops today though, because they technically all support meta tensors).
18914 | auto grad_output_meta = to_meta(grad_output); |
18915 | auto self_meta = to_meta(self); |
18916 | auto grad_input_meta = to_meta(grad_input); |
18917 | at::AutoDispatchSkipFunctionalize func_guard; |
18918 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
18919 | at::_ops::replication_pad2d_backward_grad_input::call(grad_output_meta, self_meta, padding, grad_input_meta); |
18920 | } |
18921 | |
18922 | at::Tensor grad_output_; |
18923 | if (at::functionalization::impl::isFunctionalTensor(grad_output)) { |
18924 | at::functionalization::impl::sync(grad_output); |
18925 | grad_output_ = at::functionalization::impl::from_functional_tensor(grad_output); |
18926 | } else { |
18927 | grad_output_ = grad_output; |
18928 | } |
18929 | |
18930 | at::Tensor self_; |
18931 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
18932 | at::functionalization::impl::sync(self); |
18933 | self_ = at::functionalization::impl::from_functional_tensor(self); |
18934 | } else { |
18935 | self_ = self; |
18936 | } |
18937 | |
18938 | at::Tensor grad_input_; |
18939 | if (at::functionalization::impl::isFunctionalTensor(grad_input)) { |
18940 | at::functionalization::impl::sync(grad_input); |
18941 | grad_input_ = at::functionalization::impl::from_functional_tensor(grad_input); |
18942 | } else { |
18943 | grad_input_ = grad_input; |
18944 | } |
18945 | if (!(true && at::functionalization::impl::isFunctionalTensor(grad_input))) { |
18946 | if ((false || at::functionalization::impl::isFunctionalTensor(grad_output) || at::functionalization::impl::isFunctionalTensor(self))) { |
18947 | // case 1: trying to mutate a non-functional tensor with a functional tensor is an error
18948 | TORCH_INTERNAL_ASSERT(false, |
18949 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
18950 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
18951 | } else { |
18952 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
18953 | at::AutoDispatchSkipFunctionalize guard; |
18954 | at::Tensor tmp_output = at::_ops::replication_pad2d_backward_grad_input::call(grad_output_, self_, padding, grad_input_); |
18955 | return grad_input;
18956 | } |
18957 | } else { |
18958 | at::Tensor tmp_output; |
18959 | { |
18960 | at::AutoDispatchSkipFunctionalize guard; |
18961 | tmp_output = at::_ops::replication_pad2d_backward::call(grad_output_, self_, padding); |
18962 | } |
18963 | at::functionalization::impl::replace_(grad_input, tmp_output); |
18964 | at::functionalization::impl::commit_update(grad_input); |
18965 | at::functionalization::impl::sync(grad_input); |
18966 | return grad_input; |
18967 | } |
18968 | } |
18969 | |
18970 | at::Tensor & replication_pad3d_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef padding, at::Tensor & out) { |
18971 | if (false) { |
18972 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
18973 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
18974 |       // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
18975 | auto self_meta = to_meta(self); |
18976 | auto out_meta = to_meta(out); |
18977 | at::AutoDispatchSkipFunctionalize func_guard; |
18978 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
18979 | at::_ops::replication_pad3d_out::call(self_meta, padding, out_meta); |
18980 | } |
18981 | |
18982 | at::Tensor self_; |
18983 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
18984 | at::functionalization::impl::sync(self); |
18985 | self_ = at::functionalization::impl::from_functional_tensor(self); |
18986 | } else { |
18987 | self_ = self; |
18988 | } |
18989 | |
18990 | at::Tensor out_; |
18991 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
18992 | at::functionalization::impl::sync(out); |
18993 | out_ = at::functionalization::impl::from_functional_tensor(out); |
18994 | } else { |
18995 | out_ = out; |
18996 | } |
18997 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
18998 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
18999 |         // case 1: trying to mutate a non-functional tensor with a functional tensor is an error |
19000 | TORCH_INTERNAL_ASSERT(false, |
19001 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
19002 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
19003 | } else { |
19004 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
19005 | at::AutoDispatchSkipFunctionalize guard; |
19006 | at::Tensor tmp_output = at::_ops::replication_pad3d_out::call(self_, padding, out_); |
19007 |         return out; |
19008 | } |
19009 | } else { |
19010 | at::Tensor tmp_output; |
19011 | { |
19012 | at::AutoDispatchSkipFunctionalize guard; |
19013 | tmp_output = at::_ops::replication_pad3d::call(self_, padding); |
19014 | } |
19015 | at::functionalization::impl::replace_(out, tmp_output); |
19016 | at::functionalization::impl::commit_update(out); |
19017 | at::functionalization::impl::sync(out); |
19018 | return out; |
19019 | } |
19020 | } |
19021 | |
19022 | at::Tensor & replication_pad3d_backward_out_grad_input(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, c10::SymIntArrayRef padding, at::Tensor & grad_input) { |
19023 | if (false) { |
19024 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
19025 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
19026 |       // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
19027 | auto grad_output_meta = to_meta(grad_output); |
19028 | auto self_meta = to_meta(self); |
19029 | auto grad_input_meta = to_meta(grad_input); |
19030 | at::AutoDispatchSkipFunctionalize func_guard; |
19031 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
19032 | at::_ops::replication_pad3d_backward_grad_input::call(grad_output_meta, self_meta, padding, grad_input_meta); |
19033 | } |
19034 | |
19035 | at::Tensor grad_output_; |
19036 | if (at::functionalization::impl::isFunctionalTensor(grad_output)) { |
19037 | at::functionalization::impl::sync(grad_output); |
19038 | grad_output_ = at::functionalization::impl::from_functional_tensor(grad_output); |
19039 | } else { |
19040 | grad_output_ = grad_output; |
19041 | } |
19042 | |
19043 | at::Tensor self_; |
19044 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
19045 | at::functionalization::impl::sync(self); |
19046 | self_ = at::functionalization::impl::from_functional_tensor(self); |
19047 | } else { |
19048 | self_ = self; |
19049 | } |
19050 | |
19051 | at::Tensor grad_input_; |
19052 | if (at::functionalization::impl::isFunctionalTensor(grad_input)) { |
19053 | at::functionalization::impl::sync(grad_input); |
19054 | grad_input_ = at::functionalization::impl::from_functional_tensor(grad_input); |
19055 | } else { |
19056 | grad_input_ = grad_input; |
19057 | } |
19058 | if (!(true && at::functionalization::impl::isFunctionalTensor(grad_input))) { |
19059 | if ((false || at::functionalization::impl::isFunctionalTensor(grad_output) || at::functionalization::impl::isFunctionalTensor(self))) { |
19060 |         // case 1: trying to mutate a non-functional tensor with a functional tensor is an error |
19061 | TORCH_INTERNAL_ASSERT(false, |
19062 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
19063 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
19064 | } else { |
19065 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
19066 | at::AutoDispatchSkipFunctionalize guard; |
19067 | at::Tensor tmp_output = at::_ops::replication_pad3d_backward_grad_input::call(grad_output_, self_, padding, grad_input_); |
19068 |         return grad_input; |
19069 | } |
19070 | } else { |
19071 | at::Tensor tmp_output; |
19072 | { |
19073 | at::AutoDispatchSkipFunctionalize guard; |
19074 | tmp_output = at::_ops::replication_pad3d_backward::call(grad_output_, self_, padding); |
19075 | } |
19076 | at::functionalization::impl::replace_(grad_input, tmp_output); |
19077 | at::functionalization::impl::commit_update(grad_input); |
19078 | at::functionalization::impl::sync(grad_input); |
19079 | return grad_input; |
19080 | } |
19081 | } |
19082 | |
19083 | at::Tensor & upsample_nearest2d_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef output_size, c10::optional<double> scales_h, c10::optional<double> scales_w, at::Tensor & out) { |
19084 | if (false) { |
19085 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
19086 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
19087 |       // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
19088 | auto self_meta = to_meta(self); |
19089 | auto out_meta = to_meta(out); |
19090 | at::AutoDispatchSkipFunctionalize func_guard; |
19091 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
19092 | at::_ops::upsample_nearest2d_out::call(self_meta, output_size, scales_h, scales_w, out_meta); |
19093 | } |
19094 | |
19095 | at::Tensor self_; |
19096 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
19097 | at::functionalization::impl::sync(self); |
19098 | self_ = at::functionalization::impl::from_functional_tensor(self); |
19099 | } else { |
19100 | self_ = self; |
19101 | } |
19102 | |
19103 | at::Tensor out_; |
19104 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
19105 | at::functionalization::impl::sync(out); |
19106 | out_ = at::functionalization::impl::from_functional_tensor(out); |
19107 | } else { |
19108 | out_ = out; |
19109 | } |
19110 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
19111 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
19112 |         // case 1: trying to mutate a non-functional tensor with a functional tensor is an error |
19113 | TORCH_INTERNAL_ASSERT(false, |
19114 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
19115 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
19116 | } else { |
19117 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
19118 | at::AutoDispatchSkipFunctionalize guard; |
19119 | at::Tensor tmp_output = at::_ops::upsample_nearest2d_out::call(self_, output_size, scales_h, scales_w, out_); |
19120 |         return out; |
19121 | } |
19122 | } else { |
19123 | at::Tensor tmp_output; |
19124 | { |
19125 | at::AutoDispatchSkipFunctionalize guard; |
19126 | tmp_output = at::_ops::upsample_nearest2d::call(self_, output_size, scales_h, scales_w); |
19127 | } |
19128 | at::functionalization::impl::replace_(out, tmp_output); |
19129 | at::functionalization::impl::commit_update(out); |
19130 | at::functionalization::impl::sync(out); |
19131 | return out; |
19132 | } |
19133 | } |
19134 | |
19135 | at::Tensor & _upsample_nearest_exact2d_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef output_size, c10::optional<double> scales_h, c10::optional<double> scales_w, at::Tensor & out) { |
19136 | if (false) { |
19137 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
19138 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
19139 |       // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
19140 | auto self_meta = to_meta(self); |
19141 | auto out_meta = to_meta(out); |
19142 | at::AutoDispatchSkipFunctionalize func_guard; |
19143 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
19144 | at::_ops::_upsample_nearest_exact2d_out::call(self_meta, output_size, scales_h, scales_w, out_meta); |
19145 | } |
19146 | |
19147 | at::Tensor self_; |
19148 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
19149 | at::functionalization::impl::sync(self); |
19150 | self_ = at::functionalization::impl::from_functional_tensor(self); |
19151 | } else { |
19152 | self_ = self; |
19153 | } |
19154 | |
19155 | at::Tensor out_; |
19156 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
19157 | at::functionalization::impl::sync(out); |
19158 | out_ = at::functionalization::impl::from_functional_tensor(out); |
19159 | } else { |
19160 | out_ = out; |
19161 | } |
19162 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
19163 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
19164 |         // case 1: trying to mutate a non-functional tensor with a functional tensor is an error |
19165 | TORCH_INTERNAL_ASSERT(false, |
19166 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
19167 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
19168 | } else { |
19169 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
19170 | at::AutoDispatchSkipFunctionalize guard; |
19171 | at::Tensor tmp_output = at::_ops::_upsample_nearest_exact2d_out::call(self_, output_size, scales_h, scales_w, out_); |
19172 |         return out; |
19173 | } |
19174 | } else { |
19175 | at::Tensor tmp_output; |
19176 | { |
19177 | at::AutoDispatchSkipFunctionalize guard; |
19178 | tmp_output = at::_ops::_upsample_nearest_exact2d::call(self_, output_size, scales_h, scales_w); |
19179 | } |
19180 | at::functionalization::impl::replace_(out, tmp_output); |
19181 | at::functionalization::impl::commit_update(out); |
19182 | at::functionalization::impl::sync(out); |
19183 | return out; |
19184 | } |
19185 | } |
19186 | |
19187 | at::Tensor & _upsample_nearest_exact2d_backward_out_grad_input(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, c10::optional<double> scales_h, c10::optional<double> scales_w, at::Tensor & grad_input) { |
19188 | if (false) { |
19189 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
19190 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
19191 |       // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
19192 | auto grad_output_meta = to_meta(grad_output); |
19193 | auto grad_input_meta = to_meta(grad_input); |
19194 | at::AutoDispatchSkipFunctionalize func_guard; |
19195 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
19196 | at::_ops::_upsample_nearest_exact2d_backward_grad_input::call(grad_output_meta, output_size, input_size, scales_h, scales_w, grad_input_meta); |
19197 | } |
19198 | |
19199 | at::Tensor grad_output_; |
19200 | if (at::functionalization::impl::isFunctionalTensor(grad_output)) { |
19201 | at::functionalization::impl::sync(grad_output); |
19202 | grad_output_ = at::functionalization::impl::from_functional_tensor(grad_output); |
19203 | } else { |
19204 | grad_output_ = grad_output; |
19205 | } |
19206 | |
19207 | at::Tensor grad_input_; |
19208 | if (at::functionalization::impl::isFunctionalTensor(grad_input)) { |
19209 | at::functionalization::impl::sync(grad_input); |
19210 | grad_input_ = at::functionalization::impl::from_functional_tensor(grad_input); |
19211 | } else { |
19212 | grad_input_ = grad_input; |
19213 | } |
19214 | if (!(true && at::functionalization::impl::isFunctionalTensor(grad_input))) { |
19215 | if ((false || at::functionalization::impl::isFunctionalTensor(grad_output))) { |
19216 |         // case 1: trying to mutate a non-functional tensor with a functional tensor is an error |
19217 | TORCH_INTERNAL_ASSERT(false, |
19218 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
19219 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
19220 | } else { |
19221 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
19222 | at::AutoDispatchSkipFunctionalize guard; |
19223 | at::Tensor tmp_output = at::_ops::_upsample_nearest_exact2d_backward_grad_input::call(grad_output_, output_size, input_size, scales_h, scales_w, grad_input_); |
19224 |         return grad_input; |
19225 | } |
19226 | } else { |
19227 | at::Tensor tmp_output; |
19228 | { |
19229 | at::AutoDispatchSkipFunctionalize guard; |
19230 | tmp_output = at::_ops::_upsample_nearest_exact2d_backward::call(grad_output_, output_size, input_size, scales_h, scales_w); |
19231 | } |
19232 | at::functionalization::impl::replace_(grad_input, tmp_output); |
19233 | at::functionalization::impl::commit_update(grad_input); |
19234 | at::functionalization::impl::sync(grad_input); |
19235 | return grad_input; |
19236 | } |
19237 | } |
19238 | |
19239 | at::Tensor & _upsample_nearest_exact3d_backward_out_grad_input(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, c10::optional<double> scales_d, c10::optional<double> scales_h, c10::optional<double> scales_w, at::Tensor & grad_input) { |
19240 | if (false) { |
19241 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
19242 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
19243 |       // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
19244 | auto grad_output_meta = to_meta(grad_output); |
19245 | auto grad_input_meta = to_meta(grad_input); |
19246 | at::AutoDispatchSkipFunctionalize func_guard; |
19247 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
19248 | at::_ops::_upsample_nearest_exact3d_backward_grad_input::call(grad_output_meta, output_size, input_size, scales_d, scales_h, scales_w, grad_input_meta); |
19249 | } |
19250 | |
19251 | at::Tensor grad_output_; |
19252 | if (at::functionalization::impl::isFunctionalTensor(grad_output)) { |
19253 | at::functionalization::impl::sync(grad_output); |
19254 | grad_output_ = at::functionalization::impl::from_functional_tensor(grad_output); |
19255 | } else { |
19256 | grad_output_ = grad_output; |
19257 | } |
19258 | |
19259 | at::Tensor grad_input_; |
19260 | if (at::functionalization::impl::isFunctionalTensor(grad_input)) { |
19261 | at::functionalization::impl::sync(grad_input); |
19262 | grad_input_ = at::functionalization::impl::from_functional_tensor(grad_input); |
19263 | } else { |
19264 | grad_input_ = grad_input; |
19265 | } |
19266 | if (!(true && at::functionalization::impl::isFunctionalTensor(grad_input))) { |
19267 | if ((false || at::functionalization::impl::isFunctionalTensor(grad_output))) { |
19268 |         // case 1: trying to mutate a non-functional tensor with a functional tensor is an error |
19269 | TORCH_INTERNAL_ASSERT(false, |
19270 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
19271 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
19272 | } else { |
19273 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
19274 | at::AutoDispatchSkipFunctionalize guard; |
19275 | at::Tensor tmp_output = at::_ops::_upsample_nearest_exact3d_backward_grad_input::call(grad_output_, output_size, input_size, scales_d, scales_h, scales_w, grad_input_); |
19276 |         return grad_input; |
19277 | } |
19278 | } else { |
19279 | at::Tensor tmp_output; |
19280 | { |
19281 | at::AutoDispatchSkipFunctionalize guard; |
19282 | tmp_output = at::_ops::_upsample_nearest_exact3d_backward::call(grad_output_, output_size, input_size, scales_d, scales_h, scales_w); |
19283 | } |
19284 | at::functionalization::impl::replace_(grad_input, tmp_output); |
19285 | at::functionalization::impl::commit_update(grad_input); |
19286 | at::functionalization::impl::sync(grad_input); |
19287 | return grad_input; |
19288 | } |
19289 | } |
19290 | |
19291 | at::Tensor & sigmoid_backward_out_grad_input(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & output, at::Tensor & grad_input) { |
19292 | if (false) { |
19293 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
19294 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
19295 |       // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
19296 | auto grad_output_meta = to_meta(grad_output); |
19297 | auto output_meta = to_meta(output); |
19298 | auto grad_input_meta = to_meta(grad_input); |
19299 | at::AutoDispatchSkipFunctionalize func_guard; |
19300 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
19301 | at::_ops::sigmoid_backward_grad_input::call(grad_output_meta, output_meta, grad_input_meta); |
19302 | } |
19303 | |
19304 | at::Tensor grad_output_; |
19305 | if (at::functionalization::impl::isFunctionalTensor(grad_output)) { |
19306 | at::functionalization::impl::sync(grad_output); |
19307 | grad_output_ = at::functionalization::impl::from_functional_tensor(grad_output); |
19308 | } else { |
19309 | grad_output_ = grad_output; |
19310 | } |
19311 | |
19312 | at::Tensor output_; |
19313 | if (at::functionalization::impl::isFunctionalTensor(output)) { |
19314 | at::functionalization::impl::sync(output); |
19315 | output_ = at::functionalization::impl::from_functional_tensor(output); |
19316 | } else { |
19317 | output_ = output; |
19318 | } |
19319 | |
19320 | at::Tensor grad_input_; |
19321 | if (at::functionalization::impl::isFunctionalTensor(grad_input)) { |
19322 | at::functionalization::impl::sync(grad_input); |
19323 | grad_input_ = at::functionalization::impl::from_functional_tensor(grad_input); |
19324 | } else { |
19325 | grad_input_ = grad_input; |
19326 | } |
19327 | if (!(true && at::functionalization::impl::isFunctionalTensor(grad_input))) { |
19328 | if ((false || at::functionalization::impl::isFunctionalTensor(grad_output) || at::functionalization::impl::isFunctionalTensor(output))) { |
19329 |         // case 1: trying to mutate a non-functional tensor with a functional tensor is an error |
19330 | TORCH_INTERNAL_ASSERT(false, |
19331 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
19332 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
19333 | } else { |
19334 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
19335 | at::AutoDispatchSkipFunctionalize guard; |
19336 | at::Tensor tmp_output = at::_ops::sigmoid_backward_grad_input::call(grad_output_, output_, grad_input_); |
19337 |         return grad_input; |
19338 | } |
19339 | } else { |
19340 | at::Tensor tmp_output; |
19341 | { |
19342 | at::AutoDispatchSkipFunctionalize guard; |
19343 | tmp_output = at::_ops::sigmoid_backward::call(grad_output_, output_); |
19344 | } |
19345 | at::functionalization::impl::replace_(grad_input, tmp_output); |
19346 | at::functionalization::impl::commit_update(grad_input); |
19347 | at::functionalization::impl::sync(grad_input); |
19348 | return grad_input; |
19349 | } |
19350 | } |
19351 | |
19352 | at::Tensor & slow_conv_transpose2d_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef output_padding, at::IntArrayRef dilation, at::Tensor & out) { |
19353 | if (false) { |
19354 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
19355 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
19356 |       // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
19357 | auto self_meta = to_meta(self); |
19358 | auto weight_meta = to_meta(weight); |
19359 | auto bias_meta = to_meta(bias); |
19360 | auto out_meta = to_meta(out); |
19361 | at::AutoDispatchSkipFunctionalize func_guard; |
19362 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
19363 | at::_ops::slow_conv_transpose2d_out::call(self_meta, weight_meta, kernel_size, bias_meta, stride, padding, output_padding, dilation, out_meta); |
19364 | } |
19365 | |
19366 | at::Tensor self_; |
19367 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
19368 | at::functionalization::impl::sync(self); |
19369 | self_ = at::functionalization::impl::from_functional_tensor(self); |
19370 | } else { |
19371 | self_ = self; |
19372 | } |
19373 | |
19374 | at::Tensor weight_; |
19375 | if (at::functionalization::impl::isFunctionalTensor(weight)) { |
19376 | at::functionalization::impl::sync(weight); |
19377 | weight_ = at::functionalization::impl::from_functional_tensor(weight); |
19378 | } else { |
19379 | weight_ = weight; |
19380 | } |
19381 | |
19382 | c10::optional<at::Tensor> bias_; |
19383 | if (at::functionalization::impl::isFunctionalTensor(bias)) { |
19384 | at::functionalization::impl::sync(bias); |
19385 | bias_ = at::functionalization::impl::from_functional_tensor(bias); |
19386 | } else { |
19387 | bias_ = bias; |
19388 | } |
19389 | |
19390 | at::Tensor out_; |
19391 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
19392 | at::functionalization::impl::sync(out); |
19393 | out_ = at::functionalization::impl::from_functional_tensor(out); |
19394 | } else { |
19395 | out_ = out; |
19396 | } |
19397 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
19398 | if ((false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(weight) || at::functionalization::impl::isFunctionalTensor(bias))) { |
19399 |         // case 1: trying to mutate a non-functional tensor with a functional tensor is an error |
19400 | TORCH_INTERNAL_ASSERT(false, |
19401 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
19402 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
19403 | } else { |
19404 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
19405 | at::AutoDispatchSkipFunctionalize guard; |
19406 | at::Tensor tmp_output = at::_ops::slow_conv_transpose2d_out::call(self_, weight_, kernel_size, bias_, stride, padding, output_padding, dilation, out_); |
19407 |         return out; |
19408 | } |
19409 | } else { |
19410 | at::Tensor tmp_output; |
19411 | { |
19412 | at::AutoDispatchSkipFunctionalize guard; |
19413 | tmp_output = at::_ops::slow_conv_transpose2d::call(self_, weight_, kernel_size, bias_, stride, padding, output_padding, dilation); |
19414 | } |
19415 | at::functionalization::impl::replace_(out, tmp_output); |
19416 | at::functionalization::impl::commit_update(out); |
19417 | at::functionalization::impl::sync(out); |
19418 | return out; |
19419 | } |
19420 | } |
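
// The convolution-style wrappers in this stretch additionally take an optional bias tensor. It is unwrapped
// through the same sync()/from_functional_tensor() path into a c10::optional<at::Tensor>, and it participates
// in the case-1 "functional input written into a non-functional output" check exactly like the required
// tensor arguments.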
19421 | |
19422 | at::Tensor & _slow_conv2d_forward_out_output(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::Tensor & output) { |
19423 | if (false) { |
19424 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
19425 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
19426 |       // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
19427 | auto self_meta = to_meta(self); |
19428 | auto weight_meta = to_meta(weight); |
19429 | auto bias_meta = to_meta(bias); |
19430 | auto output_meta = to_meta(output); |
19431 | at::AutoDispatchSkipFunctionalize func_guard; |
19432 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
19433 | at::_ops::_slow_conv2d_forward_output::call(self_meta, weight_meta, kernel_size, bias_meta, stride, padding, output_meta); |
19434 | } |
19435 | |
19436 | at::Tensor self_; |
19437 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
19438 | at::functionalization::impl::sync(self); |
19439 | self_ = at::functionalization::impl::from_functional_tensor(self); |
19440 | } else { |
19441 | self_ = self; |
19442 | } |
19443 | |
19444 | at::Tensor weight_; |
19445 | if (at::functionalization::impl::isFunctionalTensor(weight)) { |
19446 | at::functionalization::impl::sync(weight); |
19447 | weight_ = at::functionalization::impl::from_functional_tensor(weight); |
19448 | } else { |
19449 | weight_ = weight; |
19450 | } |
19451 | |
19452 | c10::optional<at::Tensor> bias_; |
19453 | if (at::functionalization::impl::isFunctionalTensor(bias)) { |
19454 | at::functionalization::impl::sync(bias); |
19455 | bias_ = at::functionalization::impl::from_functional_tensor(bias); |
19456 | } else { |
19457 | bias_ = bias; |
19458 | } |
19459 | |
19460 | at::Tensor output_; |
19461 | if (at::functionalization::impl::isFunctionalTensor(output)) { |
19462 | at::functionalization::impl::sync(output); |
19463 | output_ = at::functionalization::impl::from_functional_tensor(output); |
19464 | } else { |
19465 | output_ = output; |
19466 | } |
19467 | if (!(true && at::functionalization::impl::isFunctionalTensor(output))) { |
19468 | if ((false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(weight) || at::functionalization::impl::isFunctionalTensor(bias))) { |
19469 |         // case 1: trying to mutate a non-functional tensor with a functional tensor is an error |
19470 | TORCH_INTERNAL_ASSERT(false, |
19471 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
19472 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
19473 | } else { |
19474 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
19475 | at::AutoDispatchSkipFunctionalize guard; |
19476 | at::Tensor tmp_output = at::_ops::_slow_conv2d_forward_output::call(self_, weight_, kernel_size, bias_, stride, padding, output_); |
19477 |         return output; |
19478 | } |
19479 | } else { |
19480 | at::Tensor tmp_output; |
19481 | { |
19482 | at::AutoDispatchSkipFunctionalize guard; |
19483 | tmp_output = at::_ops::_slow_conv2d_forward::call(self_, weight_, kernel_size, bias_, stride, padding); |
19484 | } |
19485 | at::functionalization::impl::replace_(output, tmp_output); |
19486 | at::functionalization::impl::commit_update(output); |
19487 | at::functionalization::impl::sync(output); |
19488 | return output; |
19489 | } |
19490 | } |
19491 | |
19492 | at::Tensor & conv_depthwise3d_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, c10::SymIntArrayRef padding, at::IntArrayRef dilation, at::Tensor & out) { |
19493 | if (false) { |
19494 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
19495 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
19496 |       // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
19497 | auto self_meta = to_meta(self); |
19498 | auto weight_meta = to_meta(weight); |
19499 | auto bias_meta = to_meta(bias); |
19500 | auto out_meta = to_meta(out); |
19501 | at::AutoDispatchSkipFunctionalize func_guard; |
19502 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
19503 | at::_ops::conv_depthwise3d_out::call(self_meta, weight_meta, kernel_size, bias_meta, stride, padding, dilation, out_meta); |
19504 | } |
19505 | |
19506 | at::Tensor self_; |
19507 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
19508 | at::functionalization::impl::sync(self); |
19509 | self_ = at::functionalization::impl::from_functional_tensor(self); |
19510 | } else { |
19511 | self_ = self; |
19512 | } |
19513 | |
19514 | at::Tensor weight_; |
19515 | if (at::functionalization::impl::isFunctionalTensor(weight)) { |
19516 | at::functionalization::impl::sync(weight); |
19517 | weight_ = at::functionalization::impl::from_functional_tensor(weight); |
19518 | } else { |
19519 | weight_ = weight; |
19520 | } |
19521 | |
19522 | c10::optional<at::Tensor> bias_; |
19523 | if (at::functionalization::impl::isFunctionalTensor(bias)) { |
19524 | at::functionalization::impl::sync(bias); |
19525 | bias_ = at::functionalization::impl::from_functional_tensor(bias); |
19526 | } else { |
19527 | bias_ = bias; |
19528 | } |
19529 | |
19530 | at::Tensor out_; |
19531 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
19532 | at::functionalization::impl::sync(out); |
19533 | out_ = at::functionalization::impl::from_functional_tensor(out); |
19534 | } else { |
19535 | out_ = out; |
19536 | } |
19537 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
19538 | if ((false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(weight) || at::functionalization::impl::isFunctionalTensor(bias))) { |
19539 |         // case 1: trying to mutate a non-functional tensor with a functional tensor is an error |
19540 | TORCH_INTERNAL_ASSERT(false, |
19541 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
19542 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
19543 | } else { |
19544 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
19545 | at::AutoDispatchSkipFunctionalize guard; |
19546 | at::Tensor tmp_output = at::_ops::conv_depthwise3d_out::call(self_, weight_, kernel_size, bias_, stride, padding, dilation, out_); |
19547 |         return out; |
19548 | } |
19549 | } else { |
19550 | at::Tensor tmp_output; |
19551 | { |
19552 | at::AutoDispatchSkipFunctionalize guard; |
19553 | tmp_output = at::_ops::conv_depthwise3d::call(self_, weight_, kernel_size, bias_, stride, padding, dilation); |
19554 | } |
19555 | at::functionalization::impl::replace_(out, tmp_output); |
19556 | at::functionalization::impl::commit_update(out); |
19557 | at::functionalization::impl::sync(out); |
19558 | return out; |
19559 | } |
19560 | } |
19561 | |
19562 | at::Tensor & slow_conv_dilated2d_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, c10::SymIntArrayRef padding, at::IntArrayRef dilation, at::Tensor & out) { |
19563 | if (false) { |
19564 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
19565 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
19566 |       // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
19567 | auto self_meta = to_meta(self); |
19568 | auto weight_meta = to_meta(weight); |
19569 | auto bias_meta = to_meta(bias); |
19570 | auto out_meta = to_meta(out); |
19571 | at::AutoDispatchSkipFunctionalize func_guard; |
19572 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
19573 | at::_ops::slow_conv_dilated2d_out::call(self_meta, weight_meta, kernel_size, bias_meta, stride, padding, dilation, out_meta); |
19574 | } |
19575 | |
19576 | at::Tensor self_; |
19577 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
19578 | at::functionalization::impl::sync(self); |
19579 | self_ = at::functionalization::impl::from_functional_tensor(self); |
19580 | } else { |
19581 | self_ = self; |
19582 | } |
19583 | |
19584 | at::Tensor weight_; |
19585 | if (at::functionalization::impl::isFunctionalTensor(weight)) { |
19586 | at::functionalization::impl::sync(weight); |
19587 | weight_ = at::functionalization::impl::from_functional_tensor(weight); |
19588 | } else { |
19589 | weight_ = weight; |
19590 | } |
19591 | |
19592 | c10::optional<at::Tensor> bias_; |
19593 | if (at::functionalization::impl::isFunctionalTensor(bias)) { |
19594 | at::functionalization::impl::sync(bias); |
19595 | bias_ = at::functionalization::impl::from_functional_tensor(bias); |
19596 | } else { |
19597 | bias_ = bias; |
19598 | } |
19599 | |
19600 | at::Tensor out_; |
19601 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
19602 | at::functionalization::impl::sync(out); |
19603 | out_ = at::functionalization::impl::from_functional_tensor(out); |
19604 | } else { |
19605 | out_ = out; |
19606 | } |
19607 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
19608 | if ((false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(weight) || at::functionalization::impl::isFunctionalTensor(bias))) { |
19609 |         // case 1: trying to mutate a non-functional tensor with a functional tensor is an error |
19610 | TORCH_INTERNAL_ASSERT(false, |
19611 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
19612 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
19613 | } else { |
19614 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
19615 | at::AutoDispatchSkipFunctionalize guard; |
19616 | at::Tensor tmp_output = at::_ops::slow_conv_dilated2d_out::call(self_, weight_, kernel_size, bias_, stride, padding, dilation, out_); |
19617 |         return out; |
19618 | } |
19619 | } else { |
19620 | at::Tensor tmp_output; |
19621 | { |
19622 | at::AutoDispatchSkipFunctionalize guard; |
19623 | tmp_output = at::_ops::slow_conv_dilated2d::call(self_, weight_, kernel_size, bias_, stride, padding, dilation); |
19624 | } |
19625 | at::functionalization::impl::replace_(out, tmp_output); |
19626 | at::functionalization::impl::commit_update(out); |
19627 | at::functionalization::impl::sync(out); |
19628 | return out; |
19629 | } |
19630 | } |
19631 | |
19632 | at::Tensor & special_ndtri_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) { |
19633 | if (false) { |
19634 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
19635 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
19636 |       // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
19637 | auto self_meta = to_meta(self); |
19638 | auto out_meta = to_meta(out); |
19639 | at::AutoDispatchSkipFunctionalize func_guard; |
19640 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
19641 | at::_ops::special_ndtri_out::call(self_meta, out_meta); |
19642 | } |
19643 | |
19644 | at::Tensor self_; |
19645 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
19646 | at::functionalization::impl::sync(self); |
19647 | self_ = at::functionalization::impl::from_functional_tensor(self); |
19648 | } else { |
19649 | self_ = self; |
19650 | } |
19651 | |
19652 | at::Tensor out_; |
19653 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
19654 | at::functionalization::impl::sync(out); |
19655 | out_ = at::functionalization::impl::from_functional_tensor(out); |
19656 | } else { |
19657 | out_ = out; |
19658 | } |
19659 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
19660 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
19661 |         // case 1: trying to mutate a non-functional tensor with a functional tensor is an error |
19662 | TORCH_INTERNAL_ASSERT(false, |
19663 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
19664 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
19665 | } else { |
19666 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
19667 | at::AutoDispatchSkipFunctionalize guard; |
19668 | at::Tensor tmp_output = at::_ops::special_ndtri_out::call(self_, out_); |
19669 |         return out; |
19670 | } |
19671 | } else { |
19672 | at::Tensor tmp_output; |
19673 | { |
19674 | at::AutoDispatchSkipFunctionalize guard; |
19675 | tmp_output = at::_ops::special_ndtri::call(self_); |
19676 | } |
19677 | at::functionalization::impl::replace_(out, tmp_output); |
19678 | at::functionalization::impl::commit_update(out); |
19679 | at::functionalization::impl::sync(out); |
19680 | return out; |
19681 | } |
19682 | } |
19683 | |
19684 | at::Tensor & special_erfc_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) { |
19685 | if (false) { |
19686 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
19687 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
19688 |       // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
19689 | auto self_meta = to_meta(self); |
19690 | auto out_meta = to_meta(out); |
19691 | at::AutoDispatchSkipFunctionalize func_guard; |
19692 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
19693 | at::_ops::special_erfc_out::call(self_meta, out_meta); |
19694 | } |
19695 | |
19696 | at::Tensor self_; |
19697 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
19698 | at::functionalization::impl::sync(self); |
19699 | self_ = at::functionalization::impl::from_functional_tensor(self); |
19700 | } else { |
19701 | self_ = self; |
19702 | } |
19703 | |
19704 | at::Tensor out_; |
19705 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
19706 | at::functionalization::impl::sync(out); |
19707 | out_ = at::functionalization::impl::from_functional_tensor(out); |
19708 | } else { |
19709 | out_ = out; |
19710 | } |
19711 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
19712 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
19713 |         // case 1: trying to mutate a non-functional tensor with a functional tensor is an error |
19714 | TORCH_INTERNAL_ASSERT(false, |
19715 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
19716 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
19717 | } else { |
19718 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
19719 | at::AutoDispatchSkipFunctionalize guard; |
19720 | at::Tensor tmp_output = at::_ops::special_erfc_out::call(self_, out_); |
19721 |         return out; |
19722 | } |
19723 | } else { |
19724 | at::Tensor tmp_output; |
19725 | { |
19726 | at::AutoDispatchSkipFunctionalize guard; |
19727 | tmp_output = at::_ops::special_erfc::call(self_); |
19728 | } |
19729 | at::functionalization::impl::replace_(out, tmp_output); |
19730 | at::functionalization::impl::commit_update(out); |
19731 | at::functionalization::impl::sync(out); |
19732 | return out; |
19733 | } |
19734 | } |
19735 | |
19736 | at::Tensor & special_logit_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::optional<double> eps, at::Tensor & out) { |
19737 | if (false) { |
19738 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
19739 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
19740 |       // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
19741 | auto self_meta = to_meta(self); |
19742 | auto out_meta = to_meta(out); |
19743 | at::AutoDispatchSkipFunctionalize func_guard; |
19744 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
19745 | at::_ops::special_logit_out::call(self_meta, eps, out_meta); |
19746 | } |
19747 | |
19748 | at::Tensor self_; |
19749 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
19750 | at::functionalization::impl::sync(self); |
19751 | self_ = at::functionalization::impl::from_functional_tensor(self); |
19752 | } else { |
19753 | self_ = self; |
19754 | } |
19755 | |
19756 | at::Tensor out_; |
19757 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
19758 | at::functionalization::impl::sync(out); |
19759 | out_ = at::functionalization::impl::from_functional_tensor(out); |
19760 | } else { |
19761 | out_ = out; |
19762 | } |
19763 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
19764 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
19765 |         // case 1: trying to mutate a non-functional tensor with a functional tensor is an error |
19766 | TORCH_INTERNAL_ASSERT(false, |
19767 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
19768 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
19769 | } else { |
19770 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
19771 | at::AutoDispatchSkipFunctionalize guard; |
19772 | at::Tensor tmp_output = at::_ops::special_logit_out::call(self_, eps, out_); |
19773 |         return out; |
19774 | } |
19775 | } else { |
19776 | at::Tensor tmp_output; |
19777 | { |
19778 | at::AutoDispatchSkipFunctionalize guard; |
19779 | tmp_output = at::_ops::special_logit::call(self_, eps); |
19780 | } |
19781 | at::functionalization::impl::replace_(out, tmp_output); |
19782 | at::functionalization::impl::commit_update(out); |
19783 | at::functionalization::impl::sync(out); |
19784 | return out; |
19785 | } |
19786 | } |
19787 | |
19788 | at::Tensor & special_polygamma_out_out(c10::DispatchKeySet dispatchKeySet, int64_t n, const at::Tensor & self, at::Tensor & out) { |
19789 | if (false) { |
19790 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
19791 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
19792 |       // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
19793 | auto self_meta = to_meta(self); |
19794 | auto out_meta = to_meta(out); |
19795 | at::AutoDispatchSkipFunctionalize func_guard; |
19796 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
19797 | at::_ops::special_polygamma_out::call(n, self_meta, out_meta); |
19798 | } |
19799 | |
19800 | at::Tensor self_; |
19801 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
19802 | at::functionalization::impl::sync(self); |
19803 | self_ = at::functionalization::impl::from_functional_tensor(self); |
19804 | } else { |
19805 | self_ = self; |
19806 | } |
19807 | |
19808 | at::Tensor out_; |
19809 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
19810 | at::functionalization::impl::sync(out); |
19811 | out_ = at::functionalization::impl::from_functional_tensor(out); |
19812 | } else { |
19813 | out_ = out; |
19814 | } |
19815 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
19816 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
19817 |         // case 1: trying to mutate a non-functional tensor with a functional tensor is an error |
19818 | TORCH_INTERNAL_ASSERT(false, |
19819 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
19820 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
19821 | } else { |
19822 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
19823 | at::AutoDispatchSkipFunctionalize guard; |
19824 | at::Tensor tmp_output = at::_ops::special_polygamma_out::call(n, self_, out_); |
19825 |         return out; |
19826 | } |
19827 | } else { |
19828 | at::Tensor tmp_output; |
19829 | { |
19830 | at::AutoDispatchSkipFunctionalize guard; |
19831 | tmp_output = at::_ops::special_polygamma::call(n, self_); |
19832 | } |
19833 | at::functionalization::impl::replace_(out, tmp_output); |
19834 | at::functionalization::impl::commit_update(out); |
19835 | at::functionalization::impl::sync(out); |
19836 | return out; |
19837 | } |
19838 | } |
19839 | |
19840 | at::Tensor & special_sinc_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) { |
19841 | if (false) { |
19842 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
19843 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
19844 |       // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
19845 | auto self_meta = to_meta(self); |
19846 | auto out_meta = to_meta(out); |
19847 | at::AutoDispatchSkipFunctionalize func_guard; |
19848 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
19849 | at::_ops::special_sinc_out::call(self_meta, out_meta); |
19850 | } |
19851 | |
19852 | at::Tensor self_; |
19853 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
19854 | at::functionalization::impl::sync(self); |
19855 | self_ = at::functionalization::impl::from_functional_tensor(self); |
19856 | } else { |
19857 | self_ = self; |
19858 | } |
19859 | |
19860 | at::Tensor out_; |
19861 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
19862 | at::functionalization::impl::sync(out); |
19863 | out_ = at::functionalization::impl::from_functional_tensor(out); |
19864 | } else { |
19865 | out_ = out; |
19866 | } |
19867 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
19868 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
19869 |         // case 1: trying to mutate a non-functional tensor with a functional tensor is an error |
19870 | TORCH_INTERNAL_ASSERT(false, |
19871 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
19872 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
19873 | } else { |
19874 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
19875 | at::AutoDispatchSkipFunctionalize guard; |
19876 | at::Tensor tmp_output = at::_ops::special_sinc_out::call(self_, out_); |
19877 |         return out; |
19878 | } |
19879 | } else { |
19880 | at::Tensor tmp_output; |
19881 | { |
19882 | at::AutoDispatchSkipFunctionalize guard; |
19883 | tmp_output = at::_ops::special_sinc::call(self_); |
19884 | } |
19885 | at::functionalization::impl::replace_(out, tmp_output); |
19886 | at::functionalization::impl::commit_update(out); |
19887 | at::functionalization::impl::sync(out); |
19888 | return out; |
19889 | } |
19890 | } |
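
// The unary special_* wrappers above and the fft_* wrappers below are the minimal form of the pattern: only
// `self` and `out` are unwrapped, while non-tensor arguments (n, dim, norm, s, eps, ...) are forwarded to the
// redispatched call unchanged.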
19891 | |
19892 | at::Tensor & fft_ifft_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::optional<int64_t> n, int64_t dim, c10::optional<c10::string_view> norm, at::Tensor & out) { |
19893 | if (false) { |
19894 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
19895 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
19896 |       // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
19897 | auto self_meta = to_meta(self); |
19898 | auto out_meta = to_meta(out); |
19899 | at::AutoDispatchSkipFunctionalize func_guard; |
19900 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
19901 | at::_ops::fft_ifft_out::call(self_meta, n, dim, norm, out_meta); |
19902 | } |
19903 | |
19904 | at::Tensor self_; |
19905 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
19906 | at::functionalization::impl::sync(self); |
19907 | self_ = at::functionalization::impl::from_functional_tensor(self); |
19908 | } else { |
19909 | self_ = self; |
19910 | } |
19911 | |
19912 | at::Tensor out_; |
19913 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
19914 | at::functionalization::impl::sync(out); |
19915 | out_ = at::functionalization::impl::from_functional_tensor(out); |
19916 | } else { |
19917 | out_ = out; |
19918 | } |
19919 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
19920 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
19921 |         // case 1: trying to mutate a non-functional tensor with a functional tensor is an error |
19922 | TORCH_INTERNAL_ASSERT(false, |
19923 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
19924 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
19925 | } else { |
19926 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
19927 | at::AutoDispatchSkipFunctionalize guard; |
19928 | at::Tensor tmp_output = at::_ops::fft_ifft_out::call(self_, n, dim, norm, out_); |
19929 |         return out; |
19930 | } |
19931 | } else { |
19932 | at::Tensor tmp_output; |
19933 | { |
19934 | at::AutoDispatchSkipFunctionalize guard; |
19935 | tmp_output = at::_ops::fft_ifft::call(self_, n, dim, norm); |
19936 | } |
19937 | at::functionalization::impl::replace_(out, tmp_output); |
19938 | at::functionalization::impl::commit_update(out); |
19939 | at::functionalization::impl::sync(out); |
19940 | return out; |
19941 | } |
19942 | } |
19943 | |
19944 | at::Tensor & fft_ihfft_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::optional<int64_t> n, int64_t dim, c10::optional<c10::string_view> norm, at::Tensor & out) { |
19945 | if (false) { |
19946 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
19947 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
19948 |       // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
19949 | auto self_meta = to_meta(self); |
19950 | auto out_meta = to_meta(out); |
19951 | at::AutoDispatchSkipFunctionalize func_guard; |
19952 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
19953 | at::_ops::fft_ihfft_out::call(self_meta, n, dim, norm, out_meta); |
19954 | } |
19955 | |
19956 | at::Tensor self_; |
19957 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
19958 | at::functionalization::impl::sync(self); |
19959 | self_ = at::functionalization::impl::from_functional_tensor(self); |
19960 | } else { |
19961 | self_ = self; |
19962 | } |
19963 | |
19964 | at::Tensor out_; |
19965 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
19966 | at::functionalization::impl::sync(out); |
19967 | out_ = at::functionalization::impl::from_functional_tensor(out); |
19968 | } else { |
19969 | out_ = out; |
19970 | } |
19971 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
19972 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
19973 |         // case 1: trying to mutate a non-functional tensor with a functional tensor is an error |
19974 | TORCH_INTERNAL_ASSERT(false, |
19975 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
19976 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
19977 | } else { |
19978 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
19979 | at::AutoDispatchSkipFunctionalize guard; |
19980 | at::Tensor tmp_output = at::_ops::fft_ihfft_out::call(self_, n, dim, norm, out_); |
19981 |         return out; |
19982 | } |
19983 | } else { |
19984 | at::Tensor tmp_output; |
19985 | { |
19986 | at::AutoDispatchSkipFunctionalize guard; |
19987 | tmp_output = at::_ops::fft_ihfft::call(self_, n, dim, norm); |
19988 | } |
19989 | at::functionalization::impl::replace_(out, tmp_output); |
19990 | at::functionalization::impl::commit_update(out); |
19991 | at::functionalization::impl::sync(out); |
19992 | return out; |
19993 | } |
19994 | } |
19995 | |
19996 | at::Tensor & fft_ifft2_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::OptionalIntArrayRef s, at::IntArrayRef dim, c10::optional<c10::string_view> norm, at::Tensor & out) { |
19997 | if (false) { |
19998 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
19999 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
20000 |       // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
20001 | auto self_meta = to_meta(self); |
20002 | auto out_meta = to_meta(out); |
20003 | at::AutoDispatchSkipFunctionalize func_guard; |
20004 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
20005 | at::_ops::fft_ifft2_out::call(self_meta, s, dim, norm, out_meta); |
20006 | } |
20007 | |
20008 | at::Tensor self_; |
20009 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
20010 | at::functionalization::impl::sync(self); |
20011 | self_ = at::functionalization::impl::from_functional_tensor(self); |
20012 | } else { |
20013 | self_ = self; |
20014 | } |
20015 | |
20016 | at::Tensor out_; |
20017 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
20018 | at::functionalization::impl::sync(out); |
20019 | out_ = at::functionalization::impl::from_functional_tensor(out); |
20020 | } else { |
20021 | out_ = out; |
20022 | } |
20023 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
20024 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
20025 |         // case 1: trying to mutate a non-functional tensor with a functional tensor is an error |
20026 | TORCH_INTERNAL_ASSERT(false, |
20027 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
20028 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
20029 | } else { |
20030 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
20031 | at::AutoDispatchSkipFunctionalize guard; |
20032 | at::Tensor tmp_output = at::_ops::fft_ifft2_out::call(self_, s, dim, norm, out_); |
20033 |         return out; |
20034 | } |
20035 | } else { |
20036 | at::Tensor tmp_output; |
20037 | { |
20038 | at::AutoDispatchSkipFunctionalize guard; |
20039 | tmp_output = at::_ops::fft_ifft2::call(self_, s, dim, norm); |
20040 | } |
20041 | at::functionalization::impl::replace_(out, tmp_output); |
20042 | at::functionalization::impl::commit_update(out); |
20043 | at::functionalization::impl::sync(out); |
20044 | return out; |
20045 | } |
20046 | } |
20047 | |
20048 | const at::Tensor & fft_ihfftn_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::OptionalIntArrayRef s, at::OptionalIntArrayRef dim, c10::optional<c10::string_view> norm, const at::Tensor & out) { |
20049 | if (false) { |
20050 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
20051 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
20052 | // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
20053 | auto self_meta = to_meta(self); |
20054 | auto out_meta = to_meta(out); |
20055 | at::AutoDispatchSkipFunctionalize func_guard; |
20056 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
20057 | at::_ops::fft_ihfftn_out::call(self_meta, s, dim, norm, out_meta); |
20058 | } |
20059 | |
20060 | at::Tensor self_; |
20061 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
20062 | at::functionalization::impl::sync(self); |
20063 | self_ = at::functionalization::impl::from_functional_tensor(self); |
20064 | } else { |
20065 | self_ = self; |
20066 | } |
20067 | |
20068 | at::Tensor out_; |
20069 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
20070 | at::functionalization::impl::sync(out); |
20071 | out_ = at::functionalization::impl::from_functional_tensor(out); |
20072 | } else { |
20073 | out_ = out; |
20074 | } |
20075 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
20076 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
20077 | // case 1: trying to mutate a non functional tensor with a functional tensor is an error |
20078 | TORCH_INTERNAL_ASSERT(false, |
20079 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
20080 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
20081 | } else { |
20082 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
20083 | at::AutoDispatchSkipFunctionalize guard; |
20084 | at::Tensor tmp_output = at::_ops::fft_ihfftn_out::call(self_, s, dim, norm, out_); |
20085 | return out; |
20086 | } |
20087 | } else { |
20088 | at::Tensor tmp_output; |
20089 | { |
20090 | at::AutoDispatchSkipFunctionalize guard; |
20091 | tmp_output = at::_ops::fft_ihfftn::call(self_, s, dim, norm); |
20092 | } |
20093 | at::functionalization::impl::replace_(out, tmp_output); |
20094 | at::functionalization::impl::commit_update(out); |
20095 | at::functionalization::impl::sync(out); |
20096 | return out; |
20097 | } |
20098 | } |
20099 | |
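// Note: ops with multiple outputs (e.g. linalg_cholesky_ex.L below) follow the same pattern,
// but each element of the functional variant's result tuple is written back into its
// corresponding out argument via replace_/commit_update/sync.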
20100 | ::std::tuple<at::Tensor &,at::Tensor &> linalg_cholesky_ex_out_L(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, bool upper, bool check_errors, at::Tensor & L, at::Tensor & info) { |
20101 | if (false) { |
20102 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
20103 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
20104 | // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
20105 | auto self_meta = to_meta(self); |
20106 | auto L_meta = to_meta(L); |
20107 | auto info_meta = to_meta(info); |
20108 | at::AutoDispatchSkipFunctionalize func_guard; |
20109 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
20110 | at::_ops::linalg_cholesky_ex_L::call(self_meta, upper, check_errors, L_meta, info_meta); |
20111 | } |
20112 | |
20113 | at::Tensor self_; |
20114 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
20115 | at::functionalization::impl::sync(self); |
20116 | self_ = at::functionalization::impl::from_functional_tensor(self); |
20117 | } else { |
20118 | self_ = self; |
20119 | } |
20120 | |
20121 | at::Tensor L_; |
20122 | if (at::functionalization::impl::isFunctionalTensor(L)) { |
20123 | at::functionalization::impl::sync(L); |
20124 | L_ = at::functionalization::impl::from_functional_tensor(L); |
20125 | } else { |
20126 | L_ = L; |
20127 | } |
20128 | |
20129 | at::Tensor info_; |
20130 | if (at::functionalization::impl::isFunctionalTensor(info)) { |
20131 | at::functionalization::impl::sync(info); |
20132 | info_ = at::functionalization::impl::from_functional_tensor(info); |
20133 | } else { |
20134 | info_ = info; |
20135 | } |
20136 | if (!(true && at::functionalization::impl::isFunctionalTensor(L) && at::functionalization::impl::isFunctionalTensor(info))) { |
20137 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
20138 | // case 1: trying to mutate a non functional tensor with a functional tensor is an error |
20139 | TORCH_INTERNAL_ASSERT(false, |
20140 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
20141 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
20142 | } else { |
20143 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
20144 | at::AutoDispatchSkipFunctionalize guard; |
20145 | ::std::tuple<at::Tensor,at::Tensor> tmp_output = at::_ops::linalg_cholesky_ex_L::call(self_, upper, check_errors, L_, info_); |
20146 | return ::std::tuple<at::Tensor &,at::Tensor &>(L, info); |
20147 | } |
20148 | } else { |
20149 | ::std::tuple<at::Tensor,at::Tensor> tmp_output; |
20150 | { |
20151 | at::AutoDispatchSkipFunctionalize guard; |
20152 | tmp_output = at::_ops::linalg_cholesky_ex::call(self_, upper, check_errors); |
20153 | } |
20154 | at::functionalization::impl::replace_(L, std::get<0>(tmp_output)); |
20155 | at::functionalization::impl::commit_update(L); |
20156 | at::functionalization::impl::sync(L); |
20157 | at::functionalization::impl::replace_(info, std::get<1>(tmp_output)); |
20158 | at::functionalization::impl::commit_update(info); |
20159 | at::functionalization::impl::sync(info); |
20160 | return ::std::tuple<at::Tensor &,at::Tensor &>(L, info); |
20161 | } |
20162 | } |
20163 | |
20164 | at::Tensor & linalg_cross_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, int64_t dim, at::Tensor & out) { |
20165 | if (false) { |
20166 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
20167 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
20168 | // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
20169 | auto self_meta = to_meta(self); |
20170 | auto other_meta = to_meta(other); |
20171 | auto out_meta = to_meta(out); |
20172 | at::AutoDispatchSkipFunctionalize func_guard; |
20173 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
20174 | at::_ops::linalg_cross_out::call(self_meta, other_meta, dim, out_meta); |
20175 | } |
20176 | |
20177 | at::Tensor self_; |
20178 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
20179 | at::functionalization::impl::sync(self); |
20180 | self_ = at::functionalization::impl::from_functional_tensor(self); |
20181 | } else { |
20182 | self_ = self; |
20183 | } |
20184 | |
20185 | at::Tensor other_; |
20186 | if (at::functionalization::impl::isFunctionalTensor(other)) { |
20187 | at::functionalization::impl::sync(other); |
20188 | other_ = at::functionalization::impl::from_functional_tensor(other); |
20189 | } else { |
20190 | other_ = other; |
20191 | } |
20192 | |
20193 | at::Tensor out_; |
20194 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
20195 | at::functionalization::impl::sync(out); |
20196 | out_ = at::functionalization::impl::from_functional_tensor(out); |
20197 | } else { |
20198 | out_ = out; |
20199 | } |
20200 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
20201 | if ((false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(other))) { |
20202 | // case 1: trying to mutate a non functional tensor with a functional tensor is an error |
20203 | TORCH_INTERNAL_ASSERT(false, |
20204 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
20205 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
20206 | } else { |
20207 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
20208 | at::AutoDispatchSkipFunctionalize guard; |
20209 | at::Tensor tmp_output = at::_ops::linalg_cross_out::call(self_, other_, dim, out_); |
20210 | return out; |
20211 | } |
20212 | } else { |
20213 | at::Tensor tmp_output; |
20214 | { |
20215 | at::AutoDispatchSkipFunctionalize guard; |
20216 | tmp_output = at::_ops::linalg_cross::call(self_, other_, dim); |
20217 | } |
20218 | at::functionalization::impl::replace_(out, tmp_output); |
20219 | at::functionalization::impl::commit_update(out); |
20220 | at::functionalization::impl::sync(out); |
20221 | return out; |
20222 | } |
20223 | } |
20224 | |
20225 | at::Tensor & linalg_lu_solve_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & LU, const at::Tensor & pivots, const at::Tensor & B, bool left, bool adjoint, at::Tensor & out) { |
20226 | if (false) { |
20227 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
20228 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
20229 | // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
20230 | auto LU_meta = to_meta(LU); |
20231 | auto pivots_meta = to_meta(pivots); |
20232 | auto B_meta = to_meta(B); |
20233 | auto out_meta = to_meta(out); |
20234 | at::AutoDispatchSkipFunctionalize func_guard; |
20235 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
20236 | at::_ops::linalg_lu_solve_out::call(LU_meta, pivots_meta, B_meta, left, adjoint, out_meta); |
20237 | } |
20238 | |
20239 | at::Tensor LU_; |
20240 | if (at::functionalization::impl::isFunctionalTensor(LU)) { |
20241 | at::functionalization::impl::sync(LU); |
20242 | LU_ = at::functionalization::impl::from_functional_tensor(LU); |
20243 | } else { |
20244 | LU_ = LU; |
20245 | } |
20246 | |
20247 | at::Tensor pivots_; |
20248 | if (at::functionalization::impl::isFunctionalTensor(pivots)) { |
20249 | at::functionalization::impl::sync(pivots); |
20250 | pivots_ = at::functionalization::impl::from_functional_tensor(pivots); |
20251 | } else { |
20252 | pivots_ = pivots; |
20253 | } |
20254 | |
20255 | at::Tensor B_; |
20256 | if (at::functionalization::impl::isFunctionalTensor(B)) { |
20257 | at::functionalization::impl::sync(B); |
20258 | B_ = at::functionalization::impl::from_functional_tensor(B); |
20259 | } else { |
20260 | B_ = B; |
20261 | } |
20262 | |
20263 | at::Tensor out_; |
20264 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
20265 | at::functionalization::impl::sync(out); |
20266 | out_ = at::functionalization::impl::from_functional_tensor(out); |
20267 | } else { |
20268 | out_ = out; |
20269 | } |
20270 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
20271 | if ((false || at::functionalization::impl::isFunctionalTensor(LU) || at::functionalization::impl::isFunctionalTensor(pivots) || at::functionalization::impl::isFunctionalTensor(B))) { |
20272 | // case 1: trying to mutate a non functional tensor with a functional tensor is an error |
20273 | TORCH_INTERNAL_ASSERT(false, |
20274 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
20275 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
20276 | } else { |
20277 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
20278 | at::AutoDispatchSkipFunctionalize guard; |
20279 | at::Tensor tmp_output = at::_ops::linalg_lu_solve_out::call(LU_, pivots_, B_, left, adjoint, out_); |
20280 | return out; |
20281 | } |
20282 | } else { |
20283 | at::Tensor tmp_output; |
20284 | { |
20285 | at::AutoDispatchSkipFunctionalize guard; |
20286 | tmp_output = at::_ops::linalg_lu_solve::call(LU_, pivots_, B_, left, adjoint); |
20287 | } |
20288 | at::functionalization::impl::replace_(out, tmp_output); |
20289 | at::functionalization::impl::commit_update(out); |
20290 | at::functionalization::impl::sync(out); |
20291 | return out; |
20292 | } |
20293 | } |
20294 | |
20295 | at::Tensor & linalg_matmul_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) { |
20296 | if (false) { |
20297 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
20298 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
20299 | // (We can only do this for inplace ops today though, because they technicaly all support meta tensors). |
20300 | // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
20301 | auto other_meta = to_meta(other); |
20302 | auto out_meta = to_meta(out); |
20303 | at::AutoDispatchSkipFunctionalize func_guard; |
20304 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
20305 | at::_ops::linalg_matmul_out::call(self_meta, other_meta, out_meta); |
20306 | } |
20307 | |
20308 | at::Tensor self_; |
20309 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
20310 | at::functionalization::impl::sync(self); |
20311 | self_ = at::functionalization::impl::from_functional_tensor(self); |
20312 | } else { |
20313 | self_ = self; |
20314 | } |
20315 | |
20316 | at::Tensor other_; |
20317 | if (at::functionalization::impl::isFunctionalTensor(other)) { |
20318 | at::functionalization::impl::sync(other); |
20319 | other_ = at::functionalization::impl::from_functional_tensor(other); |
20320 | } else { |
20321 | other_ = other; |
20322 | } |
20323 | |
20324 | at::Tensor out_; |
20325 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
20326 | at::functionalization::impl::sync(out); |
20327 | out_ = at::functionalization::impl::from_functional_tensor(out); |
20328 | } else { |
20329 | out_ = out; |
20330 | } |
20331 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
20332 | if ((false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(other))) { |
20333 | // case 1: trying to mutate a non functional tensor with a functional tensor is an error |
20334 | TORCH_INTERNAL_ASSERT(false, |
20335 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
20336 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
20337 | } else { |
20338 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
20339 | at::AutoDispatchSkipFunctionalize guard; |
20340 | at::Tensor tmp_output = at::_ops::linalg_matmul_out::call(self_, other_, out_); |
20341 | return out; |
20342 | } |
20343 | } else { |
20344 | at::Tensor tmp_output; |
20345 | { |
20346 | at::AutoDispatchSkipFunctionalize guard; |
20347 | tmp_output = at::_ops::linalg_matmul::call(self_, other_); |
20348 | } |
20349 | at::functionalization::impl::replace_(out, tmp_output); |
20350 | at::functionalization::impl::commit_update(out); |
20351 | at::functionalization::impl::sync(out); |
20352 | return out; |
20353 | } |
20354 | } |
20355 | |
20356 | at::Tensor & linalg_vecdot_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, const at::Tensor & y, int64_t dim, at::Tensor & out) { |
20357 | if (false) { |
20358 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
20359 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
20360 | // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
20361 | auto x_meta = to_meta(x); |
20362 | auto y_meta = to_meta(y); |
20363 | auto out_meta = to_meta(out); |
20364 | at::AutoDispatchSkipFunctionalize func_guard; |
20365 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
20366 | at::_ops::linalg_vecdot_out::call(x_meta, y_meta, dim, out_meta); |
20367 | } |
20368 | |
20369 | at::Tensor x_; |
20370 | if (at::functionalization::impl::isFunctionalTensor(x)) { |
20371 | at::functionalization::impl::sync(x); |
20372 | x_ = at::functionalization::impl::from_functional_tensor(x); |
20373 | } else { |
20374 | x_ = x; |
20375 | } |
20376 | |
20377 | at::Tensor y_; |
20378 | if (at::functionalization::impl::isFunctionalTensor(y)) { |
20379 | at::functionalization::impl::sync(y); |
20380 | y_ = at::functionalization::impl::from_functional_tensor(y); |
20381 | } else { |
20382 | y_ = y; |
20383 | } |
20384 | |
20385 | at::Tensor out_; |
20386 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
20387 | at::functionalization::impl::sync(out); |
20388 | out_ = at::functionalization::impl::from_functional_tensor(out); |
20389 | } else { |
20390 | out_ = out; |
20391 | } |
20392 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
20393 | if ((false || at::functionalization::impl::isFunctionalTensor(x) || at::functionalization::impl::isFunctionalTensor(y))) { |
20394 | // case 1: trying to mutate a non functional tensor with a functional tensor is an error |
20395 | TORCH_INTERNAL_ASSERT(false, |
20396 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
20397 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
20398 | } else { |
20399 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
20400 | at::AutoDispatchSkipFunctionalize guard; |
20401 | at::Tensor tmp_output = at::_ops::linalg_vecdot_out::call(x_, y_, dim, out_); |
20402 | return out; |
20403 | } |
20404 | } else { |
20405 | at::Tensor tmp_output; |
20406 | { |
20407 | at::AutoDispatchSkipFunctionalize guard; |
20408 | tmp_output = at::_ops::linalg_vecdot::call(x_, y_, dim); |
20409 | } |
20410 | at::functionalization::impl::replace_(out, tmp_output); |
20411 | at::functionalization::impl::commit_update(out); |
20412 | at::functionalization::impl::sync(out); |
20413 | return out; |
20414 | } |
20415 | } |
20416 | |
20417 | at::Tensor & linalg_eigvalsh_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::string_view UPLO, at::Tensor & out) { |
20418 | if (false) { |
20419 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
20420 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
20421 | // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
20422 | auto self_meta = to_meta(self); |
20423 | auto out_meta = to_meta(out); |
20424 | at::AutoDispatchSkipFunctionalize func_guard; |
20425 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
20426 | at::_ops::linalg_eigvalsh_out::call(self_meta, UPLO, out_meta); |
20427 | } |
20428 | |
20429 | at::Tensor self_; |
20430 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
20431 | at::functionalization::impl::sync(self); |
20432 | self_ = at::functionalization::impl::from_functional_tensor(self); |
20433 | } else { |
20434 | self_ = self; |
20435 | } |
20436 | |
20437 | at::Tensor out_; |
20438 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
20439 | at::functionalization::impl::sync(out); |
20440 | out_ = at::functionalization::impl::from_functional_tensor(out); |
20441 | } else { |
20442 | out_ = out; |
20443 | } |
20444 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
20445 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
20446 | // case 1: trying to mutate a non functional tensor with a functional tensor is an error |
20447 | TORCH_INTERNAL_ASSERT(false, |
20448 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
20449 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
20450 | } else { |
20451 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
20452 | at::AutoDispatchSkipFunctionalize guard; |
20453 | at::Tensor tmp_output = at::_ops::linalg_eigvalsh_out::call(self_, UPLO, out_); |
20454 | return out; |
20455 | } |
20456 | } else { |
20457 | at::Tensor tmp_output; |
20458 | { |
20459 | at::AutoDispatchSkipFunctionalize guard; |
20460 | tmp_output = at::_ops::linalg_eigvalsh::call(self_, UPLO); |
20461 | } |
20462 | at::functionalization::impl::replace_(out, tmp_output); |
20463 | at::functionalization::impl::commit_update(out); |
20464 | at::functionalization::impl::sync(out); |
20465 | return out; |
20466 | } |
20467 | } |
20468 | |
20469 | ::std::tuple<at::Tensor &,at::Tensor &> linalg_inv_ex_out_inverse(c10::DispatchKeySet dispatchKeySet, const at::Tensor & A, bool check_errors, at::Tensor & inverse, at::Tensor & info) { |
20470 | if (false) { |
20471 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
20472 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
20473 | // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
20474 | auto A_meta = to_meta(A); |
20475 | auto inverse_meta = to_meta(inverse); |
20476 | auto info_meta = to_meta(info); |
20477 | at::AutoDispatchSkipFunctionalize func_guard; |
20478 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
20479 | at::_ops::linalg_inv_ex_inverse::call(A_meta, check_errors, inverse_meta, info_meta); |
20480 | } |
20481 | |
20482 | at::Tensor A_; |
20483 | if (at::functionalization::impl::isFunctionalTensor(A)) { |
20484 | at::functionalization::impl::sync(A); |
20485 | A_ = at::functionalization::impl::from_functional_tensor(A); |
20486 | } else { |
20487 | A_ = A; |
20488 | } |
20489 | |
20490 | at::Tensor inverse_; |
20491 | if (at::functionalization::impl::isFunctionalTensor(inverse)) { |
20492 | at::functionalization::impl::sync(inverse); |
20493 | inverse_ = at::functionalization::impl::from_functional_tensor(inverse); |
20494 | } else { |
20495 | inverse_ = inverse; |
20496 | } |
20497 | |
20498 | at::Tensor info_; |
20499 | if (at::functionalization::impl::isFunctionalTensor(info)) { |
20500 | at::functionalization::impl::sync(info); |
20501 | info_ = at::functionalization::impl::from_functional_tensor(info); |
20502 | } else { |
20503 | info_ = info; |
20504 | } |
20505 | if (!(true && at::functionalization::impl::isFunctionalTensor(inverse) && at::functionalization::impl::isFunctionalTensor(info))) { |
20506 | if ((false || at::functionalization::impl::isFunctionalTensor(A))) { |
20507 | // case 1: trying to mutate a non functional tensor with a functional tensor is an error |
20508 | TORCH_INTERNAL_ASSERT(false, |
20509 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
20510 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
20511 | } else { |
20512 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
20513 | at::AutoDispatchSkipFunctionalize guard; |
20514 | ::std::tuple<at::Tensor,at::Tensor> tmp_output = at::_ops::linalg_inv_ex_inverse::call(A_, check_errors, inverse_, info_); |
20515 | return ::std::tuple<at::Tensor &,at::Tensor &>(inverse, info); |
20516 | } |
20517 | } else { |
20518 | ::std::tuple<at::Tensor,at::Tensor> tmp_output; |
20519 | { |
20520 | at::AutoDispatchSkipFunctionalize guard; |
20521 | tmp_output = at::_ops::linalg_inv_ex::call(A_, check_errors); |
20522 | } |
20523 | at::functionalization::impl::replace_(inverse, std::get<0>(tmp_output)); |
20524 | at::functionalization::impl::commit_update(inverse); |
20525 | at::functionalization::impl::sync(inverse); |
20526 | at::functionalization::impl::replace_(info, std::get<1>(tmp_output)); |
20527 | at::functionalization::impl::commit_update(info); |
20528 | at::functionalization::impl::sync(info); |
20529 | return ::std::tuple<at::Tensor &,at::Tensor &>(inverse, info); |
20530 | } |
20531 | } |
20532 | |
20533 | at::Tensor & inverse_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) { |
20534 | if (false) { |
20535 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
20536 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
20537 | // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
20538 | auto self_meta = to_meta(self); |
20539 | auto out_meta = to_meta(out); |
20540 | at::AutoDispatchSkipFunctionalize func_guard; |
20541 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
20542 | at::_ops::inverse_out::call(self_meta, out_meta); |
20543 | } |
20544 | |
20545 | at::Tensor self_; |
20546 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
20547 | at::functionalization::impl::sync(self); |
20548 | self_ = at::functionalization::impl::from_functional_tensor(self); |
20549 | } else { |
20550 | self_ = self; |
20551 | } |
20552 | |
20553 | at::Tensor out_; |
20554 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
20555 | at::functionalization::impl::sync(out); |
20556 | out_ = at::functionalization::impl::from_functional_tensor(out); |
20557 | } else { |
20558 | out_ = out; |
20559 | } |
20560 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
20561 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
20562 | // case 1: trying to mutate a non functional tensor with a functional tensor is an error |
20563 | TORCH_INTERNAL_ASSERT(false, |
20564 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
20565 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
20566 | } else { |
20567 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
20568 | at::AutoDispatchSkipFunctionalize guard; |
20569 | at::Tensor tmp_output = at::_ops::inverse_out::call(self_, out_); |
20570 | return out; |
20571 | } |
20572 | } else { |
20573 | at::Tensor tmp_output; |
20574 | { |
20575 | at::AutoDispatchSkipFunctionalize guard; |
20576 | tmp_output = at::_ops::inverse::call(self_); |
20577 | } |
20578 | at::functionalization::impl::replace_(out, tmp_output); |
20579 | at::functionalization::impl::commit_update(out); |
20580 | at::functionalization::impl::sync(out); |
20581 | return out; |
20582 | } |
20583 | } |
20584 | |
20585 | at::Tensor & outer_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & vec2, at::Tensor & out) { |
20586 | if (false) { |
20587 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
20588 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
20589 | // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
20590 | auto self_meta = to_meta(self); |
20591 | auto vec2_meta = to_meta(vec2); |
20592 | auto out_meta = to_meta(out); |
20593 | at::AutoDispatchSkipFunctionalize func_guard; |
20594 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
20595 | at::_ops::outer_out::call(self_meta, vec2_meta, out_meta); |
20596 | } |
20597 | |
20598 | at::Tensor self_; |
20599 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
20600 | at::functionalization::impl::sync(self); |
20601 | self_ = at::functionalization::impl::from_functional_tensor(self); |
20602 | } else { |
20603 | self_ = self; |
20604 | } |
20605 | |
20606 | at::Tensor vec2_; |
20607 | if (at::functionalization::impl::isFunctionalTensor(vec2)) { |
20608 | at::functionalization::impl::sync(vec2); |
20609 | vec2_ = at::functionalization::impl::from_functional_tensor(vec2); |
20610 | } else { |
20611 | vec2_ = vec2; |
20612 | } |
20613 | |
20614 | at::Tensor out_; |
20615 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
20616 | at::functionalization::impl::sync(out); |
20617 | out_ = at::functionalization::impl::from_functional_tensor(out); |
20618 | } else { |
20619 | out_ = out; |
20620 | } |
20621 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
20622 | if ((false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(vec2))) { |
20623 | // case 1: trying to mutate a non functional tensor with a functional tensor is an error |
20624 | TORCH_INTERNAL_ASSERT(false, |
20625 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
20626 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
20627 | } else { |
20628 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
20629 | at::AutoDispatchSkipFunctionalize guard; |
20630 | at::Tensor tmp_output = at::_ops::outer_out::call(self_, vec2_, out_); |
20631 | return out; |
20632 | } |
20633 | } else { |
20634 | at::Tensor tmp_output; |
20635 | { |
20636 | at::AutoDispatchSkipFunctionalize guard; |
20637 | tmp_output = at::_ops::outer::call(self_, vec2_); |
20638 | } |
20639 | at::functionalization::impl::replace_(out, tmp_output); |
20640 | at::functionalization::impl::commit_update(out); |
20641 | at::functionalization::impl::sync(out); |
20642 | return out; |
20643 | } |
20644 | } |
20645 | |
20646 | ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> linalg_svd_out_U(c10::DispatchKeySet dispatchKeySet, const at::Tensor & A, bool full_matrices, c10::optional<c10::string_view> driver, at::Tensor & U, at::Tensor & S, at::Tensor & Vh) { |
20647 | if (false) { |
20648 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
20649 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
20650 | // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
20651 | auto A_meta = to_meta(A); |
20652 | auto U_meta = to_meta(U); |
20653 | auto S_meta = to_meta(S); |
20654 | auto Vh_meta = to_meta(Vh); |
20655 | at::AutoDispatchSkipFunctionalize func_guard; |
20656 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
20657 | at::_ops::linalg_svd_U::call(A_meta, full_matrices, driver, U_meta, S_meta, Vh_meta); |
20658 | } |
20659 | |
20660 | at::Tensor A_; |
20661 | if (at::functionalization::impl::isFunctionalTensor(A)) { |
20662 | at::functionalization::impl::sync(A); |
20663 | A_ = at::functionalization::impl::from_functional_tensor(A); |
20664 | } else { |
20665 | A_ = A; |
20666 | } |
20667 | |
20668 | at::Tensor U_; |
20669 | if (at::functionalization::impl::isFunctionalTensor(U)) { |
20670 | at::functionalization::impl::sync(U); |
20671 | U_ = at::functionalization::impl::from_functional_tensor(U); |
20672 | } else { |
20673 | U_ = U; |
20674 | } |
20675 | |
20676 | at::Tensor S_; |
20677 | if (at::functionalization::impl::isFunctionalTensor(S)) { |
20678 | at::functionalization::impl::sync(S); |
20679 | S_ = at::functionalization::impl::from_functional_tensor(S); |
20680 | } else { |
20681 | S_ = S; |
20682 | } |
20683 | |
20684 | at::Tensor Vh_; |
20685 | if (at::functionalization::impl::isFunctionalTensor(Vh)) { |
20686 | at::functionalization::impl::sync(Vh); |
20687 | Vh_ = at::functionalization::impl::from_functional_tensor(Vh); |
20688 | } else { |
20689 | Vh_ = Vh; |
20690 | } |
20691 | if (!(true && at::functionalization::impl::isFunctionalTensor(U) && at::functionalization::impl::isFunctionalTensor(S) && at::functionalization::impl::isFunctionalTensor(Vh))) { |
20692 | if ((false || at::functionalization::impl::isFunctionalTensor(A))) { |
20693 | // case 1: trying to mutate a non functional tensor with a functional tensor is an error |
20694 | TORCH_INTERNAL_ASSERT(false, |
20695 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
20696 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
20697 | } else { |
20698 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
20699 | at::AutoDispatchSkipFunctionalize guard; |
20700 | ::std::tuple<at::Tensor,at::Tensor,at::Tensor> tmp_output = at::_ops::linalg_svd_U::call(A_, full_matrices, driver, U_, S_, Vh_); |
20701 | return ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &>(U, S, Vh); |
20702 | } |
20703 | } else { |
20704 | ::std::tuple<at::Tensor,at::Tensor,at::Tensor> tmp_output; |
20705 | { |
20706 | at::AutoDispatchSkipFunctionalize guard; |
20707 | tmp_output = at::_ops::linalg_svd::call(A_, full_matrices, driver); |
20708 | } |
20709 | at::functionalization::impl::replace_(U, std::get<0>(tmp_output)); |
20710 | at::functionalization::impl::commit_update(U); |
20711 | at::functionalization::impl::sync(U); |
20712 | at::functionalization::impl::replace_(S, std::get<1>(tmp_output)); |
20713 | at::functionalization::impl::commit_update(S); |
20714 | at::functionalization::impl::sync(S); |
20715 | at::functionalization::impl::replace_(Vh, std::get<2>(tmp_output)); |
20716 | at::functionalization::impl::commit_update(Vh); |
20717 | at::functionalization::impl::sync(Vh); |
20718 | return ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &>(U, S, Vh); |
20719 | } |
20720 | } |
20721 | |
20722 | at::Tensor & linalg_cond_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const c10::optional<at::Scalar> & p, at::Tensor & out) { |
20723 | if (false) { |
20724 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
20725 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
20726 | // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
20727 | auto self_meta = to_meta(self); |
20728 | auto out_meta = to_meta(out); |
20729 | at::AutoDispatchSkipFunctionalize func_guard; |
20730 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
20731 | at::_ops::linalg_cond_out::call(self_meta, p, out_meta); |
20732 | } |
20733 | |
20734 | at::Tensor self_; |
20735 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
20736 | at::functionalization::impl::sync(self); |
20737 | self_ = at::functionalization::impl::from_functional_tensor(self); |
20738 | } else { |
20739 | self_ = self; |
20740 | } |
20741 | |
20742 | at::Tensor out_; |
20743 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
20744 | at::functionalization::impl::sync(out); |
20745 | out_ = at::functionalization::impl::from_functional_tensor(out); |
20746 | } else { |
20747 | out_ = out; |
20748 | } |
20749 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
20750 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
20751 | // case 1: trying to mutate a non functional tensor with a functional tensor is an error |
20752 | TORCH_INTERNAL_ASSERT(false, |
20753 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
20754 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
20755 | } else { |
20756 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
20757 | at::AutoDispatchSkipFunctionalize guard; |
20758 | at::Tensor tmp_output = at::_ops::linalg_cond_out::call(self_, p, out_); |
20759 | return out; |
20760 | } |
20761 | } else { |
20762 | at::Tensor tmp_output; |
20763 | { |
20764 | at::AutoDispatchSkipFunctionalize guard; |
20765 | tmp_output = at::_ops::linalg_cond::call(self_, p); |
20766 | } |
20767 | at::functionalization::impl::replace_(out, tmp_output); |
20768 | at::functionalization::impl::commit_update(out); |
20769 | at::functionalization::impl::sync(out); |
20770 | return out; |
20771 | } |
20772 | } |
20773 | |
20774 | at::Tensor & linalg_cond_out_p_str_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::string_view p, at::Tensor & out) { |
20775 | if (false) { |
20776 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
20777 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
20778 | // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
20779 | auto self_meta = to_meta(self); |
20780 | auto out_meta = to_meta(out); |
20781 | at::AutoDispatchSkipFunctionalize func_guard; |
20782 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
20783 | at::_ops::linalg_cond_p_str_out::call(self_meta, p, out_meta); |
20784 | } |
20785 | |
20786 | at::Tensor self_; |
20787 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
20788 | at::functionalization::impl::sync(self); |
20789 | self_ = at::functionalization::impl::from_functional_tensor(self); |
20790 | } else { |
20791 | self_ = self; |
20792 | } |
20793 | |
20794 | at::Tensor out_; |
20795 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
20796 | at::functionalization::impl::sync(out); |
20797 | out_ = at::functionalization::impl::from_functional_tensor(out); |
20798 | } else { |
20799 | out_ = out; |
20800 | } |
20801 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
20802 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
20803 | // case 1: trying to mutate a non functional tensor with a functional tensor is an error |
20804 | TORCH_INTERNAL_ASSERT(false, |
20805 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
20806 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
20807 | } else { |
20808 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
20809 | at::AutoDispatchSkipFunctionalize guard; |
20810 | at::Tensor tmp_output = at::_ops::linalg_cond_p_str_out::call(self_, p, out_); |
20811 | return out; |
20812 | } |
20813 | } else { |
20814 | at::Tensor tmp_output; |
20815 | { |
20816 | at::AutoDispatchSkipFunctionalize guard; |
20817 | tmp_output = at::_ops::linalg_cond_p_str::call(self_, p); |
20818 | } |
20819 | at::functionalization::impl::replace_(out, tmp_output); |
20820 | at::functionalization::impl::commit_update(out); |
20821 | at::functionalization::impl::sync(out); |
20822 | return out; |
20823 | } |
20824 | } |
20825 | |
20826 | ::std::tuple<at::Tensor &,at::Tensor &> linalg_solve_ex_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & A, const at::Tensor & B, bool left, bool check_errors, at::Tensor & result, at::Tensor & info) { |
20827 | if (false) { |
20828 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
20829 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
20830 | // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
20831 | auto A_meta = to_meta(A); |
20832 | auto B_meta = to_meta(B); |
20833 | auto result_meta = to_meta(result); |
20834 | auto info_meta = to_meta(info); |
20835 | at::AutoDispatchSkipFunctionalize func_guard; |
20836 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
20837 | at::_ops::linalg_solve_ex_out::call(A_meta, B_meta, left, check_errors, result_meta, info_meta); |
20838 | } |
20839 | |
20840 | at::Tensor A_; |
20841 | if (at::functionalization::impl::isFunctionalTensor(A)) { |
20842 | at::functionalization::impl::sync(A); |
20843 | A_ = at::functionalization::impl::from_functional_tensor(A); |
20844 | } else { |
20845 | A_ = A; |
20846 | } |
20847 | |
20848 | at::Tensor B_; |
20849 | if (at::functionalization::impl::isFunctionalTensor(B)) { |
20850 | at::functionalization::impl::sync(B); |
20851 | B_ = at::functionalization::impl::from_functional_tensor(B); |
20852 | } else { |
20853 | B_ = B; |
20854 | } |
20855 | |
20856 | at::Tensor result_; |
20857 | if (at::functionalization::impl::isFunctionalTensor(result)) { |
20858 | at::functionalization::impl::sync(result); |
20859 | result_ = at::functionalization::impl::from_functional_tensor(result); |
20860 | } else { |
20861 | result_ = result; |
20862 | } |
20863 | |
20864 | at::Tensor info_; |
20865 | if (at::functionalization::impl::isFunctionalTensor(info)) { |
20866 | at::functionalization::impl::sync(info); |
20867 | info_ = at::functionalization::impl::from_functional_tensor(info); |
20868 | } else { |
20869 | info_ = info; |
20870 | } |
20871 | if (!(true && at::functionalization::impl::isFunctionalTensor(result) && at::functionalization::impl::isFunctionalTensor(info))) { |
20872 | if ((false || at::functionalization::impl::isFunctionalTensor(A) || at::functionalization::impl::isFunctionalTensor(B))) { |
20873 | // case 1: trying to mutate a non functional tensor with a functional tensor is an error |
20874 | TORCH_INTERNAL_ASSERT(false, |
20875 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
20876 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
20877 | } else { |
20878 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
20879 | at::AutoDispatchSkipFunctionalize guard; |
20880 | ::std::tuple<at::Tensor,at::Tensor> tmp_output = at::_ops::linalg_solve_ex_out::call(A_, B_, left, check_errors, result_, info_); |
20881 | return ::std::tuple<at::Tensor &,at::Tensor &>(result, info); |
20882 | } |
20883 | } else { |
20884 | ::std::tuple<at::Tensor,at::Tensor> tmp_output; |
20885 | { |
20886 | at::AutoDispatchSkipFunctionalize guard; |
20887 | tmp_output = at::_ops::linalg_solve_ex::call(A_, B_, left, check_errors); |
20888 | } |
20889 | at::functionalization::impl::replace_(result, std::get<0>(tmp_output)); |
20890 | at::functionalization::impl::commit_update(result); |
20891 | at::functionalization::impl::sync(result); |
20892 | at::functionalization::impl::replace_(info, std::get<1>(tmp_output)); |
20893 | at::functionalization::impl::commit_update(info); |
20894 | at::functionalization::impl::sync(info); |
20895 | return ::std::tuple<at::Tensor &,at::Tensor &>(result, info); |
20896 | } |
20897 | } |
20898 | |
20899 | at::Tensor & linalg_tensorsolve_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::OptionalIntArrayRef dims, at::Tensor & out) { |
20900 | if (false) { |
20901 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
20902 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
20903 | // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
20904 | auto self_meta = to_meta(self); |
20905 | auto other_meta = to_meta(other); |
20906 | auto out_meta = to_meta(out); |
20907 | at::AutoDispatchSkipFunctionalize func_guard; |
20908 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
20909 | at::_ops::linalg_tensorsolve_out::call(self_meta, other_meta, dims, out_meta); |
20910 | } |
20911 | |
20912 | at::Tensor self_; |
20913 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
20914 | at::functionalization::impl::sync(self); |
20915 | self_ = at::functionalization::impl::from_functional_tensor(self); |
20916 | } else { |
20917 | self_ = self; |
20918 | } |
20919 | |
20920 | at::Tensor other_; |
20921 | if (at::functionalization::impl::isFunctionalTensor(other)) { |
20922 | at::functionalization::impl::sync(other); |
20923 | other_ = at::functionalization::impl::from_functional_tensor(other); |
20924 | } else { |
20925 | other_ = other; |
20926 | } |
20927 | |
20928 | at::Tensor out_; |
20929 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
20930 | at::functionalization::impl::sync(out); |
20931 | out_ = at::functionalization::impl::from_functional_tensor(out); |
20932 | } else { |
20933 | out_ = out; |
20934 | } |
20935 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
20936 | if ((false || at::functionalization::impl::isFunctionalTensor(self) || at::functionalization::impl::isFunctionalTensor(other))) { |
20937 | // case 1: trying to mutate a non functional tensor with a functional tensor is an error |
20938 | TORCH_INTERNAL_ASSERT(false, |
20939 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
20940 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
20941 | } else { |
20942 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
20943 | at::AutoDispatchSkipFunctionalize guard; |
20944 | at::Tensor tmp_output = at::_ops::linalg_tensorsolve_out::call(self_, other_, dims, out_); |
20945 | return out; |
20946 | } |
20947 | } else { |
20948 | at::Tensor tmp_output; |
20949 | { |
20950 | at::AutoDispatchSkipFunctionalize guard; |
20951 | tmp_output = at::_ops::linalg_tensorsolve::call(self_, other_, dims); |
20952 | } |
20953 | at::functionalization::impl::replace_(out, tmp_output); |
20954 | at::functionalization::impl::commit_update(out); |
20955 | at::functionalization::impl::sync(out); |
20956 | return out; |
20957 | } |
20958 | } |
20959 | |
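// Note: optional tensor arguments (e.g. atol/rtol below) are unwrapped into c10::optional<at::Tensor>
// using the same isFunctionalTensor/sync/from_functional_tensor pattern as required tensor arguments.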
20960 | at::Tensor & linalg_matrix_rank_out_atol_rtol_tensor_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const c10::optional<at::Tensor> & atol, const c10::optional<at::Tensor> & rtol, bool hermitian, at::Tensor & out) { |
20961 | if (false) { |
20962 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
20963 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
20964 | // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
20965 | auto input_meta = to_meta(input); |
20966 | auto atol_meta = to_meta(atol); |
20967 | auto rtol_meta = to_meta(rtol); |
20968 | auto out_meta = to_meta(out); |
20969 | at::AutoDispatchSkipFunctionalize func_guard; |
20970 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
20971 | at::_ops::linalg_matrix_rank_atol_rtol_tensor_out::call(input_meta, atol_meta, rtol_meta, hermitian, out_meta); |
20972 | } |
20973 | |
20974 | at::Tensor input_; |
20975 | if (at::functionalization::impl::isFunctionalTensor(input)) { |
20976 | at::functionalization::impl::sync(input); |
20977 | input_ = at::functionalization::impl::from_functional_tensor(input); |
20978 | } else { |
20979 | input_ = input; |
20980 | } |
20981 | |
20982 | c10::optional<at::Tensor> atol_; |
20983 | if (at::functionalization::impl::isFunctionalTensor(atol)) { |
20984 | at::functionalization::impl::sync(atol); |
20985 | atol_ = at::functionalization::impl::from_functional_tensor(atol); |
20986 | } else { |
20987 | atol_ = atol; |
20988 | } |
20989 | |
20990 | c10::optional<at::Tensor> rtol_; |
20991 | if (at::functionalization::impl::isFunctionalTensor(rtol)) { |
20992 | at::functionalization::impl::sync(rtol); |
20993 | rtol_ = at::functionalization::impl::from_functional_tensor(rtol); |
20994 | } else { |
20995 | rtol_ = rtol; |
20996 | } |
20997 | |
20998 | at::Tensor out_; |
20999 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
21000 | at::functionalization::impl::sync(out); |
21001 | out_ = at::functionalization::impl::from_functional_tensor(out); |
21002 | } else { |
21003 | out_ = out; |
21004 | } |
21005 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
21006 | if ((false || at::functionalization::impl::isFunctionalTensor(input) || at::functionalization::impl::isFunctionalTensor(atol) || at::functionalization::impl::isFunctionalTensor(rtol))) { |
21007 | // case 1: trying to mutate a non functional tensor with a functional tensor is an error |
21008 | TORCH_INTERNAL_ASSERT(false, |
21009 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
21010 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
21011 | } else { |
21012 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
21013 | at::AutoDispatchSkipFunctionalize guard; |
21014 | at::Tensor tmp_output = at::_ops::linalg_matrix_rank_atol_rtol_tensor_out::call(input_, atol_, rtol_, hermitian, out_); |
21015 | return out; |
21016 | } |
21017 | } else { |
21018 | at::Tensor tmp_output; |
21019 | { |
21020 | at::AutoDispatchSkipFunctionalize guard; |
21021 | tmp_output = at::_ops::linalg_matrix_rank_atol_rtol_tensor::call(input_, atol_, rtol_, hermitian); |
21022 | } |
21023 | at::functionalization::impl::replace_(out, tmp_output); |
21024 | at::functionalization::impl::commit_update(out); |
21025 | at::functionalization::impl::sync(out); |
21026 | return out; |
21027 | } |
21028 | } |
21029 | |
21030 | at::Tensor & linalg_matrix_rank_out_atol_rtol_float_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::optional<double> atol, c10::optional<double> rtol, bool hermitian, at::Tensor & out) { |
21031 | if (false) { |
21032 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
21033 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
21034 | // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
21035 | auto self_meta = to_meta(self); |
21036 | auto out_meta = to_meta(out); |
21037 | at::AutoDispatchSkipFunctionalize func_guard; |
21038 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
21039 | at::_ops::linalg_matrix_rank_atol_rtol_float_out::call(self_meta, atol, rtol, hermitian, out_meta); |
21040 | } |
21041 | |
21042 | at::Tensor self_; |
21043 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
21044 | at::functionalization::impl::sync(self); |
21045 | self_ = at::functionalization::impl::from_functional_tensor(self); |
21046 | } else { |
21047 | self_ = self; |
21048 | } |
21049 | |
21050 | at::Tensor out_; |
21051 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
21052 | at::functionalization::impl::sync(out); |
21053 | out_ = at::functionalization::impl::from_functional_tensor(out); |
21054 | } else { |
21055 | out_ = out; |
21056 | } |
21057 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
21058 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
21059 | // case 1: trying to mutate a non functional tensor with a functional tensor is an error |
21060 | TORCH_INTERNAL_ASSERT(false, |
21061 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
21062 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
21063 | } else { |
21064 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
21065 | at::AutoDispatchSkipFunctionalize guard; |
21066 | at::Tensor tmp_output = at::_ops::linalg_matrix_rank_atol_rtol_float_out::call(self_, atol, rtol, hermitian, out_); |
21067 | return out; |
21068 | } |
21069 | } else { |
21070 | at::Tensor tmp_output; |
21071 | { |
21072 | at::AutoDispatchSkipFunctionalize guard; |
21073 | tmp_output = at::_ops::linalg_matrix_rank_atol_rtol_float::call(self_, atol, rtol, hermitian); |
21074 | } |
21075 | at::functionalization::impl::replace_(out, tmp_output); |
21076 | at::functionalization::impl::commit_update(out); |
21077 | at::functionalization::impl::sync(out); |
21078 | return out; |
21079 | } |
21080 | } |
21081 | |
21082 | at::Tensor & linalg_matrix_rank_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, double tol, bool hermitian, at::Tensor & out) { |
21083 | if (false) { |
21084 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
21085 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
21086 | // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
21087 | auto self_meta = to_meta(self); |
21088 | auto out_meta = to_meta(out); |
21089 | at::AutoDispatchSkipFunctionalize func_guard; |
21090 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
21091 | at::_ops::linalg_matrix_rank_out::call(self_meta, tol, hermitian, out_meta); |
21092 | } |
21093 | |
21094 | at::Tensor self_; |
21095 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
21096 | at::functionalization::impl::sync(self); |
21097 | self_ = at::functionalization::impl::from_functional_tensor(self); |
21098 | } else { |
21099 | self_ = self; |
21100 | } |
21101 | |
21102 | at::Tensor out_; |
21103 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
21104 | at::functionalization::impl::sync(out); |
21105 | out_ = at::functionalization::impl::from_functional_tensor(out); |
21106 | } else { |
21107 | out_ = out; |
21108 | } |
21109 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
21110 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
21111 | // case 1: trying to mutate a non functional tensor with a functional tensor is an error |
21112 | TORCH_INTERNAL_ASSERT(false, |
21113 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
21114 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
21115 | } else { |
21116 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
21117 | at::AutoDispatchSkipFunctionalize guard; |
21118 | at::Tensor tmp_output = at::_ops::linalg_matrix_rank_out::call(self_, tol, hermitian, out_); |
21119 | return out; |
21120 | } |
21121 | } else { |
21122 | at::Tensor tmp_output; |
21123 | { |
21124 | at::AutoDispatchSkipFunctionalize guard; |
21125 | tmp_output = at::_ops::linalg_matrix_rank::call(self_, tol, hermitian); |
21126 | } |
21127 | at::functionalization::impl::replace_(out, tmp_output); |
21128 | at::functionalization::impl::commit_update(out); |
21129 | at::functionalization::impl::sync(out); |
21130 | return out; |
21131 | } |
21132 | } |
21133 | |
21134 | at::Tensor & linalg_matrix_rank_out_out_tol_tensor(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & tol, bool hermitian, at::Tensor & out) { |
21135 | if (false) { |
21136 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
21137 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
21138 | // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
21139 | auto input_meta = to_meta(input); |
21140 | auto tol_meta = to_meta(tol); |
21141 | auto out_meta = to_meta(out); |
21142 | at::AutoDispatchSkipFunctionalize func_guard; |
21143 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
21144 | at::_ops::linalg_matrix_rank_out_tol_tensor::call(input_meta, tol_meta, hermitian, out_meta); |
21145 | } |
21146 | |
21147 | at::Tensor input_; |
21148 | if (at::functionalization::impl::isFunctionalTensor(input)) { |
21149 | at::functionalization::impl::sync(input); |
21150 | input_ = at::functionalization::impl::from_functional_tensor(input); |
21151 | } else { |
21152 | input_ = input; |
21153 | } |
21154 | |
21155 | at::Tensor tol_; |
21156 | if (at::functionalization::impl::isFunctionalTensor(tol)) { |
21157 | at::functionalization::impl::sync(tol); |
21158 | tol_ = at::functionalization::impl::from_functional_tensor(tol); |
21159 | } else { |
21160 | tol_ = tol; |
21161 | } |
21162 | |
21163 | at::Tensor out_; |
21164 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
21165 | at::functionalization::impl::sync(out); |
21166 | out_ = at::functionalization::impl::from_functional_tensor(out); |
21167 | } else { |
21168 | out_ = out; |
21169 | } |
21170 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
21171 | if ((false || at::functionalization::impl::isFunctionalTensor(input) || at::functionalization::impl::isFunctionalTensor(tol))) { |
// case 1: trying to mutate a non-functional tensor with a functional tensor is an error
21173 | TORCH_INTERNAL_ASSERT(false, |
21174 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
21175 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
21176 | } else { |
21177 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
21178 | at::AutoDispatchSkipFunctionalize guard; |
21179 | at::Tensor tmp_output = at::_ops::linalg_matrix_rank_out_tol_tensor::call(input_, tol_, hermitian, out_); |
return out;
21181 | } |
21182 | } else { |
21183 | at::Tensor tmp_output; |
21184 | { |
21185 | at::AutoDispatchSkipFunctionalize guard; |
21186 | tmp_output = at::_ops::linalg_matrix_rank_tol_tensor::call(input_, tol_, hermitian); |
21187 | } |
21188 | at::functionalization::impl::replace_(out, tmp_output); |
21189 | at::functionalization::impl::commit_update(out); |
21190 | at::functionalization::impl::sync(out); |
21191 | return out; |
21192 | } |
21193 | } |
21194 | |
21195 | at::Tensor & _test_optional_floatlist_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & values, c10::optional<at::ArrayRef<double>> addends, at::Tensor & out) { |
21196 | if (false) { |
21197 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
21198 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
// (We can only do this for inplace ops today though, because they technically all support meta tensors).
21200 | auto values_meta = to_meta(values); |
21201 | auto out_meta = to_meta(out); |
21202 | at::AutoDispatchSkipFunctionalize func_guard; |
21203 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
21204 | at::_ops::_test_optional_floatlist_out::call(values_meta, addends, out_meta); |
21205 | } |
21206 | |
21207 | at::Tensor values_; |
21208 | if (at::functionalization::impl::isFunctionalTensor(values)) { |
21209 | at::functionalization::impl::sync(values); |
21210 | values_ = at::functionalization::impl::from_functional_tensor(values); |
21211 | } else { |
21212 | values_ = values; |
21213 | } |
21214 | |
21215 | at::Tensor out_; |
21216 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
21217 | at::functionalization::impl::sync(out); |
21218 | out_ = at::functionalization::impl::from_functional_tensor(out); |
21219 | } else { |
21220 | out_ = out; |
21221 | } |
21222 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
21223 | if ((false || at::functionalization::impl::isFunctionalTensor(values))) { |
// case 1: trying to mutate a non-functional tensor with a functional tensor is an error
21225 | TORCH_INTERNAL_ASSERT(false, |
21226 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
21227 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
21228 | } else { |
21229 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
21230 | at::AutoDispatchSkipFunctionalize guard; |
21231 | at::Tensor tmp_output = at::_ops::_test_optional_floatlist_out::call(values_, addends, out_); |
return out;
21233 | } |
21234 | } else { |
21235 | at::Tensor tmp_output; |
21236 | { |
21237 | at::AutoDispatchSkipFunctionalize guard; |
21238 | tmp_output = at::_ops::_test_optional_floatlist::call(values_, addends); |
21239 | } |
21240 | at::functionalization::impl::replace_(out, tmp_output); |
21241 | at::functionalization::impl::commit_update(out); |
21242 | at::functionalization::impl::sync(out); |
21243 | return out; |
21244 | } |
21245 | } |
21246 | |
21247 | at::Tensor & _test_warn_in_autograd_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) { |
21248 | if (false) { |
21249 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
21250 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
// (We can only do this for inplace ops today though, because they technically all support meta tensors).
21252 | auto self_meta = to_meta(self); |
21253 | auto out_meta = to_meta(out); |
21254 | at::AutoDispatchSkipFunctionalize func_guard; |
21255 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
21256 | at::_ops::_test_warn_in_autograd_out::call(self_meta, out_meta); |
21257 | } |
21258 | |
21259 | at::Tensor self_; |
21260 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
21261 | at::functionalization::impl::sync(self); |
21262 | self_ = at::functionalization::impl::from_functional_tensor(self); |
21263 | } else { |
21264 | self_ = self; |
21265 | } |
21266 | |
21267 | at::Tensor out_; |
21268 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
21269 | at::functionalization::impl::sync(out); |
21270 | out_ = at::functionalization::impl::from_functional_tensor(out); |
21271 | } else { |
21272 | out_ = out; |
21273 | } |
21274 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
21275 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
// case 1: trying to mutate a non-functional tensor with a functional tensor is an error
21277 | TORCH_INTERNAL_ASSERT(false, |
21278 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
21279 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
21280 | } else { |
21281 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
21282 | at::AutoDispatchSkipFunctionalize guard; |
21283 | at::Tensor tmp_output = at::_ops::_test_warn_in_autograd_out::call(self_, out_); |
return out;
21285 | } |
21286 | } else { |
21287 | at::Tensor tmp_output; |
21288 | { |
21289 | at::AutoDispatchSkipFunctionalize guard; |
21290 | tmp_output = at::_ops::_test_warn_in_autograd::call(self_); |
21291 | } |
21292 | at::functionalization::impl::replace_(out, tmp_output); |
21293 | at::functionalization::impl::commit_update(out); |
21294 | at::functionalization::impl::sync(out); |
21295 | return out; |
21296 | } |
21297 | } |
21298 | |
21299 | at::Tensor & _segment_reduce_backward_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad, const at::Tensor & output, const at::Tensor & data, c10::string_view reduce, const c10::optional<at::Tensor> & lengths, const c10::optional<at::Tensor> & offsets, int64_t axis, const c10::optional<at::Scalar> & initial, at::Tensor & out) { |
21300 | if (false) { |
21301 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
21302 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
// (We can only do this for inplace ops today though, because they technically all support meta tensors).
21304 | auto grad_meta = to_meta(grad); |
21305 | auto output_meta = to_meta(output); |
21306 | auto data_meta = to_meta(data); |
21307 | auto lengths_meta = to_meta(lengths); |
21308 | auto offsets_meta = to_meta(offsets); |
21309 | auto out_meta = to_meta(out); |
21310 | at::AutoDispatchSkipFunctionalize func_guard; |
21311 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
21312 | at::_ops::_segment_reduce_backward_out::call(grad_meta, output_meta, data_meta, reduce, lengths_meta, offsets_meta, axis, initial, out_meta); |
21313 | } |
21314 | |
21315 | at::Tensor grad_; |
21316 | if (at::functionalization::impl::isFunctionalTensor(grad)) { |
21317 | at::functionalization::impl::sync(grad); |
21318 | grad_ = at::functionalization::impl::from_functional_tensor(grad); |
21319 | } else { |
21320 | grad_ = grad; |
21321 | } |
21322 | |
21323 | at::Tensor output_; |
21324 | if (at::functionalization::impl::isFunctionalTensor(output)) { |
21325 | at::functionalization::impl::sync(output); |
21326 | output_ = at::functionalization::impl::from_functional_tensor(output); |
21327 | } else { |
21328 | output_ = output; |
21329 | } |
21330 | |
21331 | at::Tensor data_; |
21332 | if (at::functionalization::impl::isFunctionalTensor(data)) { |
21333 | at::functionalization::impl::sync(data); |
21334 | data_ = at::functionalization::impl::from_functional_tensor(data); |
21335 | } else { |
21336 | data_ = data; |
21337 | } |
21338 | |
21339 | c10::optional<at::Tensor> lengths_; |
21340 | if (at::functionalization::impl::isFunctionalTensor(lengths)) { |
21341 | at::functionalization::impl::sync(lengths); |
21342 | lengths_ = at::functionalization::impl::from_functional_tensor(lengths); |
21343 | } else { |
21344 | lengths_ = lengths; |
21345 | } |
21346 | |
21347 | c10::optional<at::Tensor> offsets_; |
21348 | if (at::functionalization::impl::isFunctionalTensor(offsets)) { |
21349 | at::functionalization::impl::sync(offsets); |
21350 | offsets_ = at::functionalization::impl::from_functional_tensor(offsets); |
21351 | } else { |
21352 | offsets_ = offsets; |
21353 | } |
21354 | |
21355 | at::Tensor out_; |
21356 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
21357 | at::functionalization::impl::sync(out); |
21358 | out_ = at::functionalization::impl::from_functional_tensor(out); |
21359 | } else { |
21360 | out_ = out; |
21361 | } |
21362 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
21363 | if ((false || at::functionalization::impl::isFunctionalTensor(grad) || at::functionalization::impl::isFunctionalTensor(output) || at::functionalization::impl::isFunctionalTensor(data) || at::functionalization::impl::isFunctionalTensor(lengths) || at::functionalization::impl::isFunctionalTensor(offsets))) { |
// case 1: trying to mutate a non-functional tensor with a functional tensor is an error
21365 | TORCH_INTERNAL_ASSERT(false, |
21366 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
21367 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
21368 | } else { |
21369 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
21370 | at::AutoDispatchSkipFunctionalize guard; |
21371 | at::Tensor tmp_output = at::_ops::_segment_reduce_backward_out::call(grad_, output_, data_, reduce, lengths_, offsets_, axis, initial, out_); |
return out;
21373 | } |
21374 | } else { |
21375 | at::Tensor tmp_output; |
21376 | { |
21377 | at::AutoDispatchSkipFunctionalize guard; |
21378 | tmp_output = at::_ops::_segment_reduce_backward::call(grad_, output_, data_, reduce, lengths_, offsets_, axis, initial); |
21379 | } |
21380 | at::functionalization::impl::replace_(out, tmp_output); |
21381 | at::functionalization::impl::commit_update(out); |
21382 | at::functionalization::impl::sync(out); |
21383 | return out; |
21384 | } |
21385 | } |
21386 | |
21387 | at::Tensor & _sparse_broadcast_to_copy_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef size, at::Tensor & out) { |
21388 | if (false) { |
21389 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
21390 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
// (We can only do this for inplace ops today though, because they technically all support meta tensors).
21392 | auto self_meta = to_meta(self); |
21393 | auto out_meta = to_meta(out); |
21394 | at::AutoDispatchSkipFunctionalize func_guard; |
21395 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
21396 | at::_ops::_sparse_broadcast_to_copy_out::call(self_meta, size, out_meta); |
21397 | } |
21398 | |
21399 | at::Tensor self_; |
21400 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
21401 | at::functionalization::impl::sync(self); |
21402 | self_ = at::functionalization::impl::from_functional_tensor(self); |
21403 | } else { |
21404 | self_ = self; |
21405 | } |
21406 | |
21407 | at::Tensor out_; |
21408 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
21409 | at::functionalization::impl::sync(out); |
21410 | out_ = at::functionalization::impl::from_functional_tensor(out); |
21411 | } else { |
21412 | out_ = out; |
21413 | } |
21414 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
21415 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
// case 1: trying to mutate a non-functional tensor with a functional tensor is an error
21417 | TORCH_INTERNAL_ASSERT(false, |
21418 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
21419 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
21420 | } else { |
21421 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
21422 | at::AutoDispatchSkipFunctionalize guard; |
21423 | at::Tensor tmp_output = at::_ops::_sparse_broadcast_to_copy_out::call(self_, size, out_); |
return out;
21425 | } |
21426 | } else { |
21427 | at::Tensor tmp_output; |
21428 | { |
21429 | at::AutoDispatchSkipFunctionalize guard; |
21430 | tmp_output = at::_ops::_sparse_broadcast_to_copy::call(self_, size); |
21431 | } |
21432 | at::functionalization::impl::replace_(out, tmp_output); |
21433 | at::functionalization::impl::commit_update(out); |
21434 | at::functionalization::impl::sync(out); |
21435 | return out; |
21436 | } |
21437 | } |
21438 | |
21439 | at::Tensor & unsqueeze_copy_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, at::Tensor & out) { |
21440 | if (false) { |
21441 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
21442 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
// (We can only do this for inplace ops today though, because they technically all support meta tensors).
21444 | auto self_meta = to_meta(self); |
21445 | auto out_meta = to_meta(out); |
21446 | at::AutoDispatchSkipFunctionalize func_guard; |
21447 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
21448 | at::_ops::unsqueeze_copy_out::call(self_meta, dim, out_meta); |
21449 | } |
21450 | |
21451 | at::Tensor self_; |
21452 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
21453 | at::functionalization::impl::sync(self); |
21454 | self_ = at::functionalization::impl::from_functional_tensor(self); |
21455 | } else { |
21456 | self_ = self; |
21457 | } |
21458 | |
21459 | at::Tensor out_; |
21460 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
21461 | at::functionalization::impl::sync(out); |
21462 | out_ = at::functionalization::impl::from_functional_tensor(out); |
21463 | } else { |
21464 | out_ = out; |
21465 | } |
21466 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
21467 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
// case 1: trying to mutate a non-functional tensor with a functional tensor is an error
21469 | TORCH_INTERNAL_ASSERT(false, |
21470 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
21471 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
21472 | } else { |
21473 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
21474 | at::AutoDispatchSkipFunctionalize guard; |
21475 | at::Tensor tmp_output = at::_ops::unsqueeze_copy_out::call(self_, dim, out_); |
return out;
21477 | } |
21478 | } else { |
21479 | at::Tensor tmp_output; |
21480 | { |
21481 | at::AutoDispatchSkipFunctionalize guard; |
21482 | tmp_output = at::_ops::unsqueeze_copy::call(self_, dim); |
21483 | } |
21484 | at::functionalization::impl::replace_(out, tmp_output); |
21485 | at::functionalization::impl::commit_update(out); |
21486 | at::functionalization::impl::sync(out); |
21487 | return out; |
21488 | } |
21489 | } |
21490 | |
21491 | at::Tensor & values_copy_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) { |
21492 | if (false) { |
21493 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
21494 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
// (We can only do this for inplace ops today though, because they technically all support meta tensors).
21496 | auto self_meta = to_meta(self); |
21497 | auto out_meta = to_meta(out); |
21498 | at::AutoDispatchSkipFunctionalize func_guard; |
21499 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
21500 | at::_ops::values_copy_out::call(self_meta, out_meta); |
21501 | } |
21502 | |
21503 | at::Tensor self_; |
21504 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
21505 | at::functionalization::impl::sync(self); |
21506 | self_ = at::functionalization::impl::from_functional_tensor(self); |
21507 | } else { |
21508 | self_ = self; |
21509 | } |
21510 | |
21511 | at::Tensor out_; |
21512 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
21513 | at::functionalization::impl::sync(out); |
21514 | out_ = at::functionalization::impl::from_functional_tensor(out); |
21515 | } else { |
21516 | out_ = out; |
21517 | } |
21518 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
21519 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
// case 1: trying to mutate a non-functional tensor with a functional tensor is an error
21521 | TORCH_INTERNAL_ASSERT(false, |
21522 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
21523 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
21524 | } else { |
21525 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
21526 | at::AutoDispatchSkipFunctionalize guard; |
21527 | at::Tensor tmp_output = at::_ops::values_copy_out::call(self_, out_); |
return out;
21529 | } |
21530 | } else { |
21531 | at::Tensor tmp_output; |
21532 | { |
21533 | at::AutoDispatchSkipFunctionalize guard; |
21534 | tmp_output = at::_ops::values_copy::call(self_); |
21535 | } |
21536 | at::functionalization::impl::replace_(out, tmp_output); |
21537 | at::functionalization::impl::commit_update(out); |
21538 | at::functionalization::impl::sync(out); |
21539 | return out; |
21540 | } |
21541 | } |
21542 | |
21543 | at::Tensor & to_padded_tensor_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, double padding, at::OptionalSymIntArrayRef output_size, at::Tensor & out) { |
21544 | if (false) { |
21545 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
21546 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
// (We can only do this for inplace ops today though, because they technically all support meta tensors).
21548 | auto self_meta = to_meta(self); |
21549 | auto out_meta = to_meta(out); |
21550 | at::AutoDispatchSkipFunctionalize func_guard; |
21551 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
21552 | at::_ops::to_padded_tensor_out::call(self_meta, padding, output_size, out_meta); |
21553 | } |
21554 | |
21555 | at::Tensor self_; |
21556 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
21557 | at::functionalization::impl::sync(self); |
21558 | self_ = at::functionalization::impl::from_functional_tensor(self); |
21559 | } else { |
21560 | self_ = self; |
21561 | } |
21562 | |
21563 | at::Tensor out_; |
21564 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
21565 | at::functionalization::impl::sync(out); |
21566 | out_ = at::functionalization::impl::from_functional_tensor(out); |
21567 | } else { |
21568 | out_ = out; |
21569 | } |
21570 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
21571 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
// case 1: trying to mutate a non-functional tensor with a functional tensor is an error
21573 | TORCH_INTERNAL_ASSERT(false, |
21574 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
21575 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
21576 | } else { |
21577 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
21578 | at::AutoDispatchSkipFunctionalize guard; |
21579 | at::Tensor tmp_output = at::_ops::to_padded_tensor_out::call(self_, padding, output_size, out_); |
return out;
21581 | } |
21582 | } else { |
21583 | at::Tensor tmp_output; |
21584 | { |
21585 | at::AutoDispatchSkipFunctionalize guard; |
21586 | tmp_output = at::_ops::to_padded_tensor::call(self_, padding, output_size); |
21587 | } |
21588 | at::functionalization::impl::replace_(out, tmp_output); |
21589 | at::functionalization::impl::commit_update(out); |
21590 | at::functionalization::impl::sync(out); |
21591 | return out; |
21592 | } |
21593 | } |
21594 | |
21595 | at::Tensor & _triton_scaled_dot_attention_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & q, const at::Tensor & k, const at::Tensor & v, double dropout_p, at::Tensor & out) { |
21596 | if (false) { |
21597 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
21598 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
// (We can only do this for inplace ops today though, because they technically all support meta tensors).
21600 | auto q_meta = to_meta(q); |
21601 | auto k_meta = to_meta(k); |
21602 | auto v_meta = to_meta(v); |
21603 | auto out_meta = to_meta(out); |
21604 | at::AutoDispatchSkipFunctionalize func_guard; |
21605 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
21606 | at::_ops::_triton_scaled_dot_attention_out::call(q_meta, k_meta, v_meta, dropout_p, out_meta); |
21607 | } |
21608 | |
21609 | at::Tensor q_; |
21610 | if (at::functionalization::impl::isFunctionalTensor(q)) { |
21611 | at::functionalization::impl::sync(q); |
21612 | q_ = at::functionalization::impl::from_functional_tensor(q); |
21613 | } else { |
21614 | q_ = q; |
21615 | } |
21616 | |
21617 | at::Tensor k_; |
21618 | if (at::functionalization::impl::isFunctionalTensor(k)) { |
21619 | at::functionalization::impl::sync(k); |
21620 | k_ = at::functionalization::impl::from_functional_tensor(k); |
21621 | } else { |
21622 | k_ = k; |
21623 | } |
21624 | |
21625 | at::Tensor v_; |
21626 | if (at::functionalization::impl::isFunctionalTensor(v)) { |
21627 | at::functionalization::impl::sync(v); |
21628 | v_ = at::functionalization::impl::from_functional_tensor(v); |
21629 | } else { |
21630 | v_ = v; |
21631 | } |
21632 | |
21633 | at::Tensor out_; |
21634 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
21635 | at::functionalization::impl::sync(out); |
21636 | out_ = at::functionalization::impl::from_functional_tensor(out); |
21637 | } else { |
21638 | out_ = out; |
21639 | } |
21640 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
21641 | if ((false || at::functionalization::impl::isFunctionalTensor(q) || at::functionalization::impl::isFunctionalTensor(k) || at::functionalization::impl::isFunctionalTensor(v))) { |
// case 1: trying to mutate a non-functional tensor with a functional tensor is an error
21643 | TORCH_INTERNAL_ASSERT(false, |
21644 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
21645 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
21646 | } else { |
21647 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
21648 | at::AutoDispatchSkipFunctionalize guard; |
21649 | at::Tensor tmp_output = at::_ops::_triton_scaled_dot_attention_out::call(q_, k_, v_, dropout_p, out_); |
return out;
21651 | } |
21652 | } else { |
21653 | at::Tensor tmp_output; |
21654 | { |
21655 | at::AutoDispatchSkipFunctionalize guard; |
21656 | tmp_output = at::_ops::_triton_scaled_dot_attention::call(q_, k_, v_, dropout_p); |
21657 | } |
21658 | at::functionalization::impl::replace_(out, tmp_output); |
21659 | at::functionalization::impl::commit_update(out); |
21660 | at::functionalization::impl::sync(out); |
21661 | return out; |
21662 | } |
21663 | } |
21664 | |
21665 | at::Tensor & special_bessel_y0_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) { |
21666 | if (false) { |
21667 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
21668 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
// (We can only do this for inplace ops today though, because they technically all support meta tensors).
21670 | auto self_meta = to_meta(self); |
21671 | auto out_meta = to_meta(out); |
21672 | at::AutoDispatchSkipFunctionalize func_guard; |
21673 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
21674 | at::_ops::special_bessel_y0_out::call(self_meta, out_meta); |
21675 | } |
21676 | |
21677 | at::Tensor self_; |
21678 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
21679 | at::functionalization::impl::sync(self); |
21680 | self_ = at::functionalization::impl::from_functional_tensor(self); |
21681 | } else { |
21682 | self_ = self; |
21683 | } |
21684 | |
21685 | at::Tensor out_; |
21686 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
21687 | at::functionalization::impl::sync(out); |
21688 | out_ = at::functionalization::impl::from_functional_tensor(out); |
21689 | } else { |
21690 | out_ = out; |
21691 | } |
21692 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
21693 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
// case 1: trying to mutate a non-functional tensor with a functional tensor is an error
21695 | TORCH_INTERNAL_ASSERT(false, |
21696 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
21697 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
21698 | } else { |
21699 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
21700 | at::AutoDispatchSkipFunctionalize guard; |
21701 | at::Tensor tmp_output = at::_ops::special_bessel_y0_out::call(self_, out_); |
return out;
21703 | } |
21704 | } else { |
21705 | at::Tensor tmp_output; |
21706 | { |
21707 | at::AutoDispatchSkipFunctionalize guard; |
21708 | tmp_output = at::_ops::special_bessel_y0::call(self_); |
21709 | } |
21710 | at::functionalization::impl::replace_(out, tmp_output); |
21711 | at::functionalization::impl::commit_update(out); |
21712 | at::functionalization::impl::sync(out); |
21713 | return out; |
21714 | } |
21715 | } |
21716 | |
21717 | at::Tensor & special_chebyshev_polynomial_t_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, const at::Tensor & n, at::Tensor & out) { |
21718 | if (false) { |
21719 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
21720 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
// (We can only do this for inplace ops today though, because they technically all support meta tensors).
21722 | auto x_meta = to_meta(x); |
21723 | auto n_meta = to_meta(n); |
21724 | auto out_meta = to_meta(out); |
21725 | at::AutoDispatchSkipFunctionalize func_guard; |
21726 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
21727 | at::_ops::special_chebyshev_polynomial_t_out::call(x_meta, n_meta, out_meta); |
21728 | } |
21729 | |
21730 | at::Tensor x_; |
21731 | if (at::functionalization::impl::isFunctionalTensor(x)) { |
21732 | at::functionalization::impl::sync(x); |
21733 | x_ = at::functionalization::impl::from_functional_tensor(x); |
21734 | } else { |
21735 | x_ = x; |
21736 | } |
21737 | |
21738 | at::Tensor n_; |
21739 | if (at::functionalization::impl::isFunctionalTensor(n)) { |
21740 | at::functionalization::impl::sync(n); |
21741 | n_ = at::functionalization::impl::from_functional_tensor(n); |
21742 | } else { |
21743 | n_ = n; |
21744 | } |
21745 | |
21746 | at::Tensor out_; |
21747 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
21748 | at::functionalization::impl::sync(out); |
21749 | out_ = at::functionalization::impl::from_functional_tensor(out); |
21750 | } else { |
21751 | out_ = out; |
21752 | } |
21753 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
21754 | if ((false || at::functionalization::impl::isFunctionalTensor(x) || at::functionalization::impl::isFunctionalTensor(n))) { |
// case 1: trying to mutate a non-functional tensor with a functional tensor is an error
21756 | TORCH_INTERNAL_ASSERT(false, |
21757 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
21758 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
21759 | } else { |
21760 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
21761 | at::AutoDispatchSkipFunctionalize guard; |
21762 | at::Tensor tmp_output = at::_ops::special_chebyshev_polynomial_t_out::call(x_, n_, out_); |
return out;
21764 | } |
21765 | } else { |
21766 | at::Tensor tmp_output; |
21767 | { |
21768 | at::AutoDispatchSkipFunctionalize guard; |
21769 | tmp_output = at::_ops::special_chebyshev_polynomial_t::call(x_, n_); |
21770 | } |
21771 | at::functionalization::impl::replace_(out, tmp_output); |
21772 | at::functionalization::impl::commit_update(out); |
21773 | at::functionalization::impl::sync(out); |
21774 | return out; |
21775 | } |
21776 | } |
21777 | |
21778 | at::Tensor & special_chebyshev_polynomial_t_out_x_scalar_out(c10::DispatchKeySet dispatchKeySet, const at::Scalar & x, const at::Tensor & n, at::Tensor & out) { |
21779 | if (false) { |
21780 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
21781 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
// (We can only do this for inplace ops today though, because they technically all support meta tensors).
21783 | auto n_meta = to_meta(n); |
21784 | auto out_meta = to_meta(out); |
21785 | at::AutoDispatchSkipFunctionalize func_guard; |
21786 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
21787 | at::_ops::special_chebyshev_polynomial_t_x_scalar_out::call(x, n_meta, out_meta); |
21788 | } |
21789 | |
21790 | at::Tensor n_; |
21791 | if (at::functionalization::impl::isFunctionalTensor(n)) { |
21792 | at::functionalization::impl::sync(n); |
21793 | n_ = at::functionalization::impl::from_functional_tensor(n); |
21794 | } else { |
21795 | n_ = n; |
21796 | } |
21797 | |
21798 | at::Tensor out_; |
21799 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
21800 | at::functionalization::impl::sync(out); |
21801 | out_ = at::functionalization::impl::from_functional_tensor(out); |
21802 | } else { |
21803 | out_ = out; |
21804 | } |
21805 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
21806 | if ((false || at::functionalization::impl::isFunctionalTensor(n))) { |
// case 1: trying to mutate a non-functional tensor with a functional tensor is an error
21808 | TORCH_INTERNAL_ASSERT(false, |
21809 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
21810 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
21811 | } else { |
21812 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
21813 | at::AutoDispatchSkipFunctionalize guard; |
21814 | at::Tensor tmp_output = at::_ops::special_chebyshev_polynomial_t_x_scalar_out::call(x, n_, out_); |
return out;
21816 | } |
21817 | } else { |
21818 | at::Tensor tmp_output; |
21819 | { |
21820 | at::AutoDispatchSkipFunctionalize guard; |
21821 | tmp_output = at::_ops::special_chebyshev_polynomial_t_x_scalar::call(x, n_); |
21822 | } |
21823 | at::functionalization::impl::replace_(out, tmp_output); |
21824 | at::functionalization::impl::commit_update(out); |
21825 | at::functionalization::impl::sync(out); |
21826 | return out; |
21827 | } |
21828 | } |
21829 | |
21830 | at::Tensor & special_chebyshev_polynomial_t_out_n_scalar_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, const at::Scalar & n, at::Tensor & out) { |
21831 | if (false) { |
21832 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
21833 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
// (We can only do this for inplace ops today though, because they technically all support meta tensors).
21835 | auto x_meta = to_meta(x); |
21836 | auto out_meta = to_meta(out); |
21837 | at::AutoDispatchSkipFunctionalize func_guard; |
21838 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
21839 | at::_ops::special_chebyshev_polynomial_t_n_scalar_out::call(x_meta, n, out_meta); |
21840 | } |
21841 | |
21842 | at::Tensor x_; |
21843 | if (at::functionalization::impl::isFunctionalTensor(x)) { |
21844 | at::functionalization::impl::sync(x); |
21845 | x_ = at::functionalization::impl::from_functional_tensor(x); |
21846 | } else { |
21847 | x_ = x; |
21848 | } |
21849 | |
21850 | at::Tensor out_; |
21851 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
21852 | at::functionalization::impl::sync(out); |
21853 | out_ = at::functionalization::impl::from_functional_tensor(out); |
21854 | } else { |
21855 | out_ = out; |
21856 | } |
21857 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
21858 | if ((false || at::functionalization::impl::isFunctionalTensor(x))) { |
// case 1: trying to mutate a non-functional tensor with a functional tensor is an error
21860 | TORCH_INTERNAL_ASSERT(false, |
21861 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
21862 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
21863 | } else { |
21864 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
21865 | at::AutoDispatchSkipFunctionalize guard; |
21866 | at::Tensor tmp_output = at::_ops::special_chebyshev_polynomial_t_n_scalar_out::call(x_, n, out_); |
return out;
21868 | } |
21869 | } else { |
21870 | at::Tensor tmp_output; |
21871 | { |
21872 | at::AutoDispatchSkipFunctionalize guard; |
21873 | tmp_output = at::_ops::special_chebyshev_polynomial_t_n_scalar::call(x_, n); |
21874 | } |
21875 | at::functionalization::impl::replace_(out, tmp_output); |
21876 | at::functionalization::impl::commit_update(out); |
21877 | at::functionalization::impl::sync(out); |
21878 | return out; |
21879 | } |
21880 | } |
21881 | |
21882 | at::Tensor & special_chebyshev_polynomial_u_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, const at::Tensor & n, at::Tensor & out) { |
21883 | if (false) { |
21884 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
21885 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
// (We can only do this for inplace ops today though, because they technically all support meta tensors).
21887 | auto x_meta = to_meta(x); |
21888 | auto n_meta = to_meta(n); |
21889 | auto out_meta = to_meta(out); |
21890 | at::AutoDispatchSkipFunctionalize func_guard; |
21891 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
21892 | at::_ops::special_chebyshev_polynomial_u_out::call(x_meta, n_meta, out_meta); |
21893 | } |
21894 | |
21895 | at::Tensor x_; |
21896 | if (at::functionalization::impl::isFunctionalTensor(x)) { |
21897 | at::functionalization::impl::sync(x); |
21898 | x_ = at::functionalization::impl::from_functional_tensor(x); |
21899 | } else { |
21900 | x_ = x; |
21901 | } |
21902 | |
21903 | at::Tensor n_; |
21904 | if (at::functionalization::impl::isFunctionalTensor(n)) { |
21905 | at::functionalization::impl::sync(n); |
21906 | n_ = at::functionalization::impl::from_functional_tensor(n); |
21907 | } else { |
21908 | n_ = n; |
21909 | } |
21910 | |
21911 | at::Tensor out_; |
21912 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
21913 | at::functionalization::impl::sync(out); |
21914 | out_ = at::functionalization::impl::from_functional_tensor(out); |
21915 | } else { |
21916 | out_ = out; |
21917 | } |
21918 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
21919 | if ((false || at::functionalization::impl::isFunctionalTensor(x) || at::functionalization::impl::isFunctionalTensor(n))) { |
// case 1: trying to mutate a non-functional tensor with a functional tensor is an error
21921 | TORCH_INTERNAL_ASSERT(false, |
21922 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
21923 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
21924 | } else { |
21925 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
21926 | at::AutoDispatchSkipFunctionalize guard; |
21927 | at::Tensor tmp_output = at::_ops::special_chebyshev_polynomial_u_out::call(x_, n_, out_); |
return out;
21929 | } |
21930 | } else { |
21931 | at::Tensor tmp_output; |
21932 | { |
21933 | at::AutoDispatchSkipFunctionalize guard; |
21934 | tmp_output = at::_ops::special_chebyshev_polynomial_u::call(x_, n_); |
21935 | } |
21936 | at::functionalization::impl::replace_(out, tmp_output); |
21937 | at::functionalization::impl::commit_update(out); |
21938 | at::functionalization::impl::sync(out); |
21939 | return out; |
21940 | } |
21941 | } |
21942 | |
21943 | at::Tensor & special_chebyshev_polynomial_u_out_x_scalar_out(c10::DispatchKeySet dispatchKeySet, const at::Scalar & x, const at::Tensor & n, at::Tensor & out) { |
21944 | if (false) { |
21945 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
21946 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
// (We can only do this for inplace ops today though, because they technically all support meta tensors).
21948 | auto n_meta = to_meta(n); |
21949 | auto out_meta = to_meta(out); |
21950 | at::AutoDispatchSkipFunctionalize func_guard; |
21951 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
21952 | at::_ops::special_chebyshev_polynomial_u_x_scalar_out::call(x, n_meta, out_meta); |
21953 | } |
21954 | |
21955 | at::Tensor n_; |
21956 | if (at::functionalization::impl::isFunctionalTensor(n)) { |
21957 | at::functionalization::impl::sync(n); |
21958 | n_ = at::functionalization::impl::from_functional_tensor(n); |
21959 | } else { |
21960 | n_ = n; |
21961 | } |
21962 | |
21963 | at::Tensor out_; |
21964 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
21965 | at::functionalization::impl::sync(out); |
21966 | out_ = at::functionalization::impl::from_functional_tensor(out); |
21967 | } else { |
21968 | out_ = out; |
21969 | } |
21970 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
21971 | if ((false || at::functionalization::impl::isFunctionalTensor(n))) { |
// case 1: trying to mutate a non-functional tensor with a functional tensor is an error
21973 | TORCH_INTERNAL_ASSERT(false, |
21974 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
21975 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
21976 | } else { |
21977 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
21978 | at::AutoDispatchSkipFunctionalize guard; |
21979 | at::Tensor tmp_output = at::_ops::special_chebyshev_polynomial_u_x_scalar_out::call(x, n_, out_); |
return out;
21981 | } |
21982 | } else { |
21983 | at::Tensor tmp_output; |
21984 | { |
21985 | at::AutoDispatchSkipFunctionalize guard; |
21986 | tmp_output = at::_ops::special_chebyshev_polynomial_u_x_scalar::call(x, n_); |
21987 | } |
21988 | at::functionalization::impl::replace_(out, tmp_output); |
21989 | at::functionalization::impl::commit_update(out); |
21990 | at::functionalization::impl::sync(out); |
21991 | return out; |
21992 | } |
21993 | } |
21994 | |
21995 | at::Tensor & special_chebyshev_polynomial_u_out_n_scalar_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, const at::Scalar & n, at::Tensor & out) { |
21996 | if (false) { |
21997 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
21998 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
// (We can only do this for inplace ops today though, because they technically all support meta tensors).
22000 | auto x_meta = to_meta(x); |
22001 | auto out_meta = to_meta(out); |
22002 | at::AutoDispatchSkipFunctionalize func_guard; |
22003 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
22004 | at::_ops::special_chebyshev_polynomial_u_n_scalar_out::call(x_meta, n, out_meta); |
22005 | } |
22006 | |
22007 | at::Tensor x_; |
22008 | if (at::functionalization::impl::isFunctionalTensor(x)) { |
22009 | at::functionalization::impl::sync(x); |
22010 | x_ = at::functionalization::impl::from_functional_tensor(x); |
22011 | } else { |
22012 | x_ = x; |
22013 | } |
22014 | |
22015 | at::Tensor out_; |
22016 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
22017 | at::functionalization::impl::sync(out); |
22018 | out_ = at::functionalization::impl::from_functional_tensor(out); |
22019 | } else { |
22020 | out_ = out; |
22021 | } |
22022 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
22023 | if ((false || at::functionalization::impl::isFunctionalTensor(x))) { |
// case 1: trying to mutate a non-functional tensor with a functional tensor is an error
22025 | TORCH_INTERNAL_ASSERT(false, |
22026 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
22027 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
22028 | } else { |
22029 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
22030 | at::AutoDispatchSkipFunctionalize guard; |
22031 | at::Tensor tmp_output = at::_ops::special_chebyshev_polynomial_u_n_scalar_out::call(x_, n, out_); |
return out;
22033 | } |
22034 | } else { |
22035 | at::Tensor tmp_output; |
22036 | { |
22037 | at::AutoDispatchSkipFunctionalize guard; |
22038 | tmp_output = at::_ops::special_chebyshev_polynomial_u_n_scalar::call(x_, n); |
22039 | } |
22040 | at::functionalization::impl::replace_(out, tmp_output); |
22041 | at::functionalization::impl::commit_update(out); |
22042 | at::functionalization::impl::sync(out); |
22043 | return out; |
22044 | } |
22045 | } |
22046 | |
22047 | at::Tensor & special_hermite_polynomial_h_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, const at::Tensor & n, at::Tensor & out) { |
22048 | if (false) { |
22049 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
22050 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
// (We can only do this for inplace ops today though, because they technically all support meta tensors).
22052 | auto x_meta = to_meta(x); |
22053 | auto n_meta = to_meta(n); |
22054 | auto out_meta = to_meta(out); |
22055 | at::AutoDispatchSkipFunctionalize func_guard; |
22056 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
22057 | at::_ops::special_hermite_polynomial_h_out::call(x_meta, n_meta, out_meta); |
22058 | } |
22059 | |
22060 | at::Tensor x_; |
22061 | if (at::functionalization::impl::isFunctionalTensor(x)) { |
22062 | at::functionalization::impl::sync(x); |
22063 | x_ = at::functionalization::impl::from_functional_tensor(x); |
22064 | } else { |
22065 | x_ = x; |
22066 | } |
22067 | |
22068 | at::Tensor n_; |
22069 | if (at::functionalization::impl::isFunctionalTensor(n)) { |
22070 | at::functionalization::impl::sync(n); |
22071 | n_ = at::functionalization::impl::from_functional_tensor(n); |
22072 | } else { |
22073 | n_ = n; |
22074 | } |
22075 | |
22076 | at::Tensor out_; |
22077 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
22078 | at::functionalization::impl::sync(out); |
22079 | out_ = at::functionalization::impl::from_functional_tensor(out); |
22080 | } else { |
22081 | out_ = out; |
22082 | } |
22083 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
22084 | if ((false || at::functionalization::impl::isFunctionalTensor(x) || at::functionalization::impl::isFunctionalTensor(n))) { |
// case 1: trying to mutate a non-functional tensor with a functional tensor is an error
22086 | TORCH_INTERNAL_ASSERT(false, |
22087 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
22088 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
22089 | } else { |
22090 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
22091 | at::AutoDispatchSkipFunctionalize guard; |
22092 | at::Tensor tmp_output = at::_ops::special_hermite_polynomial_h_out::call(x_, n_, out_); |
return out;
22094 | } |
22095 | } else { |
22096 | at::Tensor tmp_output; |
22097 | { |
22098 | at::AutoDispatchSkipFunctionalize guard; |
22099 | tmp_output = at::_ops::special_hermite_polynomial_h::call(x_, n_); |
22100 | } |
22101 | at::functionalization::impl::replace_(out, tmp_output); |
22102 | at::functionalization::impl::commit_update(out); |
22103 | at::functionalization::impl::sync(out); |
22104 | return out; |
22105 | } |
22106 | } |
22107 | |
22108 | at::Tensor & special_hermite_polynomial_h_out_x_scalar_out(c10::DispatchKeySet dispatchKeySet, const at::Scalar & x, const at::Tensor & n, at::Tensor & out) { |
22109 | if (false) { |
22110 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
22111 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
// (We can only do this for inplace ops today though, because they technically all support meta tensors).
22113 | auto n_meta = to_meta(n); |
22114 | auto out_meta = to_meta(out); |
22115 | at::AutoDispatchSkipFunctionalize func_guard; |
22116 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
22117 | at::_ops::special_hermite_polynomial_h_x_scalar_out::call(x, n_meta, out_meta); |
22118 | } |
22119 | |
22120 | at::Tensor n_; |
22121 | if (at::functionalization::impl::isFunctionalTensor(n)) { |
22122 | at::functionalization::impl::sync(n); |
22123 | n_ = at::functionalization::impl::from_functional_tensor(n); |
22124 | } else { |
22125 | n_ = n; |
22126 | } |
22127 | |
22128 | at::Tensor out_; |
22129 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
22130 | at::functionalization::impl::sync(out); |
22131 | out_ = at::functionalization::impl::from_functional_tensor(out); |
22132 | } else { |
22133 | out_ = out; |
22134 | } |
22135 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
22136 | if ((false || at::functionalization::impl::isFunctionalTensor(n))) { |
// case 1: trying to mutate a non-functional tensor with a functional tensor is an error
22138 | TORCH_INTERNAL_ASSERT(false, |
22139 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
22140 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
22141 | } else { |
22142 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
22143 | at::AutoDispatchSkipFunctionalize guard; |
22144 | at::Tensor tmp_output = at::_ops::special_hermite_polynomial_h_x_scalar_out::call(x, n_, out_); |
return out;
22146 | } |
22147 | } else { |
22148 | at::Tensor tmp_output; |
22149 | { |
22150 | at::AutoDispatchSkipFunctionalize guard; |
22151 | tmp_output = at::_ops::special_hermite_polynomial_h_x_scalar::call(x, n_); |
22152 | } |
22153 | at::functionalization::impl::replace_(out, tmp_output); |
22154 | at::functionalization::impl::commit_update(out); |
22155 | at::functionalization::impl::sync(out); |
22156 | return out; |
22157 | } |
22158 | } |
22159 | |
22160 | at::Tensor & special_hermite_polynomial_h_out_n_scalar_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, const at::Scalar & n, at::Tensor & out) { |
22161 | if (false) { |
22162 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
22163 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
// (We can only do this for inplace ops today though, because they technically all support meta tensors).
22165 | auto x_meta = to_meta(x); |
22166 | auto out_meta = to_meta(out); |
22167 | at::AutoDispatchSkipFunctionalize func_guard; |
22168 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
22169 | at::_ops::special_hermite_polynomial_h_n_scalar_out::call(x_meta, n, out_meta); |
22170 | } |
22171 | |
22172 | at::Tensor x_; |
22173 | if (at::functionalization::impl::isFunctionalTensor(x)) { |
22174 | at::functionalization::impl::sync(x); |
22175 | x_ = at::functionalization::impl::from_functional_tensor(x); |
22176 | } else { |
22177 | x_ = x; |
22178 | } |
22179 | |
22180 | at::Tensor out_; |
22181 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
22182 | at::functionalization::impl::sync(out); |
22183 | out_ = at::functionalization::impl::from_functional_tensor(out); |
22184 | } else { |
22185 | out_ = out; |
22186 | } |
22187 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
22188 | if ((false || at::functionalization::impl::isFunctionalTensor(x))) { |
// case 1: trying to mutate a non-functional tensor with a functional tensor is an error
22190 | TORCH_INTERNAL_ASSERT(false, |
22191 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
22192 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
22193 | } else { |
22194 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
22195 | at::AutoDispatchSkipFunctionalize guard; |
22196 | at::Tensor tmp_output = at::_ops::special_hermite_polynomial_h_n_scalar_out::call(x_, n, out_); |
return out;
22198 | } |
22199 | } else { |
22200 | at::Tensor tmp_output; |
22201 | { |
22202 | at::AutoDispatchSkipFunctionalize guard; |
22203 | tmp_output = at::_ops::special_hermite_polynomial_h_n_scalar::call(x_, n); |
22204 | } |
22205 | at::functionalization::impl::replace_(out, tmp_output); |
22206 | at::functionalization::impl::commit_update(out); |
22207 | at::functionalization::impl::sync(out); |
22208 | return out; |
22209 | } |
22210 | } |
22211 | |
22212 | at::Tensor & special_modified_bessel_k1_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) { |
22213 | if (false) { |
22214 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
22215 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
// (We can only do this for inplace ops today though, because they technically all support meta tensors).
22217 | auto self_meta = to_meta(self); |
22218 | auto out_meta = to_meta(out); |
22219 | at::AutoDispatchSkipFunctionalize func_guard; |
22220 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
22221 | at::_ops::special_modified_bessel_k1_out::call(self_meta, out_meta); |
22222 | } |
22223 | |
22224 | at::Tensor self_; |
22225 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
22226 | at::functionalization::impl::sync(self); |
22227 | self_ = at::functionalization::impl::from_functional_tensor(self); |
22228 | } else { |
22229 | self_ = self; |
22230 | } |
22231 | |
22232 | at::Tensor out_; |
22233 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
22234 | at::functionalization::impl::sync(out); |
22235 | out_ = at::functionalization::impl::from_functional_tensor(out); |
22236 | } else { |
22237 | out_ = out; |
22238 | } |
22239 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
22240 | if ((false || at::functionalization::impl::isFunctionalTensor(self))) { |
// case 1: trying to mutate a non-functional tensor with a functional tensor is an error
22242 | TORCH_INTERNAL_ASSERT(false, |
22243 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
22244 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
22245 | } else { |
22246 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
22247 | at::AutoDispatchSkipFunctionalize guard; |
22248 | at::Tensor tmp_output = at::_ops::special_modified_bessel_k1_out::call(self_, out_); |
return out;
22250 | } |
22251 | } else { |
22252 | at::Tensor tmp_output; |
22253 | { |
22254 | at::AutoDispatchSkipFunctionalize guard; |
22255 | tmp_output = at::_ops::special_modified_bessel_k1::call(self_); |
22256 | } |
22257 | at::functionalization::impl::replace_(out, tmp_output); |
22258 | at::functionalization::impl::commit_update(out); |
22259 | at::functionalization::impl::sync(out); |
22260 | return out; |
22261 | } |
22262 | } |
22263 | |
22264 | at::Tensor & special_scaled_modified_bessel_k0_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, at::Tensor & out) { |
22265 | if (false) { |
22266 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
22267 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
// (We can only do this for inplace ops today though, because they technically all support meta tensors).
22269 | auto x_meta = to_meta(x); |
22270 | auto out_meta = to_meta(out); |
22271 | at::AutoDispatchSkipFunctionalize func_guard; |
22272 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
22273 | at::_ops::special_scaled_modified_bessel_k0_out::call(x_meta, out_meta); |
22274 | } |
22275 | |
22276 | at::Tensor x_; |
22277 | if (at::functionalization::impl::isFunctionalTensor(x)) { |
22278 | at::functionalization::impl::sync(x); |
22279 | x_ = at::functionalization::impl::from_functional_tensor(x); |
22280 | } else { |
22281 | x_ = x; |
22282 | } |
22283 | |
22284 | at::Tensor out_; |
22285 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
22286 | at::functionalization::impl::sync(out); |
22287 | out_ = at::functionalization::impl::from_functional_tensor(out); |
22288 | } else { |
22289 | out_ = out; |
22290 | } |
22291 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
22292 | if ((false || at::functionalization::impl::isFunctionalTensor(x))) { |
22293 | // case 1: trying to mutate a non-functional tensor with a functional tensor is an error |
22294 | TORCH_INTERNAL_ASSERT(false, |
22295 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
22296 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
22297 | } else { |
22298 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
22299 | at::AutoDispatchSkipFunctionalize guard; |
22300 | at::Tensor tmp_output = at::_ops::special_scaled_modified_bessel_k0_out::call(x_, out_); |
22301 | return out; |
22302 | } |
22303 | } else { |
22304 | at::Tensor tmp_output; |
22305 | { |
22306 | at::AutoDispatchSkipFunctionalize guard; |
22307 | tmp_output = at::_ops::special_scaled_modified_bessel_k0::call(x_); |
22308 | } |
22309 | at::functionalization::impl::replace_(out, tmp_output); |
22310 | at::functionalization::impl::commit_update(out); |
22311 | at::functionalization::impl::sync(out); |
22312 | return out; |
22313 | } |
22314 | } |
22315 | |
22316 | at::Tensor & special_scaled_modified_bessel_k1_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, at::Tensor & out) { |
22317 | if (false) { |
22318 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
22319 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
22320 | // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
22321 | auto x_meta = to_meta(x); |
22322 | auto out_meta = to_meta(out); |
22323 | at::AutoDispatchSkipFunctionalize func_guard; |
22324 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
22325 | at::_ops::special_scaled_modified_bessel_k1_out::call(x_meta, out_meta); |
22326 | } |
22327 | |
22328 | at::Tensor x_; |
22329 | if (at::functionalization::impl::isFunctionalTensor(x)) { |
22330 | at::functionalization::impl::sync(x); |
22331 | x_ = at::functionalization::impl::from_functional_tensor(x); |
22332 | } else { |
22333 | x_ = x; |
22334 | } |
22335 | |
22336 | at::Tensor out_; |
22337 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
22338 | at::functionalization::impl::sync(out); |
22339 | out_ = at::functionalization::impl::from_functional_tensor(out); |
22340 | } else { |
22341 | out_ = out; |
22342 | } |
22343 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
22344 | if ((false || at::functionalization::impl::isFunctionalTensor(x))) { |
22345 | // case 1: trying to mutate a non-functional tensor with a functional tensor is an error |
22346 | TORCH_INTERNAL_ASSERT(false, |
22347 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
22348 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
22349 | } else { |
22350 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
22351 | at::AutoDispatchSkipFunctionalize guard; |
22352 | at::Tensor tmp_output = at::_ops::special_scaled_modified_bessel_k1_out::call(x_, out_); |
22353 | return out; |
22354 | } |
22355 | } else { |
22356 | at::Tensor tmp_output; |
22357 | { |
22358 | at::AutoDispatchSkipFunctionalize guard; |
22359 | tmp_output = at::_ops::special_scaled_modified_bessel_k1::call(x_); |
22360 | } |
22361 | at::functionalization::impl::replace_(out, tmp_output); |
22362 | at::functionalization::impl::commit_update(out); |
22363 | at::functionalization::impl::sync(out); |
22364 | return out; |
22365 | } |
22366 | } |
22367 | |
22368 | at::Tensor & special_spherical_bessel_j0_out_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, at::Tensor & out) { |
22369 | if (false) { |
22370 | // Before converting the mutable op to its functional variant, run meta tensors through the original op. |
22371 | // This will help us catch shape errors that apply to inplace ops that wouldn't apply to their functional variants. |
22372 | // (We can only do this for inplace ops today though, because they technically all support meta tensors). |
22373 | auto x_meta = to_meta(x); |
22374 | auto out_meta = to_meta(out); |
22375 | at::AutoDispatchSkipFunctionalize func_guard; |
22376 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
22377 | at::_ops::special_spherical_bessel_j0_out::call(x_meta, out_meta); |
22378 | } |
22379 | |
22380 | at::Tensor x_; |
22381 | if (at::functionalization::impl::isFunctionalTensor(x)) { |
22382 | at::functionalization::impl::sync(x); |
22383 | x_ = at::functionalization::impl::from_functional_tensor(x); |
22384 | } else { |
22385 | x_ = x; |
22386 | } |
22387 | |
22388 | at::Tensor out_; |
22389 | if (at::functionalization::impl::isFunctionalTensor(out)) { |
22390 | at::functionalization::impl::sync(out); |
22391 | out_ = at::functionalization::impl::from_functional_tensor(out); |
22392 | } else { |
22393 | out_ = out; |
22394 | } |
22395 | if (!(true && at::functionalization::impl::isFunctionalTensor(out))) { |
22396 | if ((false || at::functionalization::impl::isFunctionalTensor(x))) { |
22397 | // case 1: trying to mutate a non-functional tensor with a functional tensor is an error |
22398 | TORCH_INTERNAL_ASSERT(false, |
22399 | "mutating a non-functional tensor with a functional tensor is not allowed." , |
22400 | " Please ensure that all of your inputs are wrapped inside of a functionalize() call." ); |
22401 | } else { |
22402 | // case 2: arguments are not functional tensors, so we no-op and redispatch. |
22403 | at::AutoDispatchSkipFunctionalize guard; |
22404 | at::Tensor tmp_output = at::_ops::special_spherical_bessel_j0_out::call(x_, out_); |
22405 | return out; |
22406 | } |
22407 | } else { |
22408 | at::Tensor tmp_output; |
22409 | { |
22410 | at::AutoDispatchSkipFunctionalize guard; |
22411 | tmp_output = at::_ops::special_spherical_bessel_j0::call(x_); |
22412 | } |
22413 | at::functionalization::impl::replace_(out, tmp_output); |
22414 | at::functionalization::impl::commit_update(out); |
22415 | at::functionalization::impl::sync(out); |
22416 | return out; |
22417 | } |
22418 | } |
22419 | |
22420 | at::Tensor _neg_view(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { |
22421 | |
22422 | at::Tensor self_; |
22423 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
22424 | |
22425 | self_ = at::functionalization::impl::from_functional_tensor(self); |
22426 | } else { |
22427 | self_ = self; |
22428 | } |
22429 | if (!at::functionalization::impl::isFunctionalTensor(self)) { |
22430 | // functionalization is re-entrant, but will no-op if it wasn't passed a FunctionalTensorWrapper. |
22431 | at::AutoDispatchSkipFunctionalize guard; |
22432 | return at::_ops::_neg_view::call(self_); |
22433 | } |
22434 | auto reapply_views = at::functionalization::impl::getFunctionalizationReapplyViewsTLS(); |
22435 | auto compute_reference_meta = |
22436 | self.key_set().has_backend(c10::BackendComponent::XLABit) || |
22437 | self.key_set().has_backend(c10::BackendComponent::LazyBit); |
22438 | at::Tensor reference_tensor_output; |
22439 | if (compute_reference_meta) { |
22440 | auto self_meta = to_meta(self); |
22441 | at::AutoDispatchSkipFunctionalize func_guard; |
22442 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
22443 | reference_tensor_output = at::_ops::_neg_view::call(self_meta); |
22444 | } |
22445 | at::Tensor tmp_output; |
22446 | { |
22447 | at::AutoDispatchSkipFunctionalize guard; |
22448 | if (reapply_views) { |
22449 | tmp_output = at::_ops::_neg_view::call(self_); |
22450 | } else { |
22451 | tmp_output = at::_ops::_neg_view_copy::call(self_); |
22452 | } |
22453 | } |
22454 | at::functionalization::ViewMeta view_meta = at::functionalization::ViewMeta( |
22455 | [reapply_views = reapply_views](const at::Tensor & base, int64_t mutated_view_idx) -> at::Tensor { |
22456 | if (reapply_views) { |
22457 | return at::_ops::_neg_view::call(base); |
22458 | } else { |
22459 | return at::_ops::_neg_view_copy::call(base); |
22460 | } |
22461 | }, |
22462 | [reapply_views = reapply_views](const at::Tensor & base, const at::Tensor & mutated_view, int64_t mutated_view_idx) -> at::Tensor { |
22463 | return at::functionalization::FunctionalInverses::_neg_view_copy_inverse(base, mutated_view, reapply_views); |
22464 | } |
22465 | ); |
22466 | auto out = at::functionalization::impl::create_functional_tensor_with_view_meta(tmp_output, self, view_meta); |
22467 | // See Note [Propagating strides in the functionalization pass] |
22468 | if (compute_reference_meta) { |
22469 | at::functionalization::impl::set_sizes_strides_offset(out, reference_tensor_output); |
22470 | } |
22471 | return out; |
22472 | } |
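// The view ops in this part of the file (_neg_view above, diagonal, select, split_with_sizes,
// squeeze, unbind, alias below) all share this structure: the ViewMeta carries a forward lambda that
// recomputes the view from its base (replaying the real view op, or the *_copy variant when view
// reapplication is disabled) and a reverse lambda that scatters a mutated view back into the base
// through the matching FunctionalInverses::*_inverse function. A rough sketch of the effect
// (hypothetical caller code):
//   auto v = x.diagonal();  // wrapper whose ViewMeta knows how to recompute v from x
//   v.add_(1);              // the mutation is later scattered back into x via the reverse lambda
// For XLA/Lazy tensors, sizes/strides/storage_offset are additionally taken from a reference run of
// the op on meta tensors, since those backends do not propagate strides themselves.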
22473 | |
22474 | at::Tensor diagonal(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t offset, int64_t dim1, int64_t dim2) { |
22475 | |
22476 | at::Tensor self_; |
22477 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
22478 | |
22479 | self_ = at::functionalization::impl::from_functional_tensor(self); |
22480 | } else { |
22481 | self_ = self; |
22482 | } |
22483 | if (!at::functionalization::impl::isFunctionalTensor(self)) { |
22484 | // functionalization is re-entrant, but will no-op if it wasn't passed a FunctionalTensorWrapper. |
22485 | at::AutoDispatchSkipFunctionalize guard; |
22486 | return at::_ops::diagonal::call(self_, offset, dim1, dim2); |
22487 | } |
22488 | auto reapply_views = at::functionalization::impl::getFunctionalizationReapplyViewsTLS(); |
22489 | auto compute_reference_meta = |
22490 | self.key_set().has_backend(c10::BackendComponent::XLABit) || |
22491 | self.key_set().has_backend(c10::BackendComponent::LazyBit); |
22492 | at::Tensor reference_tensor_output; |
22493 | if (compute_reference_meta) { |
22494 | auto self_meta = to_meta(self); |
22495 | at::AutoDispatchSkipFunctionalize func_guard; |
22496 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
22497 | reference_tensor_output = at::_ops::diagonal::call(self_meta, offset, dim1, dim2); |
22498 | } |
22499 | at::Tensor tmp_output; |
22500 | { |
22501 | at::AutoDispatchSkipFunctionalize guard; |
22502 | if (reapply_views) { |
22503 | tmp_output = at::_ops::diagonal::call(self_, offset, dim1, dim2); |
22504 | } else { |
22505 | tmp_output = at::_ops::diagonal_copy::call(self_, offset, dim1, dim2); |
22506 | } |
22507 | } |
22508 | at::functionalization::ViewMeta view_meta = at::functionalization::ViewMeta( |
22509 | [reapply_views = reapply_views, offset = offset, dim1 = dim1, dim2 = dim2](const at::Tensor & base, int64_t mutated_view_idx) -> at::Tensor { |
22510 | if (reapply_views) { |
22511 | return at::_ops::diagonal::call(base, offset, dim1, dim2); |
22512 | } else { |
22513 | return at::_ops::diagonal_copy::call(base, offset, dim1, dim2); |
22514 | } |
22515 | }, |
22516 | [reapply_views = reapply_views, offset = offset, dim1 = dim1, dim2 = dim2](const at::Tensor & base, const at::Tensor & mutated_view, int64_t mutated_view_idx) -> at::Tensor { |
22517 | return at::functionalization::FunctionalInverses::diagonal_copy_inverse(base, mutated_view, reapply_views, offset, dim1, dim2); |
22518 | } |
22519 | ); |
22520 | auto out = at::functionalization::impl::create_functional_tensor_with_view_meta(tmp_output, self, view_meta); |
22521 | // See Note [Propagating strides in the functionalization pass] |
22522 | if (compute_reference_meta) { |
22523 | at::functionalization::impl::set_sizes_strides_offset(out, reference_tensor_output); |
22524 | } |
22525 | return out; |
22526 | } |
22527 | |
22528 | at::Tensor select_int(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, c10::SymInt index) { |
22529 | |
22530 | at::Tensor self_; |
22531 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
22532 | |
22533 | self_ = at::functionalization::impl::from_functional_tensor(self); |
22534 | } else { |
22535 | self_ = self; |
22536 | } |
22537 | if (!at::functionalization::impl::isFunctionalTensor(self)) { |
22538 | // functionalization is re-entrant, but will no-op if it wasn't passed a FunctionalTensorWrapper. |
22539 | at::AutoDispatchSkipFunctionalize guard; |
22540 | return at::_ops::select_int::call(self_, dim, index); |
22541 | } |
22542 | auto reapply_views = at::functionalization::impl::getFunctionalizationReapplyViewsTLS(); |
22543 | auto compute_reference_meta = |
22544 | self.key_set().has_backend(c10::BackendComponent::XLABit) || |
22545 | self.key_set().has_backend(c10::BackendComponent::LazyBit); |
22546 | at::Tensor reference_tensor_output; |
22547 | if (compute_reference_meta) { |
22548 | auto self_meta = to_meta(self); |
22549 | at::AutoDispatchSkipFunctionalize func_guard; |
22550 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
22551 | reference_tensor_output = at::_ops::select_int::call(self_meta, dim, index); |
22552 | } |
22553 | at::Tensor tmp_output; |
22554 | { |
22555 | at::AutoDispatchSkipFunctionalize guard; |
22556 | if (reapply_views) { |
22557 | tmp_output = at::_ops::select_int::call(self_, dim, index); |
22558 | } else { |
22559 | tmp_output = at::_ops::select_copy_int::call(self_, dim, index); |
22560 | } |
22561 | } |
22562 | at::functionalization::ViewMeta view_meta = at::functionalization::ViewMeta( |
22563 | [reapply_views = reapply_views, dim = dim, index = index](const at::Tensor & base, int64_t mutated_view_idx) -> at::Tensor { |
22564 | if (reapply_views) { |
22565 | return at::_ops::select_int::call(base, dim, index); |
22566 | } else { |
22567 | return at::_ops::select_copy_int::call(base, dim, index); |
22568 | } |
22569 | }, |
22570 | [reapply_views = reapply_views, dim = dim, index = index](const at::Tensor & base, const at::Tensor & mutated_view, int64_t mutated_view_idx) -> at::Tensor { |
22571 | return at::functionalization::FunctionalInverses::select_copy_int_inverse(base, mutated_view, reapply_views, dim, index); |
22572 | } |
22573 | ); |
22574 | auto out = at::functionalization::impl::create_functional_tensor_with_view_meta(tmp_output, self, view_meta); |
22575 | // See Note [Propagating strides in the functionalization pass] |
22576 | if (compute_reference_meta) { |
22577 | at::functionalization::impl::set_sizes_strides_offset(out, reference_tensor_output); |
22578 | } |
22579 | return out; |
22580 | } |
22581 | |
22582 | ::std::vector<at::Tensor> split_with_sizes(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef split_sizes, int64_t dim) { |
22583 | |
22584 | at::Tensor self_; |
22585 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
22586 | |
22587 | self_ = at::functionalization::impl::from_functional_tensor(self); |
22588 | } else { |
22589 | self_ = self; |
22590 | } |
22591 | if (!at::functionalization::impl::isFunctionalTensor(self)) { |
22592 | // functionalization is re-entrant, but will no-op if it wasn't passed a FunctionalTensorWrapper. |
22593 | at::AutoDispatchSkipFunctionalize guard; |
22594 | return at::_ops::split_with_sizes::call(self_, split_sizes, dim); |
22595 | } |
22596 | auto reapply_views = at::functionalization::impl::getFunctionalizationReapplyViewsTLS(); |
22597 | auto compute_reference_meta = |
22598 | self.key_set().has_backend(c10::BackendComponent::XLABit) || |
22599 | self.key_set().has_backend(c10::BackendComponent::LazyBit); |
22600 | ::std::vector<at::Tensor> reference_tensor_output; |
22601 | if (compute_reference_meta) { |
22602 | auto self_meta = to_meta(self); |
22603 | at::AutoDispatchSkipFunctionalize func_guard; |
22604 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
22605 | reference_tensor_output = at::_ops::split_with_sizes::call(self_meta, split_sizes, dim); |
22606 | } |
22607 | ::std::vector<at::Tensor> tmp_output; |
22608 | { |
22609 | at::AutoDispatchSkipFunctionalize guard; |
22610 | if (reapply_views) { |
22611 | tmp_output = at::_ops::split_with_sizes::call(self_, split_sizes, dim); |
22612 | } else { |
22613 | tmp_output = at::_ops::split_with_sizes_copy::call(self_, split_sizes, dim); |
22614 | } |
22615 | } |
22616 | at::functionalization::ViewMeta view_meta = at::functionalization::ViewMeta( |
22617 | [reapply_views = reapply_views, split_sizes = split_sizes.vec(), dim = dim](const at::Tensor & base, int64_t mutated_view_idx) -> at::Tensor { |
22618 | if (reapply_views) { |
22619 | return at::_ops::split_with_sizes::call(base, split_sizes, dim)[mutated_view_idx]; |
22620 | } else { |
22621 | return at::_ops::split_with_sizes_copy::call(base, split_sizes, dim)[mutated_view_idx]; |
22622 | } |
22623 | }, |
22624 | [reapply_views = reapply_views, split_sizes = split_sizes.vec(), dim = dim](const at::Tensor & base, const at::Tensor & mutated_view, int64_t mutated_view_idx) -> at::Tensor { |
22625 | return at::functionalization::FunctionalInverses::split_with_sizes_copy_inverse(base, mutated_view, reapply_views, mutated_view_idx, split_sizes, dim); |
22626 | } |
22627 | ); |
22628 | auto out = at::functionalization::impl::create_functional_tensor_with_view_meta(tmp_output, self, view_meta); |
22629 | // See Note [Propagating strides in the functionalization pass] |
22630 | if (compute_reference_meta) { |
22631 | at::functionalization::impl::set_sizes_strides_offset(out, reference_tensor_output); |
22632 | } |
22633 | return out; |
22634 | } |
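// Multi-output view ops (split_with_sizes above, unbind below) reuse the same ViewMeta machinery,
// but the lambdas also receive mutated_view_idx: the forward lambda indexes into the recomputed list
// to reproduce one particular output, and the inverse scatters just that slice back into the base.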
22635 | |
22636 | at::Tensor squeeze(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { |
22637 | |
22638 | at::Tensor self_; |
22639 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
22640 | |
22641 | self_ = at::functionalization::impl::from_functional_tensor(self); |
22642 | } else { |
22643 | self_ = self; |
22644 | } |
22645 | if (!at::functionalization::impl::isFunctionalTensor(self)) { |
22646 | // functionalization is re-entrant, but will no-op if it wasn't passed a FunctionalTensorWrapper. |
22647 | at::AutoDispatchSkipFunctionalize guard; |
22648 | return at::_ops::squeeze::call(self_); |
22649 | } |
22650 | auto reapply_views = at::functionalization::impl::getFunctionalizationReapplyViewsTLS(); |
22651 | auto compute_reference_meta = |
22652 | self.key_set().has_backend(c10::BackendComponent::XLABit) || |
22653 | self.key_set().has_backend(c10::BackendComponent::LazyBit); |
22654 | at::Tensor reference_tensor_output; |
22655 | if (compute_reference_meta) { |
22656 | auto self_meta = to_meta(self); |
22657 | at::AutoDispatchSkipFunctionalize func_guard; |
22658 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
22659 | reference_tensor_output = at::_ops::squeeze::call(self_meta); |
22660 | } |
22661 | at::Tensor tmp_output; |
22662 | { |
22663 | at::AutoDispatchSkipFunctionalize guard; |
22664 | if (reapply_views) { |
22665 | tmp_output = at::_ops::squeeze::call(self_); |
22666 | } else { |
22667 | tmp_output = at::_ops::squeeze_copy::call(self_); |
22668 | } |
22669 | } |
22670 | at::functionalization::ViewMeta view_meta = at::functionalization::ViewMeta( |
22671 | [reapply_views = reapply_views](const at::Tensor & base, int64_t mutated_view_idx) -> at::Tensor { |
22672 | if (reapply_views) { |
22673 | return at::_ops::squeeze::call(base); |
22674 | } else { |
22675 | return at::_ops::squeeze_copy::call(base); |
22676 | } |
22677 | }, |
22678 | [reapply_views = reapply_views](const at::Tensor & base, const at::Tensor & mutated_view, int64_t mutated_view_idx) -> at::Tensor { |
22679 | return at::functionalization::FunctionalInverses::squeeze_copy_inverse(base, mutated_view, reapply_views); |
22680 | } |
22681 | ); |
22682 | auto out = at::functionalization::impl::create_functional_tensor_with_view_meta(tmp_output, self, view_meta); |
22683 | // See Note [Propagating strides in the functionalization pass] |
22684 | if (compute_reference_meta) { |
22685 | at::functionalization::impl::set_sizes_strides_offset(out, reference_tensor_output); |
22686 | } |
22687 | return out; |
22688 | } |
22689 | |
22690 | at::Tensor & squeeze_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) { |
22691 | if (!at::functionalization::impl::isFunctionalTensor(self)) { |
22692 | // functionalization is re-entrant, but will no-op if it wasn't passed a FunctionalTensorWrapper. |
22693 | |
22694 | at::Tensor self_; |
22695 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
22696 | |
22697 | self_ = at::functionalization::impl::from_functional_tensor(self); |
22698 | } else { |
22699 | self_ = self; |
22700 | } |
22701 | at::AutoDispatchSkipFunctionalize guard; |
22702 | return at::_ops::squeeze_::call(self_); |
22703 | } |
22704 | auto reapply_views = at::functionalization::impl::getFunctionalizationReapplyViewsTLS(); |
22705 | at::functionalization::ViewMeta view_meta = at::functionalization::ViewMeta( |
22706 | [reapply_views = reapply_views](const at::Tensor & base, int64_t mutated_view_idx) -> at::Tensor { |
22707 | if (reapply_views) { |
22708 | return at::_ops::squeeze::call(base); |
22709 | } else { |
22710 | return at::_ops::squeeze_copy::call(base); |
22711 | } |
22712 | }, |
22713 | [reapply_views = reapply_views](const at::Tensor & base, const at::Tensor & mutated_view, int64_t mutated_view_idx) -> at::Tensor { |
22714 | return at::functionalization::FunctionalInverses::squeeze_copy_inverse(base, mutated_view, reapply_views); |
22715 | } |
22716 | ); |
22717 | auto compute_reference_meta = |
22718 | self.key_set().has_backend(c10::BackendComponent::XLABit) || |
22719 | self.key_set().has_backend(c10::BackendComponent::LazyBit); |
22720 | at::Tensor reference_tensor_output; |
22721 | if (compute_reference_meta) { |
22722 | auto self_meta = to_meta(self); |
22723 | at::AutoDispatchSkipFunctionalize func_guard; |
22724 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
22725 | reference_tensor_output = at::_ops::squeeze_::call(self_meta); |
22726 | } |
22727 | // This function adds the above view meta to the current tensor and replays them off the base, |
22728 | // mutating the size/stride info of the current FunctionalTensorWrapper. |
22729 | // Because of this, we need to make sure to run the reference shape function above, |
22730 | // BEFORE doing this (otherwise we'll end up running the reference function using the wrong sizes/strides) |
22731 | at::functionalization::impl::mutate_view_meta(self, view_meta); |
22732 | // See Note [Propagating strides in the functionalization pass] |
22733 | // XLA/LTC don't implement the logic to propagate strides correctly, so we need to rely |
22734 | // on a reference implementation here (instead of relying on the output from the forward lambda |
22735 | // having the correct stride info) |
22736 | if (compute_reference_meta) { |
22737 | at::functionalization::impl::set_sizes_strides_offset(self, reference_tensor_output); |
22738 | } |
22739 | return self; |
22740 | } |
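// Rough sketch of the in-place view case above (hypothetical caller code): under functionalization,
//   t.squeeze_();
// does not touch any real storage; it appends the squeeze ViewMeta to t's existing
// FunctionalTensorWrapper and updates t's size/stride metadata by replaying the view chain off the base.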
22741 | |
22742 | at::Tensor squeeze_dim(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim) { |
22743 | |
22744 | at::Tensor self_; |
22745 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
22746 | |
22747 | self_ = at::functionalization::impl::from_functional_tensor(self); |
22748 | } else { |
22749 | self_ = self; |
22750 | } |
22751 | if (!at::functionalization::impl::isFunctionalTensor(self)) { |
22752 | // functionalization is re-entrant, but will no-op if it wasn't passed a FunctionalTensorWrapper. |
22753 | at::AutoDispatchSkipFunctionalize guard; |
22754 | return at::_ops::squeeze_dim::call(self_, dim); |
22755 | } |
22756 | auto reapply_views = at::functionalization::impl::getFunctionalizationReapplyViewsTLS(); |
22757 | auto compute_reference_meta = |
22758 | self.key_set().has_backend(c10::BackendComponent::XLABit) || |
22759 | self.key_set().has_backend(c10::BackendComponent::LazyBit); |
22760 | at::Tensor reference_tensor_output; |
22761 | if (compute_reference_meta) { |
22762 | auto self_meta = to_meta(self); |
22763 | at::AutoDispatchSkipFunctionalize func_guard; |
22764 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
22765 | reference_tensor_output = at::_ops::squeeze_dim::call(self_meta, dim); |
22766 | } |
22767 | at::Tensor tmp_output; |
22768 | { |
22769 | at::AutoDispatchSkipFunctionalize guard; |
22770 | if (reapply_views) { |
22771 | tmp_output = at::_ops::squeeze_dim::call(self_, dim); |
22772 | } else { |
22773 | tmp_output = at::_ops::squeeze_copy_dim::call(self_, dim); |
22774 | } |
22775 | } |
22776 | at::functionalization::ViewMeta view_meta = at::functionalization::ViewMeta( |
22777 | [reapply_views = reapply_views, dim = dim](const at::Tensor & base, int64_t mutated_view_idx) -> at::Tensor { |
22778 | if (reapply_views) { |
22779 | return at::_ops::squeeze_dim::call(base, dim); |
22780 | } else { |
22781 | return at::_ops::squeeze_copy_dim::call(base, dim); |
22782 | } |
22783 | }, |
22784 | [reapply_views = reapply_views, dim = dim](const at::Tensor & base, const at::Tensor & mutated_view, int64_t mutated_view_idx) -> at::Tensor { |
22785 | return at::functionalization::FunctionalInverses::squeeze_copy_dim_inverse(base, mutated_view, reapply_views, dim); |
22786 | } |
22787 | ); |
22788 | auto out = at::functionalization::impl::create_functional_tensor_with_view_meta(tmp_output, self, view_meta); |
22789 | // See Note [Propagating strides in the functionalization pass] |
22790 | if (compute_reference_meta) { |
22791 | at::functionalization::impl::set_sizes_strides_offset(out, reference_tensor_output); |
22792 | } |
22793 | return out; |
22794 | } |
22795 | |
22796 | at::Tensor & squeeze__dim(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, int64_t dim) { |
22797 | if (!at::functionalization::impl::isFunctionalTensor(self)) { |
22798 | // functionalization is re-entrant, but will no-op if it wasn't passed a FunctionalTensorWrapper. |
22799 | |
22800 | at::Tensor self_; |
22801 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
22802 | |
22803 | self_ = at::functionalization::impl::from_functional_tensor(self); |
22804 | } else { |
22805 | self_ = self; |
22806 | } |
22807 | at::AutoDispatchSkipFunctionalize guard; |
22808 | return at::_ops::squeeze__dim::call(self_, dim); |
22809 | } |
22810 | auto reapply_views = at::functionalization::impl::getFunctionalizationReapplyViewsTLS(); |
22811 | at::functionalization::ViewMeta view_meta = at::functionalization::ViewMeta( |
22812 | [reapply_views = reapply_views, dim = dim](const at::Tensor & base, int64_t mutated_view_idx) -> at::Tensor { |
22813 | if (reapply_views) { |
22814 | return at::_ops::squeeze_dim::call(base, dim); |
22815 | } else { |
22816 | return at::_ops::squeeze_copy_dim::call(base, dim); |
22817 | } |
22818 | }, |
22819 | [reapply_views = reapply_views, dim = dim](const at::Tensor & base, const at::Tensor & mutated_view, int64_t mutated_view_idx) -> at::Tensor { |
22820 | return at::functionalization::FunctionalInverses::squeeze_copy_dim_inverse(base, mutated_view, reapply_views, dim); |
22821 | } |
22822 | ); |
22823 | auto compute_reference_meta = |
22824 | self.key_set().has_backend(c10::BackendComponent::XLABit) || |
22825 | self.key_set().has_backend(c10::BackendComponent::LazyBit); |
22826 | at::Tensor reference_tensor_output; |
22827 | if (compute_reference_meta) { |
22828 | auto self_meta = to_meta(self); |
22829 | at::AutoDispatchSkipFunctionalize func_guard; |
22830 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
22831 | reference_tensor_output = at::_ops::squeeze__dim::call(self_meta, dim); |
22832 | } |
22833 | // This function adds the above view meta to the current tensor and replays them off the base, |
22834 | // mutating the size/stride info of the current FunctionalTensorWrapper. |
22835 | // Because of this, we need to make sure to run the reference shape function above, |
22836 | // BEFORE doing this (otherwise we'll end up running the reference function using the wrong sizes/strides) |
22837 | at::functionalization::impl::mutate_view_meta(self, view_meta); |
22838 | // See Note [Propagating strides in the functionalization pass] |
22839 | // XLA/LTC don't implement the logic to propagate strides correctly, so we need to rely |
22840 | // on a reference implementation here (instead of relying on the output from the forward lambda |
22841 | // having the correct stride info) |
22842 | if (compute_reference_meta) { |
22843 | at::functionalization::impl::set_sizes_strides_offset(self, reference_tensor_output); |
22844 | } |
22845 | return self; |
22846 | } |
22847 | |
22848 | at::Tensor squeeze_dims(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef dim) { |
22849 | |
22850 | at::Tensor self_; |
22851 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
22852 | |
22853 | self_ = at::functionalization::impl::from_functional_tensor(self); |
22854 | } else { |
22855 | self_ = self; |
22856 | } |
22857 | if (!at::functionalization::impl::isFunctionalTensor(self)) { |
22858 | // functionalization is re-entrant, but will no-op if it wasn't passed a FunctionalTensorWrapper. |
22859 | at::AutoDispatchSkipFunctionalize guard; |
22860 | return at::_ops::squeeze_dims::call(self_, dim); |
22861 | } |
22862 | auto reapply_views = at::functionalization::impl::getFunctionalizationReapplyViewsTLS(); |
22863 | auto compute_reference_meta = |
22864 | self.key_set().has_backend(c10::BackendComponent::XLABit) || |
22865 | self.key_set().has_backend(c10::BackendComponent::LazyBit); |
22866 | at::Tensor reference_tensor_output; |
22867 | if (compute_reference_meta) { |
22868 | auto self_meta = to_meta(self); |
22869 | at::AutoDispatchSkipFunctionalize func_guard; |
22870 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
22871 | reference_tensor_output = at::_ops::squeeze_dims::call(self_meta, dim); |
22872 | } |
22873 | at::Tensor tmp_output; |
22874 | { |
22875 | at::AutoDispatchSkipFunctionalize guard; |
22876 | if (reapply_views) { |
22877 | tmp_output = at::_ops::squeeze_dims::call(self_, dim); |
22878 | } else { |
22879 | tmp_output = at::_ops::squeeze_copy_dims::call(self_, dim); |
22880 | } |
22881 | } |
22882 | at::functionalization::ViewMeta view_meta = at::functionalization::ViewMeta( |
22883 | [reapply_views = reapply_views, dim = dim.vec()](const at::Tensor & base, int64_t mutated_view_idx) -> at::Tensor { |
22884 | if (reapply_views) { |
22885 | return at::_ops::squeeze_dims::call(base, dim); |
22886 | } else { |
22887 | return at::_ops::squeeze_copy_dims::call(base, dim); |
22888 | } |
22889 | }, |
22890 | [reapply_views = reapply_views, dim = dim.vec()](const at::Tensor & base, const at::Tensor & mutated_view, int64_t mutated_view_idx) -> at::Tensor { |
22891 | return at::functionalization::FunctionalInverses::squeeze_copy_dims_inverse(base, mutated_view, reapply_views, dim); |
22892 | } |
22893 | ); |
22894 | auto out = at::functionalization::impl::create_functional_tensor_with_view_meta(tmp_output, self, view_meta); |
22895 | // See Note [Propagating strides in the functionalization pass] |
22896 | if (compute_reference_meta) { |
22897 | at::functionalization::impl::set_sizes_strides_offset(out, reference_tensor_output); |
22898 | } |
22899 | return out; |
22900 | } |
22901 | |
22902 | at::Tensor & squeeze__dims(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, at::IntArrayRef dim) { |
22903 | if (!at::functionalization::impl::isFunctionalTensor(self)) { |
22904 | // functionalization is re-entrant, but will no-op if it wasn't passed a FunctionalTensorWrapper. |
22905 | |
22906 | at::Tensor self_; |
22907 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
22908 | |
22909 | self_ = at::functionalization::impl::from_functional_tensor(self); |
22910 | } else { |
22911 | self_ = self; |
22912 | } |
22913 | at::AutoDispatchSkipFunctionalize guard; |
22914 | return at::_ops::squeeze__dims::call(self_, dim); |
22915 | } |
22916 | auto reapply_views = at::functionalization::impl::getFunctionalizationReapplyViewsTLS(); |
22917 | at::functionalization::ViewMeta view_meta = at::functionalization::ViewMeta( |
22918 | [reapply_views = reapply_views, dim = dim.vec()](const at::Tensor & base, int64_t mutated_view_idx) -> at::Tensor { |
22919 | if (reapply_views) { |
22920 | return at::_ops::squeeze_dims::call(base, dim); |
22921 | } else { |
22922 | return at::_ops::squeeze_copy_dims::call(base, dim); |
22923 | } |
22924 | }, |
22925 | [reapply_views = reapply_views, dim = dim.vec()](const at::Tensor & base, const at::Tensor & mutated_view, int64_t mutated_view_idx) -> at::Tensor { |
22926 | return at::functionalization::FunctionalInverses::squeeze_copy_dims_inverse(base, mutated_view, reapply_views, dim); |
22927 | } |
22928 | ); |
22929 | auto compute_reference_meta = |
22930 | self.key_set().has_backend(c10::BackendComponent::XLABit) || |
22931 | self.key_set().has_backend(c10::BackendComponent::LazyBit); |
22932 | at::Tensor reference_tensor_output; |
22933 | if (compute_reference_meta) { |
22934 | auto self_meta = to_meta(self); |
22935 | at::AutoDispatchSkipFunctionalize func_guard; |
22936 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
22937 | reference_tensor_output = at::_ops::squeeze__dims::call(self_meta, dim); |
22938 | } |
22939 | // This function adds the above view meta to the current tensor and replays them off the base, |
22940 | // mutating the size/stride info of the current FunctionalTensorWrapper. |
22941 | // Because of this, we need to make sure to run the reference shape function above, |
22942 | // BEFORE doing this (otherwise we'll end up running the reference function using the wrong sizes/strides) |
22943 | at::functionalization::impl::mutate_view_meta(self, view_meta); |
22944 | // See Note [Propagating strides in the functionalization pass] |
22945 | // XLA/LTC don't implement the logic to propagate strides correctly, so we need to rely |
22946 | // on a reference implementation here (instead of relying on the output from the forward lambda |
22947 | // having the correct stride info) |
22948 | if (compute_reference_meta) { |
22949 | at::functionalization::impl::set_sizes_strides_offset(self, reference_tensor_output); |
22950 | } |
22951 | return self; |
22952 | } |
22953 | |
22954 | ::std::vector<at::Tensor> unbind_int(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim) { |
22955 | |
22956 | at::Tensor self_; |
22957 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
22958 | |
22959 | self_ = at::functionalization::impl::from_functional_tensor(self); |
22960 | } else { |
22961 | self_ = self; |
22962 | } |
22963 | if (!at::functionalization::impl::isFunctionalTensor(self)) { |
22964 | // functionalization is re-entrant, but will no-op if it wasn't passed a FunctionalTensorWrapper. |
22965 | at::AutoDispatchSkipFunctionalize guard; |
22966 | return at::_ops::unbind_int::call(self_, dim); |
22967 | } |
22968 | auto reapply_views = at::functionalization::impl::getFunctionalizationReapplyViewsTLS(); |
22969 | auto compute_reference_meta = |
22970 | self.key_set().has_backend(c10::BackendComponent::XLABit) || |
22971 | self.key_set().has_backend(c10::BackendComponent::LazyBit); |
22972 | ::std::vector<at::Tensor> reference_tensor_output; |
22973 | if (compute_reference_meta) { |
22974 | auto self_meta = to_meta(self); |
22975 | at::AutoDispatchSkipFunctionalize func_guard; |
22976 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
22977 | reference_tensor_output = at::_ops::unbind_int::call(self_meta, dim); |
22978 | } |
22979 | ::std::vector<at::Tensor> tmp_output; |
22980 | { |
22981 | at::AutoDispatchSkipFunctionalize guard; |
22982 | if (reapply_views) { |
22983 | tmp_output = at::_ops::unbind_int::call(self_, dim); |
22984 | } else { |
22985 | tmp_output = at::_ops::unbind_copy_int::call(self_, dim); |
22986 | } |
22987 | } |
22988 | at::functionalization::ViewMeta view_meta = at::functionalization::ViewMeta( |
22989 | [reapply_views = reapply_views, dim = dim](const at::Tensor & base, int64_t mutated_view_idx) -> at::Tensor { |
22990 | if (reapply_views) { |
22991 | return at::_ops::unbind_int::call(base, dim)[mutated_view_idx]; |
22992 | } else { |
22993 | return at::_ops::unbind_copy_int::call(base, dim)[mutated_view_idx]; |
22994 | } |
22995 | }, |
22996 | [reapply_views = reapply_views, dim = dim](const at::Tensor & base, const at::Tensor & mutated_view, int64_t mutated_view_idx) -> at::Tensor { |
22997 | return at::functionalization::FunctionalInverses::unbind_copy_int_inverse(base, mutated_view, reapply_views, mutated_view_idx, dim); |
22998 | } |
22999 | ); |
23000 | auto out = at::functionalization::impl::create_functional_tensor_with_view_meta(tmp_output, self, view_meta); |
23001 | // See Note [Propagating strides in the functionalization pass] |
23002 | if (compute_reference_meta) { |
23003 | at::functionalization::impl::set_sizes_strides_offset(out, reference_tensor_output); |
23004 | } |
23005 | return out; |
23006 | } |
23007 | |
23008 | at::Tensor alias(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { |
23009 | |
23010 | at::Tensor self_; |
23011 | if (at::functionalization::impl::isFunctionalTensor(self)) { |
23012 | |
23013 | self_ = at::functionalization::impl::from_functional_tensor(self); |
23014 | } else { |
23015 | self_ = self; |
23016 | } |
23017 | if (!at::functionalization::impl::isFunctionalTensor(self)) { |
23018 | // functionalization is re-entrant, but will no-op if it wasn't passed a FunctionalTensorWrapper. |
23019 | at::AutoDispatchSkipFunctionalize guard; |
23020 | return at::_ops::alias::call(self_); |
23021 | } |
23022 | auto reapply_views = at::functionalization::impl::getFunctionalizationReapplyViewsTLS(); |
23023 | auto compute_reference_meta = |
23024 | self.key_set().has_backend(c10::BackendComponent::XLABit) || |
23025 | self.key_set().has_backend(c10::BackendComponent::LazyBit); |
23026 | at::Tensor reference_tensor_output; |
23027 | if (compute_reference_meta) { |
23028 | auto self_meta = to_meta(self); |
23029 | at::AutoDispatchSkipFunctionalize func_guard; |
23030 | c10::impl::ExcludeDispatchKeyGuard guard(exclude_keys_for_meta_dispatch); |
23031 | reference_tensor_output = at::_ops::alias::call(self_meta); |
23032 | } |
23033 | at::Tensor tmp_output; |
23034 | { |
23035 | at::AutoDispatchSkipFunctionalize guard; |
23036 | if (reapply_views) { |
23037 | tmp_output = at::_ops::alias::call(self_); |
23038 | } else { |
23039 | tmp_output = at::_ops::alias_copy::call(self_); |
23040 | } |
23041 | } |
23042 | at::functionalization::ViewMeta view_meta = at::functionalization::ViewMeta( |
23043 | [reapply_views = reapply_views](const at::Tensor & base, int64_t mutated_view_idx) -> at::Tensor { |
23044 | if (reapply_views) { |
23045 | return at::_ops::alias::call(base); |
23046 | } else { |
23047 | return at::_ops::alias_copy::call(base); |
23048 | } |
23049 | }, |
23050 | [reapply_views = reapply_views](const at::Tensor & base, const at::Tensor & mutated_view, int64_t mutated_view_idx) -> at::Tensor { |
23051 | return at::functionalization::FunctionalInverses::alias_copy_inverse(base, mutated_view, reapply_views); |
23052 | } |
23053 | ); |
23054 | auto out = at::functionalization::impl::create_functional_tensor_with_view_meta(tmp_output, self, view_meta); |
23055 | // See Note [Propagating strides in the functionalization pass] |
23056 | if (compute_reference_meta) { |
23057 | at::functionalization::impl::set_sizes_strides_offset(out, reference_tensor_output); |
23058 | } |
23059 | return out; |
23060 | } |
23061 | |
23062 | } // namespace functionalization |
23063 | |
23064 | namespace { |
23065 | |
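// Kernel registrations for the Functionalize dispatch key. Entries registered through
// TORCH_FN(functionalization::...) point at the generated wrappers above; entries registered through
// a static_cast to an at::native:: function (e.g. arcsin, arctan, hstack) are composite ops that
// decompose into other ops, so functionalization is applied to the ops they decompose into rather
// than to the composite itself.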
23066 | TORCH_LIBRARY_IMPL(aten, Functionalize, m) { |
23067 | m.impl("_new_zeros_with_same_feature_meta.out" , TORCH_FN(functionalization::_new_zeros_with_same_feature_meta_out_out)); |
23068 | m.impl("_cudnn_init_dropout_state.out" , TORCH_FN(functionalization::_cudnn_init_dropout_state_out_out)); |
23069 | m.impl("angle.out" , TORCH_FN(functionalization::angle_out_out)); |
23070 | m.impl("sgn.out" , TORCH_FN(functionalization::sgn_out_out)); |
23071 | m.impl("sgn_" , TORCH_FN(functionalization::sgn_)); |
23072 | m.impl("_add_relu.out" , TORCH_FN(functionalization::_add_relu_out_out)); |
23073 | m.impl("_add_relu_.Tensor" , TORCH_FN(functionalization::_add_relu__Tensor)); |
23074 | m.impl("_add_relu.Scalar_out" , TORCH_FN(functionalization::_add_relu_out_Scalar_out)); |
23075 | m.impl("_add_relu_.Scalar" , TORCH_FN(functionalization::_add_relu__Scalar)); |
23076 | m.impl("any.out" , TORCH_FN(functionalization::any_out_out)); |
23077 | m.impl("any.dimname" , static_cast<at::Tensor (*)(const at::Tensor & self, at::Dimname dim, bool keepdim)>(at::native::any)); |
23078 | m.impl("any.dimname_out" , static_cast<at::Tensor & (*)(const at::Tensor & self, at::Dimname dim, bool keepdim, at::Tensor & out)>(at::native::any_out)); |
23079 | m.impl("argmin.out" , TORCH_FN(functionalization::argmin_out_out)); |
23080 | m.impl("acosh.out" , TORCH_FN(functionalization::acosh_out_out)); |
23081 | m.impl("acosh_" , TORCH_FN(functionalization::acosh_)); |
23082 | m.impl("arcsin" , static_cast<at::Tensor (*)(const at::Tensor & self)>(at::native::arcsin)); |
23083 | m.impl("arcsin.out" , static_cast<at::Tensor & (*)(const at::Tensor & self, at::Tensor & out)>(at::native::arcsin_out)); |
23084 | m.impl("arcsin_" , static_cast<at::Tensor & (*)(at::Tensor & self)>(at::native::arcsin_)); |
23085 | m.impl("arctan" , static_cast<at::Tensor (*)(const at::Tensor & self)>(at::native::arctan)); |
23086 | m.impl("arctan.out" , static_cast<at::Tensor & (*)(const at::Tensor & self, at::Tensor & out)>(at::native::arctan_out)); |
23087 | m.impl("arctan_" , static_cast<at::Tensor & (*)(at::Tensor & self)>(at::native::arctan_)); |
23088 | m.impl("bincount.out" , TORCH_FN(functionalization::bincount_out_out)); |
23089 | m.impl("copysign.out" , TORCH_FN(functionalization::copysign_out_out)); |
23090 | m.impl("copysign_.Tensor" , TORCH_FN(functionalization::copysign__Tensor)); |
23091 | m.impl("copysign.Scalar_out" , TORCH_FN(functionalization::copysign_out_Scalar_out)); |
23092 | m.impl("copysign_.Scalar" , TORCH_FN(functionalization::copysign__Scalar)); |
23093 | m.impl("logical_or.out" , TORCH_FN(functionalization::logical_or_out_out)); |
23094 | m.impl("logical_or_" , TORCH_FN(functionalization::logical_or_)); |
23095 | m.impl("cat.out" , TORCH_FN(functionalization::cat_out_out)); |
23096 | m.impl("cat.names" , static_cast<at::Tensor (*)(at::TensorList tensors, at::Dimname dim)>(at::native::cat)); |
23097 | m.impl("cat.names_out" , static_cast<at::Tensor & (*)(at::TensorList tensors, at::Dimname dim, at::Tensor & out)>(at::native::cat_out)); |
23098 | m.impl("concat" , static_cast<at::Tensor (*)(at::TensorList tensors, int64_t dim)>(at::native::concat)); |
23099 | m.impl("concat.out" , static_cast<at::Tensor & (*)(at::TensorList tensors, int64_t dim, at::Tensor & out)>(at::native::concat_out)); |
23100 | m.impl("concat.names" , static_cast<at::Tensor (*)(at::TensorList tensors, at::Dimname dim)>(at::native::concat)); |
23101 | m.impl("concat.names_out" , static_cast<at::Tensor & (*)(at::TensorList tensors, at::Dimname dim, at::Tensor & out)>(at::native::concat_out)); |
23102 | m.impl("ceil.out" , TORCH_FN(functionalization::ceil_out_out)); |
23103 | m.impl("ceil_" , TORCH_FN(functionalization::ceil_)); |
23104 | m.impl("polar.out" , TORCH_FN(functionalization::polar_out_out)); |
23105 | m.impl("convolution.out" , TORCH_FN(functionalization::convolution_out_out)); |
23106 | m.impl("convolution_overrideable.out" , TORCH_FN(functionalization::convolution_overrideable_out_out)); |
23107 | m.impl("convolution_backward_overrideable.out" , TORCH_FN(functionalization::convolution_backward_overrideable_out_out)); |
23108 | m.impl("cudnn_affine_grid_generator.out" , TORCH_FN(functionalization::cudnn_affine_grid_generator_out_out)); |
23109 | m.impl("cudnn_batch_norm_backward.out" , TORCH_FN(functionalization::cudnn_batch_norm_backward_out_out)); |
23110 | m.impl("cudnn_convolution_relu.out" , TORCH_FN(functionalization::cudnn_convolution_relu_out_out)); |
23111 | m.impl("cudnn_convolution_add_relu.out" , TORCH_FN(functionalization::cudnn_convolution_add_relu_out_out)); |
23112 | m.impl("cummax.out" , TORCH_FN(functionalization::cummax_out_out)); |
23113 | m.impl("cummax.dimname" , static_cast<::std::tuple<at::Tensor,at::Tensor> (*)(const at::Tensor & self, at::Dimname dim)>(at::native::cummax)); |
23114 | m.impl("cummax.dimname_out" , static_cast<::std::tuple<at::Tensor &,at::Tensor &> (*)(const at::Tensor & self, at::Dimname dim, at::Tensor & values, at::Tensor & indices)>(at::native::cummax_out)); |
23115 | m.impl("diff" , static_cast<at::Tensor (*)(const at::Tensor & self, int64_t n, int64_t dim, const c10::optional<at::Tensor> & prepend, const c10::optional<at::Tensor> & append)>(at::native::diff)); |
23116 | m.impl("diff.out" , static_cast<at::Tensor & (*)(const at::Tensor & self, int64_t n, int64_t dim, const c10::optional<at::Tensor> & prepend, const c10::optional<at::Tensor> & append, at::Tensor & out)>(at::native::diff_out)); |
23117 | m.impl("embedding_renorm.out" , TORCH_FN(functionalization::embedding_renorm_out_out)); |
23118 | m.impl("embedding_renorm_" , TORCH_FN(functionalization::embedding_renorm_)); |
23119 | m.impl("_embedding_bag_forward_only.out" , TORCH_FN(functionalization::_embedding_bag_forward_only_out_out)); |
23120 | m.impl("_embedding_bag_dense_backward.out" , TORCH_FN(functionalization::_embedding_bag_dense_backward_out_out)); |
23121 | m.impl("empty.names_out" , TORCH_FN(functionalization::empty_out_names_out)); |
23122 | m.impl("empty.out" , static_cast<at::Tensor & (*)(at::IntArrayRef size, c10::optional<at::MemoryFormat> memory_format, at::Tensor & out)>(at::native::empty_out)); |
23123 | m.impl("empty_like.out" , TORCH_FN(functionalization::empty_like_out_out)); |
23124 | m.impl("erf.out" , TORCH_FN(functionalization::erf_out_out)); |
23125 | m.impl("erf_" , TORCH_FN(functionalization::erf_)); |
23126 | m.impl("erfc.out" , TORCH_FN(functionalization::erfc_out_out)); |
23127 | m.impl("erfc_" , TORCH_FN(functionalization::erfc_)); |
23128 | m.impl("eye.out" , TORCH_FN(functionalization::eye_out_out)); |
23129 | m.impl("eye.m_out" , TORCH_FN(functionalization::eye_out_m_out)); |
23130 | m.impl("gcd.out" , TORCH_FN(functionalization::gcd_out_out)); |
23131 | m.impl("gcd_" , TORCH_FN(functionalization::gcd_)); |
23132 | m.impl("grid_sampler_2d_backward.out" , TORCH_FN(functionalization::grid_sampler_2d_backward_out_out)); |
23133 | m.impl("native_group_norm_backward.out" , TORCH_FN(functionalization::native_group_norm_backward_out_out)); |
23134 | m.impl("_fft_r2c.out" , TORCH_FN(functionalization::_fft_r2c_out_out)); |
23135 | m.impl("index.Tensor_out" , TORCH_FN(functionalization::index_out_Tensor_out)); |
23136 | m.impl("index_put.out" , TORCH_FN(functionalization::index_put_out_out)); |
23137 | m.impl("index_put_" , TORCH_FN(functionalization::index_put_)); |
23138 | m.impl("linear_backward.out" , TORCH_FN(functionalization::linear_backward_out_out)); |
23139 | m.impl("mkldnn_linear_backward_weights.out" , TORCH_FN(functionalization::mkldnn_linear_backward_weights_out_out)); |
23140 | m.impl("logaddexp.out" , TORCH_FN(functionalization::logaddexp_out_out)); |
23141 | m.impl("_logcumsumexp.out" , TORCH_FN(functionalization::_logcumsumexp_out_out)); |
23142 | m.impl("logcumsumexp.out" , TORCH_FN(functionalization::logcumsumexp_out_out)); |
23143 | m.impl("logcumsumexp.dimname" , static_cast<at::Tensor (*)(const at::Tensor & self, at::Dimname dim)>(at::native::logcumsumexp)); |
23144 | m.impl("logcumsumexp.dimname_out" , static_cast<at::Tensor & (*)(const at::Tensor & self, at::Dimname dim, at::Tensor & out)>(at::native::logcumsumexp_out)); |
23145 | m.impl("matmul_backward.out" , TORCH_FN(functionalization::matmul_backward_out_out)); |
23146 | m.impl("mps_max_pool2d_backward.out" , TORCH_FN(functionalization::mps_max_pool2d_backward_out_out)); |
23147 | m.impl("median.out" , TORCH_FN(functionalization::median_out_out)); |
23148 | m.impl("median.dim_values" , TORCH_FN(functionalization::median_out_dim_values)); |
23149 | m.impl("median.names_dim" , static_cast<::std::tuple<at::Tensor,at::Tensor> (*)(const at::Tensor & self, at::Dimname dim, bool keepdim)>(at::native::median)); |
23150 | m.impl("median.names_dim_values" , static_cast<::std::tuple<at::Tensor &,at::Tensor &> (*)(const at::Tensor & self, at::Dimname dim, bool keepdim, at::Tensor & values, at::Tensor & indices)>(at::native::median_out)); |
23151 | m.impl("amin.out" , TORCH_FN(functionalization::amin_out_out)); |
23152 | m.impl("mkldnn_rnn_layer_backward.out" , TORCH_FN(functionalization::mkldnn_rnn_layer_backward_out_out)); |
23153 | m.impl("miopen_convolution.out" , TORCH_FN(functionalization::miopen_convolution_out_out)); |
23154 | m.impl("miopen_depthwise_convolution.out" , TORCH_FN(functionalization::miopen_depthwise_convolution_out_out)); |
23155 | m.impl("miopen_rnn_backward.out" , TORCH_FN(functionalization::miopen_rnn_backward_out_out)); |
23156 | m.impl("native_batch_norm.out" , TORCH_FN(functionalization::native_batch_norm_out_out)); |
23157 | m.impl("batch_norm_elemt.out" , TORCH_FN(functionalization::batch_norm_elemt_out_out)); |
23158 | m.impl("batch_norm_update_stats.out" , TORCH_FN(functionalization::batch_norm_update_stats_out_out)); |
23159 | m.impl("_nnpack_spatial_convolution.out" , TORCH_FN(functionalization::_nnpack_spatial_convolution_out_out)); |
23160 | m.impl("ones_like.out" , TORCH_FN(functionalization::ones_like_out_out)); |
23161 | m.impl("channel_shuffle.out" , TORCH_FN(functionalization::channel_shuffle_out_out)); |
23162 | m.impl("randint.out" , TORCH_FN(functionalization::randint_out_out)); |
23163 | m.impl("randint.generator_out" , TORCH_FN(functionalization::randint_out_generator_out)); |
23164 | m.impl("randint.low_out" , TORCH_FN(functionalization::randint_out_low_out)); |
23165 | m.impl("randint.low_generator_out" , TORCH_FN(functionalization::randint_out_low_generator_out)); |
23166 | m.impl("randperm.out" , TORCH_FN(functionalization::randperm_out_out)); |
23167 | m.impl("randperm.generator_out" , TORCH_FN(functionalization::randperm_out_generator_out)); |
23168 | m.impl("range.out" , TORCH_FN(functionalization::range_out_out)); |
23169 | m.impl("range.out_" , TORCH_FN(functionalization::range_out_out_)); |
23170 | m.impl("reciprocal.out" , TORCH_FN(functionalization::reciprocal_out_out)); |
23171 | m.impl("reciprocal_" , TORCH_FN(functionalization::reciprocal_)); |
23172 | m.impl("gelu_backward.grad_input" , TORCH_FN(functionalization::gelu_backward_out_grad_input)); |
23173 | m.impl("hardshrink.out" , TORCH_FN(functionalization::hardshrink_out_out)); |
23174 | m.impl("hardshrink_backward.grad_input" , TORCH_FN(functionalization::hardshrink_backward_out_grad_input)); |
23175 | m.impl("silu.out" , TORCH_FN(functionalization::silu_out_out)); |
23176 | m.impl("silu_" , TORCH_FN(functionalization::silu_)); |
23177 | m.impl("silu_backward" , static_cast<at::Tensor (*)(const at::Tensor & grad_output, const at::Tensor & self)>(at::native::math_silu_backward)); |
23178 | m.impl("silu_backward.grad_input" , TORCH_FN(functionalization::silu_backward_out_grad_input)); |
23179 | m.impl("sin.out" , TORCH_FN(functionalization::sin_out_out)); |
23180 | m.impl("sin_" , TORCH_FN(functionalization::sin_)); |
23181 | m.impl("_softmax.out" , TORCH_FN(functionalization::_softmax_out_out)); |
23182 | m.impl("sspaddmm" , static_cast<at::Tensor (*)(const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta, const at::Scalar & alpha)>(at::native::sspaddmm)); |
23183 | m.impl("sspaddmm.out" , TORCH_FN(functionalization::sspaddmm_out_out)); |
23184 | m.impl("_stack.out" , TORCH_FN(functionalization::_stack_out_out)); |
23185 | m.impl("hstack" , static_cast<at::Tensor (*)(at::TensorList tensors)>(at::native::hstack)); |
23186 | m.impl("hstack.out" , static_cast<at::Tensor & (*)(at::TensorList tensors, at::Tensor & out)>(at::native::hstack_out)); |
23187 | m.impl("dstack" , static_cast<at::Tensor (*)(at::TensorList tensors)>(at::native::dstack)); |
23188 | m.impl("dstack.out" , static_cast<at::Tensor & (*)(at::TensorList tensors, at::Tensor & out)>(at::native::dstack_out)); |
23189 | m.impl("sum.out" , TORCH_FN(functionalization::sum_out_out)); |
23190 | m.impl("sum.IntList_out" , TORCH_FN(functionalization::sum_out_IntList_out)); |
23191 | m.impl("sum.dim_DimnameList" , static_cast<at::Tensor (*)(const at::Tensor & self, at::DimnameList dim, bool keepdim, c10::optional<at::ScalarType> dtype)>(at::native::sum)); |
23192 | m.impl("sum.DimnameList_out" , static_cast<at::Tensor & (*)(const at::Tensor & self, at::DimnameList dim, bool keepdim, c10::optional<at::ScalarType> dtype, at::Tensor & out)>(at::native::sum_out)); |
23193 | m.impl("std.dim" , static_cast<at::Tensor (*)(const at::Tensor & self, at::OptionalIntArrayRef dim, bool unbiased, bool keepdim)>(at::native::std)); |
23194 | m.impl("std.out" , static_cast<at::Tensor & (*)(const at::Tensor & self, at::OptionalIntArrayRef dim, bool unbiased, bool keepdim, at::Tensor & out)>(at::native::std_out)); |
23195 | m.impl("std.correction_out" , TORCH_FN(functionalization::std_out_correction_out)); |
23196 | m.impl("std.names_dim" , static_cast<at::Tensor (*)(const at::Tensor & self, at::DimnameList dim, bool unbiased, bool keepdim)>(at::native::std)); |
23197 | m.impl("std.names_out" , static_cast<at::Tensor & (*)(const at::Tensor & self, at::DimnameList dim, bool unbiased, bool keepdim, at::Tensor & out)>(at::native::std_out)); |
23198 | m.impl("std.correction_names" , static_cast<at::Tensor (*)(const at::Tensor & self, at::DimnameList dim, c10::optional<int64_t> correction, bool keepdim)>(at::native::std)); |
23199 | m.impl("std.correction_names_out" , static_cast<at::Tensor & (*)(const at::Tensor & self, at::DimnameList dim, c10::optional<int64_t> correction, bool keepdim, at::Tensor & out)>(at::native::std_out)); |
23200 | m.impl("tan.out" , TORCH_FN(functionalization::tan_out_out)); |
23201 | m.impl("tan_" , TORCH_FN(functionalization::tan_)); |
23202 | m.impl("tensordot" , static_cast<at::Tensor (*)(const at::Tensor & self, const at::Tensor & other, at::IntArrayRef dims_self, at::IntArrayRef dims_other)>(at::native::tensordot)); |
23203 | m.impl("tensordot.out" , TORCH_FN(functionalization::tensordot_out_out)); |
23204 | m.impl("threshold.out" , TORCH_FN(functionalization::threshold_out_out)); |
23205 | m.impl("threshold_" , TORCH_FN(functionalization::threshold_)); |
23206 | m.impl("_nested_tensor_strides.out" , TORCH_FN(functionalization::_nested_tensor_strides_out_out)); |
23207 | m.impl("fix" , static_cast<at::Tensor (*)(const at::Tensor & self)>(at::native::fix)); |
23208 | m.impl("fix.out" , static_cast<at::Tensor & (*)(const at::Tensor & self, at::Tensor & out)>(at::native::fix_out)); |
23209 | m.impl("fix_" , static_cast<at::Tensor & (*)(at::Tensor & self)>(at::native::fix_)); |
23210 | m.impl("unique_consecutive.out" , TORCH_FN(functionalization::unique_consecutive_out_out)); |
23211 | m.impl("var.dim" , static_cast<at::Tensor (*)(const at::Tensor & self, at::OptionalIntArrayRef dim, bool unbiased, bool keepdim)>(at::native::var)); |
23212 | m.impl("var.out" , static_cast<at::Tensor & (*)(const at::Tensor & self, at::OptionalIntArrayRef dim, bool unbiased, bool keepdim, at::Tensor & out)>(at::native::var_out)); |
23213 | m.impl("var.correction_out" , TORCH_FN(functionalization::var_out_correction_out)); |
23214 | m.impl("var.names_dim" , static_cast<at::Tensor (*)(const at::Tensor & self, at::DimnameList dim, bool unbiased, bool keepdim)>(at::native::var)); |
23215 | m.impl("var.names_out" , static_cast<at::Tensor & (*)(const at::Tensor & self, at::DimnameList dim, bool unbiased, bool keepdim, at::Tensor & out)>(at::native::var_out)); |
23216 | m.impl("var.correction_names" , static_cast<at::Tensor (*)(const at::Tensor & self, at::DimnameList dim, c10::optional<int64_t> correction, bool keepdim)>(at::native::var)); |
23217 | m.impl("var.correction_names_out" , static_cast<at::Tensor & (*)(const at::Tensor & self, at::DimnameList dim, c10::optional<int64_t> correction, bool keepdim, at::Tensor & out)>(at::native::var_out)); |
23218 | m.impl("_standard_gamma_grad.out" , TORCH_FN(functionalization::_standard_gamma_grad_out_out)); |
23219 | m.impl("poisson.out" , TORCH_FN(functionalization::poisson_out_out)); |
23220 | m.impl("_sparse_csr_sum.dim_dtype_out" , TORCH_FN(functionalization::_sparse_csr_sum_out_dim_dtype_out)); |
23221 | m.impl("_sparse_softmax_backward_data.out" , TORCH_FN(functionalization::_sparse_softmax_backward_data_out_out)); |
23222 | m.impl("_sparse_log_softmax.out" , TORCH_FN(functionalization::_sparse_log_softmax_out_out)); |
23223 | m.impl("_sparse_log_softmax_backward_data.out" , TORCH_FN(functionalization::_sparse_log_softmax_backward_data_out_out)); |
23224 | m.impl("resize_as_sparse.out" , TORCH_FN(functionalization::resize_as_sparse_out_out)); |
23225 | m.impl("resize_as_sparse_" , TORCH_FN(functionalization::resize_as_sparse_)); |
23226 | m.impl("sub.out" , TORCH_FN(functionalization::sub_out_out)); |
23227 | m.impl("sub_.Tensor" , TORCH_FN(functionalization::sub__Tensor)); |
23228 | m.impl("sub.Scalar_out" , TORCH_FN(functionalization::sub_out_Scalar_out)); |
23229 | m.impl("sub_.Scalar" , TORCH_FN(functionalization::sub__Scalar)); |
23230 | m.impl("copy_sparse_to_sparse.out" , TORCH_FN(functionalization::copy_sparse_to_sparse_out_out)); |
23231 | m.impl("copy_sparse_to_sparse_" , TORCH_FN(functionalization::copy_sparse_to_sparse_)); |
23232 | m.impl("mkldnn_reorder_conv2d_weight.out" , TORCH_FN(functionalization::mkldnn_reorder_conv2d_weight_out_out)); |
23233 | m.impl("_lstm_mps.out" , TORCH_FN(functionalization::_lstm_mps_out_out)); |
23234 | m.impl("_thnn_fused_gru_cell_backward.out" , TORCH_FN(functionalization::_thnn_fused_gru_cell_backward_out_out)); |
23235 | m.impl("set.source_Storage_out" , TORCH_FN(functionalization::set_out_source_Storage_out)); |
23236 | m.impl("set_.source_Storage" , TORCH_FN(functionalization::set__source_Storage)); |
23237 | m.impl("set.source_Storage_storage_offset_out" , TORCH_FN(functionalization::set_out_source_Storage_storage_offset_out)); |
23238 | m.impl("set_.source_Storage_storage_offset" , TORCH_FN(functionalization::set__source_Storage_storage_offset)); |
23239 | m.impl("set.source_Tensor_out" , TORCH_FN(functionalization::set_out_source_Tensor_out)); |
23240 | m.impl("set_.source_Tensor" , TORCH_FN(functionalization::set__source_Tensor)); |
23241 | m.impl("set.out" , TORCH_FN(functionalization::set_out_out)); |
23242 | m.impl("set_" , TORCH_FN(functionalization::set_)); |
23243 | m.impl("masked_scatter.out" , TORCH_FN(functionalization::masked_scatter_out_out)); |
23244 | m.impl("masked_scatter_" , TORCH_FN(functionalization::masked_scatter_)); |
23245 | m.impl("index_reduce.out" , TORCH_FN(functionalization::index_reduce_out_out)); |
23246 | m.impl("index_reduce_" , TORCH_FN(functionalization::index_reduce_)); |
23247 | m.impl("eq.Scalar_out" , TORCH_FN(functionalization::eq_out_Scalar_out)); |
23248 | m.impl("eq_.Scalar" , TORCH_FN(functionalization::eq__Scalar)); |
23249 | m.impl("eq.Tensor_out" , TORCH_FN(functionalization::eq_out_Tensor_out)); |
23250 | m.impl("eq_.Tensor" , TORCH_FN(functionalization::eq__Tensor)); |
23251 | m.impl("bitwise_and.Tensor_out" , TORCH_FN(functionalization::bitwise_and_out_Tensor_out)); |
23252 | m.impl("bitwise_and_.Tensor" , TORCH_FN(functionalization::bitwise_and__Tensor)); |
23253 | m.impl("bitwise_and.Scalar_out" , TORCH_FN(functionalization::bitwise_and_out_Scalar_out)); |
23254 | m.impl("bitwise_and_.Scalar" , static_cast<at::Tensor & (*)(at::Tensor & self, const at::Scalar & other)>(at::native::bitwise_and_)); |
23255 | m.impl("bitwise_and.Scalar_Tensor_out" , TORCH_FN(functionalization::bitwise_and_out_Scalar_Tensor_out)); |
23256 | m.impl("bitwise_or.Tensor_out" , TORCH_FN(functionalization::bitwise_or_out_Tensor_out)); |
23257 | m.impl("bitwise_or_.Tensor" , TORCH_FN(functionalization::bitwise_or__Tensor)); |
23258 | m.impl("bitwise_or.Scalar" , static_cast<at::Tensor (*)(const at::Tensor & self, const at::Scalar & other)>(at::native::bitwise_or)); |
23259 | m.impl("bitwise_or.Scalar_out" , TORCH_FN(functionalization::bitwise_or_out_Scalar_out)); |
23260 | m.impl("bitwise_or_.Scalar" , static_cast<at::Tensor & (*)(at::Tensor & self, const at::Scalar & other)>(at::native::bitwise_or_)); |
23261 | m.impl("bitwise_or.Scalar_Tensor_out" , TORCH_FN(functionalization::bitwise_or_out_Scalar_Tensor_out)); |
23262 | m.impl("bitwise_left_shift.Tensor_out" , TORCH_FN(functionalization::bitwise_left_shift_out_Tensor_out)); |
23263 | m.impl("bitwise_left_shift_.Tensor" , TORCH_FN(functionalization::bitwise_left_shift__Tensor)); |
23264 | m.impl("bitwise_left_shift.Tensor_Scalar_out" , TORCH_FN(functionalization::bitwise_left_shift_out_Tensor_Scalar_out)); |
23265 | m.impl("bitwise_left_shift_.Tensor_Scalar" , TORCH_FN(functionalization::bitwise_left_shift__Tensor_Scalar)); |
23266 | m.impl("bitwise_left_shift.Scalar_Tensor_out" , TORCH_FN(functionalization::bitwise_left_shift_out_Scalar_Tensor_out)); |
23267 | m.impl("__rshift__.Scalar_out" , TORCH_FN(functionalization::__rshift___out_Scalar_out)); |
23268 | m.impl("__irshift__.Scalar" , TORCH_FN(functionalization::__irshift___Scalar)); |
23269 | m.impl("__rshift__.Tensor_out" , TORCH_FN(functionalization::__rshift___out_Tensor_out)); |
23270 | m.impl("__irshift__.Tensor" , TORCH_FN(functionalization::__irshift___Tensor)); |
23271 | m.impl("tril.out" , TORCH_FN(functionalization::tril_out_out)); |
23272 | m.impl("tril_" , TORCH_FN(functionalization::tril_)); |
23273 | m.impl("triu.out" , TORCH_FN(functionalization::triu_out_out)); |
23274 | m.impl("triu_" , TORCH_FN(functionalization::triu_)); |
23275 | m.impl("digamma.out" , TORCH_FN(functionalization::digamma_out_out)); |
23276 | m.impl("digamma_" , TORCH_FN(functionalization::digamma_)); |
23277 | m.impl("lerp.Scalar_out" , TORCH_FN(functionalization::lerp_out_Scalar_out)); |
23278 | m.impl("lerp_.Scalar" , TORCH_FN(functionalization::lerp__Scalar)); |
23279 | m.impl("lerp.Tensor_out" , TORCH_FN(functionalization::lerp_out_Tensor_out)); |
23280 | m.impl("lerp_.Tensor" , TORCH_FN(functionalization::lerp__Tensor)); |
23281 | m.impl("uniform.out" , TORCH_FN(functionalization::uniform_out_out)); |
23282 | m.impl("uniform_" , TORCH_FN(functionalization::uniform_)); |
23283 | m.impl("tril_indices.out" , TORCH_FN(functionalization::tril_indices_out_out)); |
23284 | m.impl("triu_indices.out" , TORCH_FN(functionalization::triu_indices_out_out)); |
23285 | m.impl("ge.Scalar_out" , TORCH_FN(functionalization::ge_out_Scalar_out)); |
23286 | m.impl("ge_.Scalar" , TORCH_FN(functionalization::ge__Scalar)); |
23287 | m.impl("ge.Tensor_out" , TORCH_FN(functionalization::ge_out_Tensor_out)); |
23288 | m.impl("ge_.Tensor" , TORCH_FN(functionalization::ge__Tensor)); |
23289 | m.impl("le.Scalar_out" , TORCH_FN(functionalization::le_out_Scalar_out)); |
23290 | m.impl("le_.Scalar" , TORCH_FN(functionalization::le__Scalar)); |
23291 | m.impl("le.Tensor_out" , TORCH_FN(functionalization::le_out_Tensor_out)); |
23292 | m.impl("le_.Tensor" , TORCH_FN(functionalization::le__Tensor)); |
23293 | m.impl("gt.Scalar_out" , TORCH_FN(functionalization::gt_out_Scalar_out)); |
23294 | m.impl("gt_.Scalar" , TORCH_FN(functionalization::gt__Scalar)); |
23295 | m.impl("gt.Tensor_out" , TORCH_FN(functionalization::gt_out_Tensor_out)); |
23296 | m.impl("gt_.Tensor" , TORCH_FN(functionalization::gt__Tensor)); |
23297 | m.impl("nonzero.out" , TORCH_FN(functionalization::nonzero_out_out)); |
23298 | m.impl("addcdiv.out" , TORCH_FN(functionalization::addcdiv_out_out)); |
23299 | m.impl("addcdiv_" , TORCH_FN(functionalization::addcdiv_)); |
23300 | m.impl("triangular_solve.X" , TORCH_FN(functionalization::triangular_solve_out_X)); |
23301 | m.impl("cholesky_solve.out" , TORCH_FN(functionalization::cholesky_solve_out_out)); |
23302 | m.impl("cholesky_inverse.out" , TORCH_FN(functionalization::cholesky_inverse_out_out)); |
23303 | m.impl("qr" , static_cast<::std::tuple<at::Tensor,at::Tensor> (*)(const at::Tensor & self, bool some)>(at::native::qr)); |
23304 | m.impl("qr.Q" , static_cast<::std::tuple<at::Tensor &,at::Tensor &> (*)(const at::Tensor & self, bool some, at::Tensor & Q, at::Tensor & R)>(at::native::qr_out)); |
23305 | m.impl("geqrf.a" , TORCH_FN(functionalization::geqrf_out_a)); |
23306 | m.impl("orgqr" , static_cast<at::Tensor (*)(const at::Tensor & self, const at::Tensor & input2)>(at::native::orgqr)); |
23307 | m.impl("orgqr.out" , static_cast<at::Tensor & (*)(const at::Tensor & self, const at::Tensor & input2, at::Tensor & out)>(at::native::orgqr_out)); |
23308 | m.impl("lu_solve" , static_cast<at::Tensor (*)(const at::Tensor & self, const at::Tensor & LU_data, const at::Tensor & LU_pivots)>(at::native::lu_solve)); |
23309 | m.impl("lu_solve.out" , static_cast<at::Tensor & (*)(const at::Tensor & self, const at::Tensor & LU_data, const at::Tensor & LU_pivots, at::Tensor & out)>(at::native::lu_solve_out)); |
23310 | m.impl("lgamma.out" , TORCH_FN(functionalization::lgamma_out_out)); |
23311 | m.impl("lgamma_" , TORCH_FN(functionalization::lgamma_)); |
23312 | m.impl("erfinv.out" , TORCH_FN(functionalization::erfinv_out_out)); |
23313 | m.impl("erfinv_" , TORCH_FN(functionalization::erfinv_)); |
23314 | m.impl("i0.out" , TORCH_FN(functionalization::i0_out_out)); |
23315 | m.impl("i0_" , TORCH_FN(functionalization::i0_)); |
23316 | m.impl("sign.out" , TORCH_FN(functionalization::sign_out_out)); |
23317 | m.impl("sign_" , TORCH_FN(functionalization::sign_)); |
23318 | m.impl("signbit.out" , TORCH_FN(functionalization::signbit_out_out)); |
23319 | m.impl("atan2.out" , TORCH_FN(functionalization::atan2_out_out)); |
23320 | m.impl("atan2_" , TORCH_FN(functionalization::atan2_)); |
23321 | m.impl("_histogramdd_bin_edges.out" , TORCH_FN(functionalization::_histogramdd_bin_edges_out_out)); |
23322 | m.impl("_histogramdd_from_bin_tensors.out" , TORCH_FN(functionalization::_histogramdd_from_bin_tensors_out_out)); |
23323 | m.impl("fmod.Scalar_out" , TORCH_FN(functionalization::fmod_out_Scalar_out)); |
23324 | m.impl("fmod_.Scalar" , TORCH_FN(functionalization::fmod__Scalar)); |
23325 | m.impl("fmod.Tensor_out" , TORCH_FN(functionalization::fmod_out_Tensor_out)); |
23326 | m.impl("fmod_.Tensor" , TORCH_FN(functionalization::fmod__Tensor)); |
23327 | m.impl("nextafter.out" , TORCH_FN(functionalization::nextafter_out_out)); |
23328 | m.impl("nextafter_" , TORCH_FN(functionalization::nextafter_)); |
23329 | m.impl("minimum.out" , TORCH_FN(functionalization::minimum_out_out)); |
23330 | m.impl("topk.values" , TORCH_FN(functionalization::topk_out_values)); |
23331 | m.impl("any.all_out" , TORCH_FN(functionalization::any_out_all_out)); |
23332 | m.impl("_foreach_mul.Scalar_out" , TORCH_FN(functionalization::_foreach_mul_out_Scalar_out)); |
23333 | m.impl("_foreach_mul_.Scalar" , TORCH_FN(functionalization::_foreach_mul__Scalar)); |
23334 | m.impl("_foreach_div.Scalar_out" , TORCH_FN(functionalization::_foreach_div_out_Scalar_out)); |
23335 | m.impl("_foreach_div_.Scalar" , TORCH_FN(functionalization::_foreach_div__Scalar)); |
23336 | m.impl("_foreach_clamp_max.Scalar_out" , TORCH_FN(functionalization::_foreach_clamp_max_out_Scalar_out)); |
23337 | m.impl("_foreach_clamp_max_.Scalar" , TORCH_FN(functionalization::_foreach_clamp_max__Scalar)); |
23338 | m.impl("_foreach_minimum.Scalar_out" , TORCH_FN(functionalization::_foreach_minimum_out_Scalar_out)); |
23339 | m.impl("_foreach_minimum_.Scalar" , TORCH_FN(functionalization::_foreach_minimum__Scalar)); |
23340 | m.impl("_foreach_mul.List_out" , TORCH_FN(functionalization::_foreach_mul_out_List_out)); |
23341 | m.impl("_foreach_mul_.List" , TORCH_FN(functionalization::_foreach_mul__List)); |
23342 | m.impl("_foreach_div.List_out" , TORCH_FN(functionalization::_foreach_div_out_List_out)); |
23343 | m.impl("_foreach_div_.List" , TORCH_FN(functionalization::_foreach_div__List)); |
23344 | m.impl("_foreach_clamp_max.List_out" , TORCH_FN(functionalization::_foreach_clamp_max_out_List_out)); |
23345 | m.impl("_foreach_clamp_max_.List" , TORCH_FN(functionalization::_foreach_clamp_max__List)); |
23346 | m.impl("_foreach_minimum.List_out" , TORCH_FN(functionalization::_foreach_minimum_out_List_out)); |
23347 | m.impl("_foreach_minimum_.List" , TORCH_FN(functionalization::_foreach_minimum__List)); |
23348 | m.impl("_foreach_div.ScalarList_out" , TORCH_FN(functionalization::_foreach_div_out_ScalarList_out)); |
23349 | m.impl("_foreach_div_.ScalarList" , TORCH_FN(functionalization::_foreach_div__ScalarList)); |
23350 | m.impl("_foreach_mul.ScalarList_out" , TORCH_FN(functionalization::_foreach_mul_out_ScalarList_out)); |
23351 | m.impl("_foreach_mul_.ScalarList" , TORCH_FN(functionalization::_foreach_mul__ScalarList)); |
23352 | m.impl("_foreach_clamp_max.ScalarList_out" , TORCH_FN(functionalization::_foreach_clamp_max_out_ScalarList_out)); |
23353 | m.impl("_foreach_clamp_max_.ScalarList" , TORCH_FN(functionalization::_foreach_clamp_max__ScalarList)); |
23354 | m.impl("_foreach_minimum.ScalarList_out" , TORCH_FN(functionalization::_foreach_minimum_out_ScalarList_out)); |
23355 | m.impl("_foreach_minimum_.ScalarList" , TORCH_FN(functionalization::_foreach_minimum__ScalarList)); |
23356 | m.impl("_foreach_expm1.out" , TORCH_FN(functionalization::_foreach_expm1_out_out)); |
23357 | m.impl("_foreach_expm1_" , TORCH_FN(functionalization::_foreach_expm1_)); |
23358 | m.impl("_foreach_tanh.out" , TORCH_FN(functionalization::_foreach_tanh_out_out)); |
23359 | m.impl("_foreach_tanh_" , TORCH_FN(functionalization::_foreach_tanh_)); |
23360 | m.impl("_foreach_sin.out" , TORCH_FN(functionalization::_foreach_sin_out_out)); |
23361 | m.impl("_foreach_sin_" , TORCH_FN(functionalization::_foreach_sin_)); |
23362 | m.impl("_foreach_frac.out" , TORCH_FN(functionalization::_foreach_frac_out_out)); |
23363 | m.impl("_foreach_frac_" , TORCH_FN(functionalization::_foreach_frac_)); |
23364 | m.impl("_foreach_reciprocal.out" , TORCH_FN(functionalization::_foreach_reciprocal_out_out)); |
23365 | m.impl("_foreach_reciprocal_" , TORCH_FN(functionalization::_foreach_reciprocal_)); |
23366 | m.impl("_foreach_trunc.out" , TORCH_FN(functionalization::_foreach_trunc_out_out)); |
23367 | m.impl("_foreach_trunc_" , TORCH_FN(functionalization::_foreach_trunc_)); |
23368 | m.impl("_convert_indices_from_csr_to_coo.out" , TORCH_FN(functionalization::_convert_indices_from_csr_to_coo_out_out)); |
23369 | m.impl("multilabel_margin_loss_backward.grad_input" , TORCH_FN(functionalization::multilabel_margin_loss_backward_out_grad_input)); |
23370 | m.impl("nll_loss" , static_cast<at::Tensor (*)(const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, c10::SymInt ignore_index)>(at::native::nll_loss_symint)); |
23371 | m.impl("nll_loss.out" , static_cast<at::Tensor & (*)(const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, int64_t ignore_index, at::Tensor & out)>(at::native::nll_loss_out)); |
23372 | m.impl("nll_loss_backward.grad_input" , TORCH_FN(functionalization::nll_loss_backward_out_grad_input)); |
23373 | m.impl("nll_loss2d" , static_cast<at::Tensor (*)(const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, c10::SymInt ignore_index)>(at::native::nll_loss2d_symint)); |
23374 | m.impl("nll_loss2d.out" , static_cast<at::Tensor & (*)(const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, int64_t ignore_index, at::Tensor & out)>(at::native::nll_loss2d_out)); |
23375 | m.impl("nll_loss2d_backward.grad_input" , TORCH_FN(functionalization::nll_loss2d_backward_out_grad_input)); |
23376 | m.impl("smooth_l1_loss.out" , TORCH_FN(functionalization::smooth_l1_loss_out_out)); |
23377 | m.impl("huber_loss.out" , TORCH_FN(functionalization::huber_loss_out_out)); |
23378 | m.impl("soft_margin_loss_backward.grad_input" , TORCH_FN(functionalization::soft_margin_loss_backward_out_grad_input)); |
23379 | m.impl("elu.out" , TORCH_FN(functionalization::elu_out_out)); |
23380 | m.impl("elu_" , TORCH_FN(functionalization::elu_)); |
23381 | m.impl("glu.out" , TORCH_FN(functionalization::glu_out_out)); |
23382 | m.impl("hardsigmoid.out" , TORCH_FN(functionalization::hardsigmoid_out_out)); |
23383 | m.impl("hardsigmoid_" , TORCH_FN(functionalization::hardsigmoid_)); |
23384 | m.impl("leaky_relu_backward.grad_input" , TORCH_FN(functionalization::leaky_relu_backward_out_grad_input)); |
23385 | m.impl("softshrink.out" , TORCH_FN(functionalization::softshrink_out_out)); |
23386 | m.impl("_adaptive_avg_pool2d.out" , TORCH_FN(functionalization::_adaptive_avg_pool2d_out_out)); |
23387 | m.impl("_adaptive_avg_pool3d.out" , TORCH_FN(functionalization::_adaptive_avg_pool3d_out_out)); |
23388 | m.impl("avg_pool3d.out" , TORCH_FN(functionalization::avg_pool3d_out_out)); |
23389 | m.impl("fractional_max_pool3d.output" , TORCH_FN(functionalization::fractional_max_pool3d_out_output)); |
23390 | m.impl("max_pool2d_with_indices_backward.grad_input" , TORCH_FN(functionalization::max_pool2d_with_indices_backward_out_grad_input)); |
23391 | m.impl("max_unpool3d.out" , TORCH_FN(functionalization::max_unpool3d_out_out)); |
23392 | m.impl("reflection_pad1d.out" , TORCH_FN(functionalization::reflection_pad1d_out_out)); |
23393 | m.impl("reflection_pad1d_backward.grad_input" , TORCH_FN(functionalization::reflection_pad1d_backward_out_grad_input)); |
23394 | m.impl("reflection_pad2d.out" , TORCH_FN(functionalization::reflection_pad2d_out_out)); |
23395 | m.impl("reflection_pad3d.out" , TORCH_FN(functionalization::reflection_pad3d_out_out)); |
23396 | m.impl("replication_pad1d.out" , TORCH_FN(functionalization::replication_pad1d_out_out)); |
23397 | m.impl("replication_pad1d_backward.grad_input" , TORCH_FN(functionalization::replication_pad1d_backward_out_grad_input)); |
23398 | m.impl("replication_pad2d_backward.grad_input" , TORCH_FN(functionalization::replication_pad2d_backward_out_grad_input)); |
23399 | m.impl("replication_pad3d.out" , TORCH_FN(functionalization::replication_pad3d_out_out)); |
23400 | m.impl("replication_pad3d_backward.grad_input" , TORCH_FN(functionalization::replication_pad3d_backward_out_grad_input)); |
23401 | m.impl("upsample_nearest2d.out" , TORCH_FN(functionalization::upsample_nearest2d_out_out)); |
23402 | m.impl("_upsample_nearest_exact2d.out" , TORCH_FN(functionalization::_upsample_nearest_exact2d_out_out)); |
23403 | m.impl("_upsample_nearest_exact2d_backward.grad_input" , TORCH_FN(functionalization::_upsample_nearest_exact2d_backward_out_grad_input)); |
23404 | m.impl("_upsample_nearest_exact3d_backward.grad_input" , TORCH_FN(functionalization::_upsample_nearest_exact3d_backward_out_grad_input)); |
23405 | m.impl("sigmoid_backward.grad_input" , TORCH_FN(functionalization::sigmoid_backward_out_grad_input)); |
23406 | m.impl("slow_conv_transpose2d.out" , TORCH_FN(functionalization::slow_conv_transpose2d_out_out)); |
23407 | m.impl("_slow_conv2d_forward.output" , TORCH_FN(functionalization::_slow_conv2d_forward_out_output)); |
23408 | m.impl("conv_depthwise3d.out" , TORCH_FN(functionalization::conv_depthwise3d_out_out)); |
23409 | m.impl("slow_conv_dilated2d.out" , TORCH_FN(functionalization::slow_conv_dilated2d_out_out)); |
23410 | m.impl("special_ndtri.out" , TORCH_FN(functionalization::special_ndtri_out_out)); |
23411 | m.impl("special_erfc" , static_cast<at::Tensor (*)(const at::Tensor & self)>(at::native::special_erfc)); |
23412 | m.impl("special_erfc.out" , static_cast<at::Tensor & (*)(const at::Tensor & self, at::Tensor & out)>(at::native::special_erfc_out)); |
23413 | m.impl("special_logit" , static_cast<at::Tensor (*)(const at::Tensor & self, c10::optional<double> eps)>(at::native::special_logit)); |
23414 | m.impl("special_logit.out" , static_cast<at::Tensor & (*)(const at::Tensor & self, c10::optional<double> eps, at::Tensor & out)>(at::native::special_logit_out)); |
23415 | m.impl("special_polygamma" , static_cast<at::Tensor (*)(int64_t n, const at::Tensor & self)>(at::native::special_polygamma)); |
23416 | m.impl("special_polygamma.out" , static_cast<at::Tensor & (*)(int64_t n, const at::Tensor & self, at::Tensor & out)>(at::native::special_polygamma_out)); |
23417 | m.impl("special_sinc" , static_cast<at::Tensor (*)(const at::Tensor & self)>(at::native::special_sinc)); |
23418 | m.impl("special_sinc.out" , static_cast<at::Tensor & (*)(const at::Tensor & self, at::Tensor & out)>(at::native::special_sinc_out)); |
23419 | m.impl("fft_ifft" , static_cast<at::Tensor (*)(const at::Tensor & self, c10::optional<int64_t> n, int64_t dim, c10::optional<c10::string_view> norm)>(at::native::fft_ifft)); |
23420 | m.impl("fft_ifft.out" , static_cast<at::Tensor & (*)(const at::Tensor & self, c10::optional<int64_t> n, int64_t dim, c10::optional<c10::string_view> norm, at::Tensor & out)>(at::native::fft_ifft_out)); |
23421 | m.impl("fft_ihfft" , static_cast<at::Tensor (*)(const at::Tensor & self, c10::optional<int64_t> n, int64_t dim, c10::optional<c10::string_view> norm)>(at::native::fft_ihfft)); |
23422 | m.impl("fft_ihfft.out" , static_cast<at::Tensor & (*)(const at::Tensor & self, c10::optional<int64_t> n, int64_t dim, c10::optional<c10::string_view> norm, at::Tensor & out)>(at::native::fft_ihfft_out)); |
23423 | m.impl("fft_ifft2" , static_cast<at::Tensor (*)(const at::Tensor & self, at::OptionalIntArrayRef s, at::IntArrayRef dim, c10::optional<c10::string_view> norm)>(at::native::fft_ifft2)); |
23424 | m.impl("fft_ifft2.out" , static_cast<at::Tensor & (*)(const at::Tensor & self, at::OptionalIntArrayRef s, at::IntArrayRef dim, c10::optional<c10::string_view> norm, at::Tensor & out)>(at::native::fft_ifft2_out)); |
23425 | m.impl("fft_ihfftn" , static_cast<at::Tensor (*)(const at::Tensor & self, at::OptionalIntArrayRef s, at::OptionalIntArrayRef dim, c10::optional<c10::string_view> norm)>(at::native::fft_ihfftn)); |
23426 | m.impl("fft_ihfftn.out" , static_cast<const at::Tensor & (*)(const at::Tensor & self, at::OptionalIntArrayRef s, at::OptionalIntArrayRef dim, c10::optional<c10::string_view> norm, const at::Tensor & out)>(at::native::fft_ihfftn_out)); |
23427 | m.impl("linalg_cholesky_ex.L" , TORCH_FN(functionalization::linalg_cholesky_ex_out_L)); |
23428 | m.impl("linalg_cross.out" , TORCH_FN(functionalization::linalg_cross_out_out)); |
23429 | m.impl("linalg_lu_solve.out" , TORCH_FN(functionalization::linalg_lu_solve_out_out)); |
23430 | m.impl("linalg_matmul" , static_cast<at::Tensor (*)(const at::Tensor & self, const at::Tensor & other)>(at::native::linalg_matmul)); |
23431 | m.impl("linalg_matmul.out" , static_cast<at::Tensor & (*)(const at::Tensor & self, const at::Tensor & other, at::Tensor & out)>(at::native::linalg_matmul_out)); |
23432 | m.impl("linalg_vecdot" , static_cast<at::Tensor (*)(const at::Tensor & x, const at::Tensor & y, int64_t dim)>(at::native::linalg_vecdot)); |
23433 | m.impl("linalg_vecdot.out" , static_cast<at::Tensor & (*)(const at::Tensor & x, const at::Tensor & y, int64_t dim, at::Tensor & out)>(at::native::linalg_vecdot_out)); |
23434 | m.impl("linalg_eigvalsh" , static_cast<at::Tensor (*)(const at::Tensor & self, c10::string_view UPLO)>(at::native::linalg_eigvalsh)); |
23435 | m.impl("linalg_eigvalsh.out" , static_cast<at::Tensor & (*)(const at::Tensor & self, c10::string_view UPLO, at::Tensor & out)>(at::native::linalg_eigvalsh_out)); |
23436 | m.impl("linalg_inv_ex.inverse" , TORCH_FN(functionalization::linalg_inv_ex_out_inverse)); |
23437 | m.impl("inverse" , static_cast<at::Tensor (*)(const at::Tensor & self)>(at::native::inverse)); |
23438 | m.impl("inverse.out" , static_cast<at::Tensor & (*)(const at::Tensor & self, at::Tensor & out)>(at::native::inverse_out)); |
23439 | m.impl("outer" , static_cast<at::Tensor (*)(const at::Tensor & self, const at::Tensor & vec2)>(at::native::outer)); |
23440 | m.impl("outer.out" , static_cast<at::Tensor & (*)(const at::Tensor & self, const at::Tensor & vec2, at::Tensor & out)>(at::native::outer_out)); |
23441 | m.impl("linalg_svd" , static_cast<::std::tuple<at::Tensor,at::Tensor,at::Tensor> (*)(const at::Tensor & A, bool full_matrices, c10::optional<c10::string_view> driver)>(at::native::linalg_svd)); |
23442 | m.impl("linalg_svd.U" , static_cast<::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> (*)(const at::Tensor & A, bool full_matrices, c10::optional<c10::string_view> driver, at::Tensor & U, at::Tensor & S, at::Tensor & Vh)>(at::native::linalg_svd_out)); |
23443 | m.impl("linalg_cond" , static_cast<at::Tensor (*)(const at::Tensor & self, const c10::optional<at::Scalar> & p)>(at::native::linalg_cond)); |
23444 | m.impl("linalg_cond.out" , static_cast<at::Tensor & (*)(const at::Tensor & self, const c10::optional<at::Scalar> & p, at::Tensor & out)>(at::native::linalg_cond_out)); |
23445 | m.impl("linalg_cond.p_str" , static_cast<at::Tensor (*)(const at::Tensor & self, c10::string_view p)>(at::native::linalg_cond)); |
23446 | m.impl("linalg_cond.p_str_out" , static_cast<at::Tensor & (*)(const at::Tensor & self, c10::string_view p, at::Tensor & out)>(at::native::linalg_cond_out)); |
23447 | m.impl("linalg_solve_ex" , static_cast<::std::tuple<at::Tensor,at::Tensor> (*)(const at::Tensor & A, const at::Tensor & B, bool left, bool check_errors)>(at::native::linalg_solve_ex)); |
23448 | m.impl("linalg_solve_ex.out" , static_cast<::std::tuple<at::Tensor &,at::Tensor &> (*)(const at::Tensor & A, const at::Tensor & B, bool left, bool check_errors, at::Tensor & result, at::Tensor & info)>(at::native::linalg_solve_ex_out)); |
23449 | m.impl("linalg_tensorsolve" , static_cast<at::Tensor (*)(const at::Tensor & self, const at::Tensor & other, at::OptionalIntArrayRef dims)>(at::native::linalg_tensorsolve)); |
23450 | m.impl("linalg_tensorsolve.out" , static_cast<at::Tensor & (*)(const at::Tensor & self, const at::Tensor & other, at::OptionalIntArrayRef dims, at::Tensor & out)>(at::native::linalg_tensorsolve_out)); |
23451 | m.impl("linalg_matrix_rank.atol_rtol_tensor" , static_cast<at::Tensor (*)(const at::Tensor & input, const c10::optional<at::Tensor> & atol, const c10::optional<at::Tensor> & rtol, bool hermitian)>(at::native::linalg_matrix_rank)); |
23452 | m.impl("linalg_matrix_rank.atol_rtol_tensor_out" , static_cast<at::Tensor & (*)(const at::Tensor & input, const c10::optional<at::Tensor> & atol, const c10::optional<at::Tensor> & rtol, bool hermitian, at::Tensor & out)>(at::native::linalg_matrix_rank_out)); |
23453 | m.impl("linalg_matrix_rank.atol_rtol_float" , static_cast<at::Tensor (*)(const at::Tensor & self, c10::optional<double> atol, c10::optional<double> rtol, bool hermitian)>(at::native::linalg_matrix_rank)); |
23454 | m.impl("linalg_matrix_rank.atol_rtol_float_out" , static_cast<at::Tensor & (*)(const at::Tensor & self, c10::optional<double> atol, c10::optional<double> rtol, bool hermitian, at::Tensor & out)>(at::native::linalg_matrix_rank_out)); |
23455 | m.impl("linalg_matrix_rank" , static_cast<at::Tensor (*)(const at::Tensor & self, double tol, bool hermitian)>(at::native::linalg_matrix_rank)); |
23456 | m.impl("linalg_matrix_rank.out" , static_cast<at::Tensor & (*)(const at::Tensor & self, double tol, bool hermitian, at::Tensor & out)>(at::native::linalg_matrix_rank_out)); |
23457 | m.impl("linalg_matrix_rank.tol_tensor" , static_cast<at::Tensor (*)(const at::Tensor & input, const at::Tensor & tol, bool hermitian)>(at::native::linalg_matrix_rank)); |
23458 | m.impl("linalg_matrix_rank.out_tol_tensor" , static_cast<at::Tensor & (*)(const at::Tensor & input, const at::Tensor & tol, bool hermitian, at::Tensor & out)>(at::native::linalg_matrix_rank_out)); |
23459 | m.impl("_test_optional_floatlist.out" , TORCH_FN(functionalization::_test_optional_floatlist_out_out)); |
23460 | m.impl("_test_warn_in_autograd.out" , TORCH_FN(functionalization::_test_warn_in_autograd_out_out)); |
23461 | m.impl("_segment_reduce_backward.out" , TORCH_FN(functionalization::_segment_reduce_backward_out_out)); |
23462 | m.impl("_sparse_broadcast_to_copy.out" , TORCH_FN(functionalization::_sparse_broadcast_to_copy_out_out)); |
23463 | m.impl("unsqueeze_copy.out" , TORCH_FN(functionalization::unsqueeze_copy_out_out)); |
23464 | m.impl("values_copy.out" , TORCH_FN(functionalization::values_copy_out_out)); |
23465 | m.impl("to_padded_tensor.out" , TORCH_FN(functionalization::to_padded_tensor_out_out)); |
23466 | m.impl("_triton_scaled_dot_attention.out" , TORCH_FN(functionalization::_triton_scaled_dot_attention_out_out)); |
23467 | m.impl("special_bessel_y0.out" , TORCH_FN(functionalization::special_bessel_y0_out_out)); |
23468 | m.impl("special_chebyshev_polynomial_t.out" , TORCH_FN(functionalization::special_chebyshev_polynomial_t_out_out)); |
23469 | m.impl("special_chebyshev_polynomial_t.x_scalar" , static_cast<at::Tensor (*)(const at::Scalar & x, const at::Tensor & n)>(at::native::special_chebyshev_polynomial_t)); |
23470 | m.impl("special_chebyshev_polynomial_t.x_scalar_out" , static_cast<at::Tensor & (*)(const at::Scalar & x, const at::Tensor & n, at::Tensor & out)>(at::native::special_chebyshev_polynomial_t_out)); |
23471 | m.impl("special_chebyshev_polynomial_t.n_scalar" , static_cast<at::Tensor (*)(const at::Tensor & x, const at::Scalar & n)>(at::native::special_chebyshev_polynomial_t)); |
23472 | m.impl("special_chebyshev_polynomial_t.n_scalar_out" , TORCH_FN(functionalization::special_chebyshev_polynomial_t_out_n_scalar_out)); |
23473 | m.impl("special_chebyshev_polynomial_u.out" , TORCH_FN(functionalization::special_chebyshev_polynomial_u_out_out)); |
23474 | m.impl("special_chebyshev_polynomial_u.x_scalar" , static_cast<at::Tensor (*)(const at::Scalar & x, const at::Tensor & n)>(at::native::special_chebyshev_polynomial_u)); |
23475 | m.impl("special_chebyshev_polynomial_u.x_scalar_out" , static_cast<at::Tensor & (*)(const at::Scalar & x, const at::Tensor & n, at::Tensor & out)>(at::native::special_chebyshev_polynomial_u_out)); |
23476 | m.impl("special_chebyshev_polynomial_u.n_scalar" , static_cast<at::Tensor (*)(const at::Tensor & x, const at::Scalar & n)>(at::native::special_chebyshev_polynomial_u)); |
23477 | m.impl("special_chebyshev_polynomial_u.n_scalar_out" , TORCH_FN(functionalization::special_chebyshev_polynomial_u_out_n_scalar_out)); |
23478 | m.impl("special_hermite_polynomial_h.out" , TORCH_FN(functionalization::special_hermite_polynomial_h_out_out)); |
23479 | m.impl("special_hermite_polynomial_h.x_scalar" , static_cast<at::Tensor (*)(const at::Scalar & x, const at::Tensor & n)>(at::native::special_hermite_polynomial_h)); |
23480 | m.impl("special_hermite_polynomial_h.x_scalar_out" , static_cast<at::Tensor & (*)(const at::Scalar & x, const at::Tensor & n, at::Tensor & out)>(at::native::special_hermite_polynomial_h_out)); |
23481 | m.impl("special_hermite_polynomial_h.n_scalar" , static_cast<at::Tensor (*)(const at::Tensor & x, const at::Scalar & n)>(at::native::special_hermite_polynomial_h)); |
23482 | m.impl("special_hermite_polynomial_h.n_scalar_out" , TORCH_FN(functionalization::special_hermite_polynomial_h_out_n_scalar_out)); |
23483 | m.impl("special_modified_bessel_k1.out" , TORCH_FN(functionalization::special_modified_bessel_k1_out_out)); |
23484 | m.impl("special_scaled_modified_bessel_k0.out" , TORCH_FN(functionalization::special_scaled_modified_bessel_k0_out_out)); |
23485 | m.impl("special_scaled_modified_bessel_k1.out" , TORCH_FN(functionalization::special_scaled_modified_bessel_k1_out_out)); |
23486 | m.impl("special_spherical_bessel_j0.out" , TORCH_FN(functionalization::special_spherical_bessel_j0_out_out)); |
23487 | m.impl("refine_names" , static_cast<at::Tensor (*)(const at::Tensor & self, at::DimnameList names)>(at::native::refine_names)); |
23488 | m.impl("real" , static_cast<at::Tensor (*)(const at::Tensor & self)>(at::native::real)); |
23489 | m.impl("_neg_view" , TORCH_FN(functionalization::_neg_view)); |
23490 | m.impl("diagonal" , TORCH_FN(functionalization::diagonal)); |
23491 | m.impl("diagonal.Dimname" , static_cast<at::Tensor (*)(const at::Tensor & self, at::Dimname outdim, at::Dimname dim1, at::Dimname dim2, int64_t offset)>(at::native::diagonal)); |
23492 | m.impl("narrow" , static_cast<at::Tensor (*)(const at::Tensor & self, int64_t dim, c10::SymInt start, c10::SymInt length)>(at::native::narrow_symint)); |
23493 | m.impl("narrow.Tensor" , static_cast<at::Tensor (*)(const at::Tensor & self, int64_t dim, const at::Tensor & start, c10::SymInt length)>(at::native::narrow_tensor_symint)); |
23494 | m.impl("numpy_T" , static_cast<at::Tensor (*)(const at::Tensor & self)>(at::native::numpy_T)); |
23495 | m.impl("select.Dimname" , static_cast<at::Tensor (*)(const at::Tensor & self, at::Dimname dim, int64_t index)>(at::native::select)); |
23496 | m.impl("select.int" , TORCH_FN(functionalization::select_int)); |
23497 | m.impl("split_with_sizes" , TORCH_FN(functionalization::split_with_sizes)); |
23498 | m.impl("vsplit.int" , static_cast<::std::vector<at::Tensor> (*)(const at::Tensor & self, int64_t sections)>(at::native::vsplit)); |
23499 | m.impl("vsplit.array" , static_cast<::std::vector<at::Tensor> (*)(const at::Tensor & self, at::IntArrayRef indices)>(at::native::vsplit)); |
23500 | m.impl("squeeze" , TORCH_FN(functionalization::squeeze)); |
23501 | m.impl("squeeze_" , TORCH_FN(functionalization::squeeze_)); |
23502 | m.impl("squeeze.dim" , TORCH_FN(functionalization::squeeze_dim)); |
23503 | m.impl("squeeze_.dim" , TORCH_FN(functionalization::squeeze__dim)); |
23504 | m.impl("squeeze.dimname" , static_cast<at::Tensor (*)(const at::Tensor & self, at::Dimname dim)>(at::native::squeeze)); |
23505 | m.impl("squeeze_.dimname" , static_cast<at::Tensor & (*)(at::Tensor & self, at::Dimname dim)>(at::native::squeeze_)); |
23506 | m.impl("squeeze.dims" , TORCH_FN(functionalization::squeeze_dims)); |
23507 | m.impl("squeeze_.dims" , TORCH_FN(functionalization::squeeze__dims)); |
23508 | m.impl("view_as" , static_cast<at::Tensor (*)(const at::Tensor & self, const at::Tensor & other)>(at::native::view_as)); |
23509 | m.impl("unbind.int" , TORCH_FN(functionalization::unbind_int)); |
23510 | m.impl("unbind.Dimname" , static_cast<::std::vector<at::Tensor> (*)(const at::Tensor & self, at::Dimname dim)>(at::native::unbind)); |
23511 | m.impl("alias" , TORCH_FN(functionalization::alias)); |
23512 | m.impl("_cast_Char" , static_cast<at::Tensor (*)(const at::Tensor & self, bool non_blocking)>(at::native::_cast_Char)); |
23513 | m.impl("_cast_Half" , static_cast<at::Tensor (*)(const at::Tensor & self, bool non_blocking)>(at::native::_cast_Half)); |
23514 | m.impl("_backward" , static_cast<void (*)(const at::Tensor & self, at::TensorList inputs, const c10::optional<at::Tensor> & gradient, c10::optional<bool> retain_graph, bool create_graph)>(at::native::_backward)); |
23515 | m.impl("set_data" , static_cast<void (*)(at::Tensor & self, const at::Tensor & new_data)>(at::native::set_data)); |
23516 | m.impl("data" , static_cast<at::Tensor (*)(const at::Tensor & self)>(at::native::data)); |
23517 | m.impl("requires_grad_" , static_cast<at::Tensor & (*)(at::Tensor & self, bool requires_grad)>(at::native::requires_grad_)); |
23518 | m.impl("align_as" , static_cast<at::Tensor (*)(const at::Tensor & self, const at::Tensor & other)>(at::native::align_as)); |
23519 | m.impl("_sobol_engine_ff_" , static_cast<at::Tensor & (*)(at::Tensor & self, int64_t n, const at::Tensor & sobolstate, int64_t dimension, int64_t num_generated)>(at::native::_sobol_engine_ff_)); |
23520 | m.impl("feature_alpha_dropout" , static_cast<at::Tensor (*)(const at::Tensor & input, double p, bool train)>(at::native::feature_alpha_dropout)); |
23521 | m.impl("feature_alpha_dropout_" , static_cast<at::Tensor & (*)(at::Tensor & self, double p, bool train)>(at::native::feature_alpha_dropout_)); |
23522 | m.impl("atleast_1d" , static_cast<at::Tensor (*)(const at::Tensor & self)>(at::native::atleast_1d)); |
23523 | m.impl("atleast_1d.Sequence" , static_cast<::std::vector<at::Tensor> (*)(at::TensorList tensors)>(at::native::atleast_1d)); |
23524 | m.impl("_batch_norm_impl_index" , static_cast<::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor,int64_t> (*)(const at::Tensor & input, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, const c10::optional<at::Tensor> & running_mean, const c10::optional<at::Tensor> & running_var, bool training, double momentum, double eps, bool cudnn_enabled)>(at::native::_batch_norm_impl_index)); |
23525 | m.impl("bilinear" , static_cast<at::Tensor (*)(const at::Tensor & input1, const at::Tensor & input2, const at::Tensor & weight, const c10::optional<at::Tensor> & bias)>(at::native::bilinear)); |
23526 | m.impl("unsafe_chunk" , static_cast<::std::vector<at::Tensor> (*)(const at::Tensor & self, int64_t chunks, int64_t dim)>(at::native::unsafe_chunk)); |
23527 | m.impl("conv_transpose1d" , static_cast<at::Tensor (*)(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef output_padding, int64_t groups, at::IntArrayRef dilation)>(at::native::conv_transpose1d)); |
23528 | m.impl("conv_transpose2d.input" , static_cast<at::Tensor (*)(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef output_padding, int64_t groups, at::IntArrayRef dilation)>(at::native::conv_transpose2d)); |
23529 | m.impl("conv_transpose3d.input" , static_cast<at::Tensor (*)(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef output_padding, int64_t groups, at::IntArrayRef dilation)>(at::native::conv_transpose3d)); |
23530 | m.impl("cosine_embedding_loss" , static_cast<at::Tensor (*)(const at::Tensor & input1, const at::Tensor & input2, const at::Tensor & target, double margin, int64_t reduction)>(at::native::cosine_embedding_loss)); |
23531 | m.impl("cumulative_trapezoid.x" , static_cast<at::Tensor (*)(const at::Tensor & y, const at::Tensor & x, int64_t dim)>(at::native::cumulative_trapezoid)); |
23532 | m.impl("cumulative_trapezoid.dx" , static_cast<at::Tensor (*)(const at::Tensor & y, const at::Scalar & dx, int64_t dim)>(at::native::cumulative_trapezoid)); |
23533 | m.impl("ctc_loss.IntList" , static_cast<at::Tensor (*)(const at::Tensor & log_probs, const at::Tensor & targets, at::IntArrayRef input_lengths, at::IntArrayRef target_lengths, int64_t blank, int64_t reduction, bool zero_infinity)>(at::native::ctc_loss)); |
23534 | m.impl("ctc_loss.Tensor" , static_cast<at::Tensor (*)(const at::Tensor & log_probs, const at::Tensor & targets, const at::Tensor & input_lengths, const at::Tensor & target_lengths, int64_t blank, int64_t reduction, bool zero_infinity)>(at::native::ctc_loss)); |
23535 | m.impl("gradient.scalarint" , static_cast<::std::vector<at::Tensor> (*)(const at::Tensor & self, const c10::optional<at::Scalar> & spacing, c10::optional<int64_t> dim, int64_t edge_order)>(at::native::gradient)); |
23536 | m.impl("gradient.scalararray" , static_cast<::std::vector<at::Tensor> (*)(const at::Tensor & self, const at::Scalar & spacing, at::IntArrayRef dim, int64_t edge_order)>(at::native::gradient)); |
23537 | m.impl("gradient.array" , static_cast<::std::vector<at::Tensor> (*)(const at::Tensor & self, at::IntArrayRef dim, int64_t edge_order)>(at::native::gradient)); |
23538 | m.impl("gradient.scalarrayint" , static_cast<::std::vector<at::Tensor> (*)(const at::Tensor & self, at::ArrayRef<at::Scalar> spacing, c10::optional<int64_t> dim, int64_t edge_order)>(at::native::gradient)); |
23539 | m.impl("gradient.scalarrayarray" , static_cast<::std::vector<at::Tensor> (*)(const at::Tensor & self, at::ArrayRef<at::Scalar> spacing, at::IntArrayRef dim, int64_t edge_order)>(at::native::gradient)); |
23540 | m.impl("gradient.tensorarrayint" , static_cast<::std::vector<at::Tensor> (*)(const at::Tensor & self, at::TensorList spacing, c10::optional<int64_t> dim, int64_t edge_order)>(at::native::gradient)); |
23541 | m.impl("gradient.tensorarray" , static_cast<::std::vector<at::Tensor> (*)(const at::Tensor & self, at::TensorList spacing, at::IntArrayRef dim, int64_t edge_order)>(at::native::gradient)); |
23542 | m.impl("einsum" , static_cast<at::Tensor (*)(c10::string_view equation, at::TensorList tensors, at::OptionalIntArrayRef path)>(at::native::einsum)); |
23543 | m.impl("grid_sampler" , static_cast<at::Tensor (*)(const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners)>(at::native::grid_sampler)); |
23544 | m.impl("_grid_sampler_2d_cpu_fallback_backward" , static_cast<::std::tuple<at::Tensor,at::Tensor> (*)(const at::Tensor & grad_output, const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners)>(at::native::_grid_sampler_2d_cpu_fallback_backward)); |
23545 | m.impl("hinge_embedding_loss" , static_cast<at::Tensor (*)(const at::Tensor & self, const at::Tensor & target, double margin, int64_t reduction)>(at::native::hinge_embedding_loss)); |
23546 | m.impl("_cufft_set_plan_cache_max_size" , static_cast<void (*)(int64_t device_index, int64_t max_size)>(at::native::_cufft_set_plan_cache_max_size)); |
23547 | m.impl("_cufft_clear_plan_cache" , static_cast<void (*)(int64_t device_index)>(at::native::_cufft_clear_plan_cache)); |
23548 | m.impl("isclose" , static_cast<at::Tensor (*)(const at::Tensor & self, const at::Tensor & other, double rtol, double atol, bool equal_nan)>(at::native::isclose)); |
23549 | m.impl("is_floating_point" , static_cast<bool (*)(const at::Tensor & self)>(at::native::is_floating_point)); |
23550 | m.impl("is_conj" , static_cast<bool (*)(const at::Tensor & self)>(at::native::is_conj)); |
23551 | m.impl("isreal" , static_cast<at::Tensor (*)(const at::Tensor & self)>(at::native::isreal)); |
23552 | m.impl("is_nonzero" , static_cast<bool (*)(const at::Tensor & self)>(at::native::is_nonzero)); |
23553 | m.impl("layer_norm" , static_cast<at::Tensor (*)(const at::Tensor & input, c10::SymIntArrayRef normalized_shape, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, double eps, bool cudnn_enable)>(at::native::layer_norm_symint)); |
23554 | m.impl("fbgemm_pack_gemm_matrix_fp16" , static_cast<at::Tensor (*)(const at::Tensor & input)>(at::native::fbgemm_pack_gemm_matrix_fp16)); |
23555 | m.impl("fbgemm_pack_quantized_matrix" , static_cast<at::Tensor (*)(const at::Tensor & input)>(at::native::fbgemm_pack_quantized_matrix)); |
23556 | m.impl("fbgemm_pack_quantized_matrix.KN" , static_cast<at::Tensor (*)(const at::Tensor & input, int64_t K, int64_t N)>(at::native::fbgemm_pack_quantized_matrix)); |
23557 | m.impl("value_selecting_reduction_backward" , static_cast<at::Tensor (*)(const at::Tensor & grad, int64_t dim, const at::Tensor & indices, c10::SymIntArrayRef sizes, bool keepdim)>(at::native::value_selecting_reduction_backward_symint)); |
23558 | m.impl("max_pool1d" , static_cast<at::Tensor (*)(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode)>(at::native::max_pool1d)); |
23559 | m.impl("pairwise_distance" , static_cast<at::Tensor (*)(const at::Tensor & x1, const at::Tensor & x2, double p, double eps, bool keepdim)>(at::native::pairwise_distance)); |
23560 | m.impl("pdist" , static_cast<at::Tensor (*)(const at::Tensor & self, double p)>(at::native::pdist)); |
23561 | m.impl("size.int" , static_cast<int64_t (*)(const at::Tensor & self, int64_t dim)>(at::native::size)); |
23562 | m.impl("size.Dimname" , static_cast<int64_t (*)(const at::Tensor & self, at::Dimname dim)>(at::native::size)); |
23563 | m.impl("smm" , static_cast<at::Tensor (*)(const at::Tensor & self, const at::Tensor & mat2)>(at::native::smm)); |
23564 | m.impl("stride.int" , static_cast<int64_t (*)(const at::Tensor & self, int64_t dim)>(at::native::stride)); |
23565 | m.impl("stride.Dimname" , static_cast<int64_t (*)(const at::Tensor & self, at::Dimname dim)>(at::native::stride)); |
23566 | m.impl("sum_to_size" , static_cast<at::Tensor (*)(const at::Tensor & self, at::IntArrayRef size)>(at::native::sum_to_size)); |
23567 | m.impl("std" , static_cast<at::Tensor (*)(const at::Tensor & self, bool unbiased)>(at::native::std)); |
23568 | m.impl("one_hot" , static_cast<at::Tensor (*)(const at::Tensor & self, int64_t num_classes)>(at::native::one_hot)); |
23569 | m.impl("fliplr" , static_cast<at::Tensor (*)(const at::Tensor & self)>(at::native::fliplr)); |
23570 | m.impl("triplet_margin_loss" , static_cast<at::Tensor (*)(const at::Tensor & anchor, const at::Tensor & positive, const at::Tensor & negative, double margin, double p, double eps, bool swap, int64_t reduction)>(at::native::triplet_margin_loss)); |
23571 | m.impl("type_as" , static_cast<at::Tensor (*)(const at::Tensor & self, const at::Tensor & other)>(at::native::type_as)); |
23572 | m.impl("var" , static_cast<at::Tensor (*)(const at::Tensor & self, bool unbiased)>(at::native::var)); |
23573 | m.impl("_sparse_log_softmax.int" , static_cast<at::Tensor (*)(const at::Tensor & self, int64_t dim, c10::optional<at::ScalarType> dtype)>(at::native::_sparse_log_softmax)); |
23574 | m.impl("_sparse_log_softmax.Dimname" , static_cast<at::Tensor (*)(const at::Tensor & self, at::Dimname dim, c10::optional<at::ScalarType> dtype)>(at::native::_sparse_log_softmax)); |
23575 | m.impl("sparse_csc_tensor.ccol_row_value_size" , static_cast<at::Tensor (*)(const at::Tensor & ccol_indices, const at::Tensor & row_indices, const at::Tensor & values, at::IntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory)>(at::native::sparse_csc_tensor)); |
23576 | m.impl("sparse_csc_tensor.ccol_row_value" , static_cast<at::Tensor (*)(const at::Tensor & ccol_indices, const at::Tensor & row_indices, const at::Tensor & values, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory)>(at::native::sparse_csc_tensor)); |
23577 | m.impl("_sparse_bsr_tensor_unsafe" , static_cast<at::Tensor (*)(const at::Tensor & crow_indices, const at::Tensor & col_indices, const at::Tensor & values, at::IntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory)>(at::native::_sparse_bsr_tensor_unsafe)); |
23578 | m.impl("_validate_sparse_compressed_tensor_args" , static_cast<void (*)(const at::Tensor & compressed_indices, const at::Tensor & plain_indices, const at::Tensor & values, at::IntArrayRef size, at::Layout layout)>(at::native::_validate_sparse_compressed_tensor_args)); |
23579 | m.impl("_validate_sparse_csr_tensor_args" , static_cast<void (*)(const at::Tensor & crow_indices, const at::Tensor & col_indices, const at::Tensor & values, at::IntArrayRef size)>(at::native::_validate_sparse_csr_tensor_args)); |
23580 | m.impl("_validate_sparse_csc_tensor_args" , static_cast<void (*)(const at::Tensor & ccol_indices, const at::Tensor & row_indices, const at::Tensor & values, at::IntArrayRef size)>(at::native::_validate_sparse_csc_tensor_args)); |
23581 | m.impl("_to_cpu" , static_cast<::std::vector<at::Tensor> (*)(at::TensorList tensors)>(at::native::_to_cpu)); |
23582 | m.impl("fake_quantize_per_tensor_affine_cachemask_backward" , static_cast<at::Tensor (*)(const at::Tensor & grad, const at::Tensor & mask)>(at::native::fake_quantize_per_tensor_affine_cachemask_backward)); |
23583 | m.impl("fake_quantize_per_channel_affine_cachemask_backward" , static_cast<at::Tensor (*)(const at::Tensor & grad, const at::Tensor & mask)>(at::native::fake_quantize_per_channel_affine_cachemask_backward)); |
23584 | m.impl("rnn_tanh_cell" , static_cast<at::Tensor (*)(const at::Tensor & input, const at::Tensor & hx, const at::Tensor & w_ih, const at::Tensor & w_hh, const c10::optional<at::Tensor> & b_ih, const c10::optional<at::Tensor> & b_hh)>(at::native::rnn_tanh_cell)); |
23585 | m.impl("set_.source_Tensor_storage_offset" , static_cast<at::Tensor & (*)(at::Tensor & self, const at::Tensor & source, c10::SymInt storage_offset, c10::SymIntArrayRef size, c10::SymIntArrayRef stride)>(at::native::set__symint)); |
23586 | m.impl("__xor__.Scalar" , static_cast<at::Tensor (*)(const at::Tensor & self, const at::Scalar & other)>(at::native::__xor__)); |
23587 | m.impl("__xor__.Tensor" , static_cast<at::Tensor (*)(const at::Tensor & self, const at::Tensor & other)>(at::native::__xor__)); |
23588 | m.impl("__ixor__.Scalar" , static_cast<at::Tensor & (*)(at::Tensor & self, const at::Scalar & other)>(at::native::__ixor__)); |
23589 | m.impl("__ixor__.Tensor" , static_cast<at::Tensor & (*)(at::Tensor & self, const at::Tensor & other)>(at::native::__ixor__)); |
23590 | m.impl("trace_backward" , static_cast<at::Tensor (*)(const at::Tensor & grad, c10::SymIntArrayRef sizes)>(at::native::trace_backward_symint)); |
23591 | m.impl("masked_select_backward" , static_cast<at::Tensor (*)(const at::Tensor & grad, const at::Tensor & input, const at::Tensor & mask)>(at::native::masked_select_backward)); |
23592 | m.impl("argwhere" , static_cast<at::Tensor (*)(const at::Tensor & self)>(at::native::argwhere)); |
23593 | m.impl("cross_entropy_loss" , static_cast<at::Tensor (*)(const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, c10::SymInt ignore_index, double label_smoothing)>(at::native::cross_entropy_loss_symint)); |
23594 | m.impl("_pad_enum" , static_cast<at::Tensor (*)(const at::Tensor & self, c10::SymIntArrayRef pad, int64_t mode, c10::optional<double> value)>(at::native::_pad_enum_symint)); |
23595 | m.impl("pad" , static_cast<at::Tensor (*)(const at::Tensor & self, c10::SymIntArrayRef pad, c10::string_view mode, c10::optional<double> value)>(at::native::pad_symint)); |
23596 | m.impl("upsample_nearest2d.vec" , static_cast<at::Tensor (*)(const at::Tensor & input, at::OptionalIntArrayRef output_size, c10::optional<at::ArrayRef<double>> scale_factors)>(at::native::upsample_nearest2d)); |
23597 | m.impl("_upsample_nearest_exact2d.vec" , static_cast<at::Tensor (*)(const at::Tensor & input, at::OptionalIntArrayRef output_size, c10::optional<at::ArrayRef<double>> scale_factors)>(at::native::_upsample_nearest_exact2d)); |
23598 | m.impl("_add_batch_dim" , static_cast<at::Tensor (*)(const at::Tensor & self, int64_t batch_dim, int64_t level)>(at::native::_add_batch_dim)); |
23599 | m.impl("_remove_batch_dim" , static_cast<at::Tensor (*)(const at::Tensor & self, int64_t level, int64_t batch_size, int64_t out_dim)>(at::native::_remove_batch_dim)); |
23600 | m.impl("fft_ifftshift" , static_cast<at::Tensor (*)(const at::Tensor & self, at::OptionalIntArrayRef dim)>(at::native::fft_ifftshift)); |
23601 | m.impl("_test_serialization_subcmul" , static_cast<at::Tensor (*)(const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha)>(at::native::_test_serialization_subcmul)); |
23602 | m.impl("unflatten_dense_tensors" , static_cast<::std::vector<at::Tensor> (*)(const at::Tensor & flat, at::TensorList tensors)>(at::native::unflatten_dense_tensors));; |
23603 | } |
23604 | |
23605 | } // namespace |
23606 | |
23607 | } // namespace at |
23608 | |
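// ---------------------------------------------------------------------------
// A minimal usage sketch (illustrative only, not part of the generated
// registrations above): the kernels registered in this file intercept mutable
// and out= aten ops under the Functionalize dispatch key and replay them as
// their functional counterparts on a FunctionalTensorWrapper. A hedged example
// of exercising that path from C++ might look like the snippet below; it
// assumes the helpers declared in ATen/FunctionalTensorWrapper.h and the
// dispatch-key guard from c10, and the function name tan_via_functionalization
// is purely illustrative.
//
//   #include <ATen/FunctionalTensorWrapper.h>
//   #include <c10/core/impl/LocalDispatchKeySet.h>
//
//   at::Tensor tan_via_functionalization(const at::Tensor& input) {
//     // Wrap the input; mutations on the wrapper are recorded and replayed
//     // functionally instead of being applied to the original storage.
//     at::Tensor wrapped =
//         at::functionalization::impl::to_functional_tensor(input);
//     {
//       // Route dispatch through the Functionalize key so tan_() reaches the
//       // functionalization::tan_ kernel registered above. (The wrapper
//       // already carries the Functionalize key, so this guard is likely
//       // redundant and is shown only to make the dispatch path explicit.)
//       c10::impl::IncludeDispatchKeyGuard guard(c10::DispatchKey::Functionalize);
//       wrapped.tan_();
//     }
//     // Unwrap the result back into an ordinary tensor.
//     return at::functionalization::impl::from_functional_tensor(wrapped);
//   }
// ---------------------------------------------------------------------------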